/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"
static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
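/*
 * Editorial example, not in the original driver: a Get handler in this
 * file typically fills in smp->data and then ends with
 *
 *	return reply(smp);
 *
 * which turns the request buffer into a GetResp in place and tells the
 * ib_mad layer to send it back; for directed-route MADs the direction
 * bit set in smp->status makes the response retrace the request path.
 */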
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2 */
	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->tid++;
	smp->tid = cpu_to_be64(ibp->tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	spin_lock_irqsave(&ibp->lock, flags);
	if (!ibp->sm_ah) {
		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;
			struct ib_ah_attr attr;

			memset(&attr, 0, sizeof attr);
			attr.dlid = ibp->sm_lid;
			attr.port_num = ppd_from_ibp(ibp)->port;
			ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
			if (IS_ERR(ah))
				ret = -EINVAL;
			else {
				ret = 0;
				ibp->sm_ah = to_iah(ah);
			}
		} else
			ret = -EINVAL;
	} else
		ret = 0;
	if (!ret)
		send_buf->ah = &ibp->sm_ah->ibah;
	spin_unlock_irqrestore(&ibp->lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->trap_timeout = 0;
	}
}
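/*
 * Editorial example, not in the original driver (worked values): IB
 * encodes the trap repeat interval as 4.096 usec * 2^subnet_timeout.
 * With subnet_timeout = 18 the computation above yields
 *
 *	timeout = (4096 * (1UL << 18)) / 1000 = 1073741 usec ~= 1.07 s
 *
 * so a given port re-sends a trap at most roughly once per second.
 */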
/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
		ibp->pkey_violations++;
	else
		ibp->qkey_violations++;
	ibp->n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof data);
}
/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof data);
}
/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof data);
}
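/*
 * Editorial note, not in the original driver (shared pattern): each
 * trap sender above builds a struct ib_mad_notice_attr on the stack,
 * zeroes the details union, fills in only the notice-specific ntc_*
 * member, and hands the whole structure to qib_send_trap(), which
 * copies it into the NOTICE attribute of an outgoing SubnTrap() SMP.
 */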
static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 vendor, majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;     /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->vendorid;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}
static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
				     (u32)n);
	return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
				     (u32)n);
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->mkey_lease_timeout = 0;
		ibp->mkeyprot = 0;
	}

	/* M_Key checking depends on Portinfo:M_Key_protect_bits */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
	    ibp->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
	     (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
		if (ibp->mkey_violations != 0xFFFF)
			++ibp->mkey_violations;
		if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
			ibp->mkey_lease_timeout = jiffies +
				ibp->mkey_lease_period * HZ;
		/* Generate a trap notice. */
		qib_bad_mkey(ibp, smp);
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	} else if (ibp->mkey_lease_timeout)
		ibp->mkey_lease_timeout = 0;

	return ret;
}
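/*
 * Editorial note, not in the original driver (summary of the check
 * above): mkeyprot selects how strictly a mismatched M_Key is
 * enforced -- levels 0 and 1 still allow Get (so a subnet manager can
 * be discovered), while level 2 or higher rejects Get as well; Set
 * and TrapRepress are always rejected on a mismatch, and each
 * rejection bumps mkey_violations and arms the lease timeout.
 */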
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret)
				goto bail;
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->mkey != smp->mkey &&
	      ibp->mkeyprot == 1))
		pip->mkey = ibp->mkey;
	pip->gid_prefix = ibp->gid_prefix;
	lid = ppd->lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
	pip->vl_high_limit = ibp->vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}
/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}

static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct qib_devdata *dd = dd_from_ibdev(ibdev);
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
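/*
 * Editorial example, not in the original driver (hypothetical values):
 * the table is copied into smp->data in host order and then converted
 * to network order in place.  With the default full-membership key
 * p[0] = 0xFFFF the conversion is a no-op, while p[1] = 0x8001 goes
 * out as the byte sequence 0x80 0x01 regardless of host endianness.
 * Aliasing p and q over the same buffer is safe here because entry i
 * is read before it is rewritten.
 */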
static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		unsigned i;

		/* The first entry is read-only. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = p[i];
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	/* The only GUID we support is the first read-only entry. */
	return subn_get_guidinfo(smp, ibdev, port);
}
/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	char clientrereg = 0;
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->mkey = pip->mkey;
	ibp->gid_prefix = pip->gid_prefix;
	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
		spin_lock_irqsave(&ibp->lock, flags);
		if (ibp->sm_ah) {
			if (smlid != ibp->sm_lid)
				ibp->sm_ah->attr.dlid = smlid;
			if (msl != ibp->sm_sl)
				ibp->sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->lock, flags);
		if (smlid != ibp->sm_lid)
			ibp->sm_lid = smlid;
		if (msl != ibp->sm_sl)
			ibp->sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lwe != ppd->link_width_enabled)
			set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			set_link_speed_enabled(ppd,
					       ppd->link_speed_supported);
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lse != ppd->link_speed_enabled)
			set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				ibp->vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	if (pip->mkey_violations == 0)
		ibp->mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		smp->status |= IB_SMP_INVALID_FIELD;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		smp->status |= IB_SMP_INVALID_FIELD;

	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		smp->status |= IB_SMP_INVALID_FIELD;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else {
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ret = subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}
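/*
 * Editorial example, not in the original driver (worked values,
 * assuming QIB_MULTICAST_LID_BASE is the usual IB multicast base
 * 0xC000): the unicast checks above accept LIDs 0x0001-0xBFFF and
 * reject 0 and the multicast range, e.g. lid = 0xC001 sets
 * IB_SMP_INVALID_FIELD, but processing of the remaining PortInfo
 * fields still continues so one bad field does not mask the others.
 */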
/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the port data
 * @key: the PKEY
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		ret = 0;
		goto bail;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.ibdev;
		event.element.port_num = 1;
		ib_dispatch_event(&event);
	}
	return 0;
}

static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}
static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}
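/*
 * Editorial example, not in the original driver (worked values): the
 * SL->VL table packs two 4-bit VL entries per byte, SL(2n) in the
 * high nibble and SL(2n+1) in the low nibble.  If sl_to_vl[0] = 3 and
 * sl_to_vl[1] = 0xA, the Get handler above emits the byte
 *
 *	(3 << 4) | 0xA = 0x3A
 *
 * and the Set handler splits 0x3A back into 3 and 0xA.
 */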
static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
static int pma_get_classportinfo(struct ib_pma_mad *pmp,
				 struct ib_device *ibdev)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	p->reserved[0] = dd->psxmitwait_supported << 7;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}
static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;   /* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
	p->tag = cpu_to_be16(ibp->pma_tag);
	p->counter_select[0] = ibp->pma_counter_select[0];
	p->counter_select[1] = ibp->pma_counter_select[1];
	p->counter_select[2] = ibp->pma_counter_select[2];
	p->counter_select[3] = ibp->pma_counter_select[3];
	p->counter_select[4] = ibp->pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->pma_tag = be16_to_cpu(p->tag);
		ibp->pma_counter_select[0] = p->counter_select[0];
		ibp->pma_counter_select[1] = p->counter_select[1];
		ibp->pma_counter_select[2] = p->counter_select[2];
		ibp->pma_counter_select[3] = p->counter_select[3];
		ibp->pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
				      ibp->pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
	u32 delta;

	delta = get_counter(&ppd->ibport_data, ppd,
			    IB_PMA_PORT_XMIT_WAIT);
	return ppd->cong_stats.counter + delta;
}

static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;

	ppd->cong_stats.counter_cache.psxmitdata =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
	ppd->cong_stats.counter_cache.psrcvdata =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
	ppd->cong_stats.counter_cache.psxmitpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
	ppd->cong_stats.counter_cache.psrcvpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
	ppd->cong_stats.counter_cache.psxmitwait =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}

static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
					__be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->cong_stats.counter_cache.psxmitdata;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->cong_stats.counter_cache.psrcvdata;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->cong_stats.counter_cache.psxmitpkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->cong_stats.counter_cache.psrcvpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->cong_stats.counter_cache.psxmitwait;
		break;
	default:
		ret = 0;
	}

	return ret;
}
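/*
 * Editorial note, not in the original driver (why the cache exists):
 * once a sample completes, the PS* hardware registers are re-armed
 * for the congestion timer, so the just-finished sample is copied
 * into counter_cache first and later PortSamplesResult queries are
 * answered from that snapshot rather than from the live registers.
 */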
static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		/* 64 bits */
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}
static int pma_get_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;
	u8 port_select = p->port_select;

	qib_get_counters(ppd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}
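/*
 * Editorial note, not in the original driver (pattern used above):
 * PortCounters fields are narrower than the 64-bit hardware counters,
 * so each value is saturated rather than truncated, e.g. for a 16-bit
 * field the effective computation is
 *
 *	val > 0xFFFF ? 0xFFFF : (u16)val
 *
 * which keeps an overflowed counter pegged at the maximum instead of
 * silently wrapping to a small number.
 */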
static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
	       sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16(
				(u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16(
				(u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}
static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
	p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}
static int pma_set_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	return pma_get_portcounters(pmp, ibdev, port);
}
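/*
 * Editorial example, not in the original driver (worked values):
 * "clearing" a counter here just records a zero-point.  If the
 * hardware symbol-error count is 40 when PortCounters is Set,
 * z_symbol_error_counter becomes 40; when the hardware later reads
 * 47, the next Get reports 47 - 40 = 7, as if the counter had really
 * been reset.
 */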
static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

	qib_get_counters(ppd, &cntrs);
	/* Get counter values before we save them */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		spin_lock_irqsave(&ppd->ibport_data.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	return ret;
}

static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->n_multicast_rcv = 0;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	ret = check_mkey(ibp, smp, mad_flags);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * If this is a get/set portinfo, we already check the
		 * M_Key if the MAD is for another port and the M_Key
		 * is OK on the receiving port. This check is needed
		 * to increment the error counters when the M_Key
		 * fails to match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
		goto bail;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_get_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_get_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_get_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_get_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_get_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_set_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_set_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_set_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_set_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_set_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
			ret = subn_trap_repress(smp, ibdev, port);
		else {
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
		}
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_SEND:
		if (ib_get_smp_direction(smp) &&
		    smp->attr_id == QIB_VENDOR_IPG) {
			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
					      smp->data[0]);
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		} else
			ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
static int process_perf(struct ib_device *ibdev, u8 port,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}
/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}
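/*
 * Editorial note, not in the original driver (return-value contract,
 * roughly as the ib_mad layer interprets it):
 *
 *	IB_MAD_RESULT_SUCCESS              - MAD was valid, keep going
 *	IB_MAD_RESULT_SUCCESS | _REPLY     - send out_mad back as a reply
 *	IB_MAD_RESULT_SUCCESS | _CONSUMED  - handled, send no reply
 *
 * which is why reply() tacks on IB_MAD_RESULT_REPLY while the SM_INFO
 * and trap-repress paths return IB_MAD_RESULT_CONSUMED instead.
 */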
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			goto done;
	}
	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}
int qib_create_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;
	int ret;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
					      NULL, 0, send_handler,
					      NULL, NULL);
		if (IS_ERR(agent)) {
			ret = PTR_ERR(agent);
			goto err;
		}

		/* Initialize xmit_wait structure */
		dd->pport[p].cong_stats.counter = 0;
		init_timer(&dd->pport[p].cong_stats.timer);
		dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
		dd->pport[p].cong_stats.timer.data =
			(unsigned long)(&dd->pport[p]);
		dd->pport[p].cong_stats.timer.expires = 0;
		add_timer(&dd->pport[p].cong_stats.timer);

		ibp->send_agent = agent;
	}

	return 0;

err:
	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
	}

	return ret;
}

void qib_free_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
		if (ibp->sm_ah) {
			ib_destroy_ah(&ibp->sm_ah->ibah);
			ibp->sm_ah = NULL;
		}
		if (dd->pport[p].cong_stats.timer.data)
			del_timer_sync(&dd->pport[p].cong_stats.timer);
	}
}