/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

static int reply(struct ib_smp *smp)
{
        /*
         * The verbs framework will handle the directed/LID route
         * packet changes.
         */
        smp->method = IB_MGMT_METHOD_GET_RESP;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                smp->status |= IB_SMP_DIRECTION;
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

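/*
 * Descriptive comment (added; summarizes the visible logic below):
 * qib_send_trap posts a trap notice of @len bytes from @data to the
 * subnet manager on @ibp.  Traps are suppressed while the link is not
 * ACTIVE (o14-3.2.1), rate-limited via ibp->trap_timeout (o14-2), and
 * the send buffer is freed again if the post fails.
 */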
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_agent *agent;
        struct ib_smp *smp;
        int ret;
        unsigned long flags;
        unsigned long timeout;

        agent = ibp->send_agent;
        if (!agent)
                return;

        /* o14-3.2.1 */
        if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
                return;

        /* o14-2 */
        if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
                return;

        send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
                                      IB_MGMT_MAD_DATA, GFP_ATOMIC);
        if (IS_ERR(send_buf))
                return;

        smp = send_buf->mad;
        smp->base_version = IB_MGMT_BASE_VERSION;
        smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        smp->class_version = 1;
        smp->method = IB_MGMT_METHOD_TRAP;
        ibp->tid++;
        smp->tid = cpu_to_be64(ibp->tid);
        smp->attr_id = IB_SMP_ATTR_NOTICE;
        /* o14-1: smp->mkey = 0; */
        memcpy(smp->data, data, len);

        spin_lock_irqsave(&ibp->lock, flags);
        if (!ibp->sm_ah) {
                if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
                        struct ib_ah *ah;
                        struct ib_ah_attr attr;

                        memset(&attr, 0, sizeof attr);
                        attr.dlid = ibp->sm_lid;
                        attr.port_num = ppd_from_ibp(ibp)->port;
                        ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
                        if (IS_ERR(ah))
                                ret = -EINVAL;
                        else {
                                ret = 0;
                                ibp->sm_ah = to_iah(ah);
                        }
                } else
                        ret = -EINVAL;
        } else {
                send_buf->ah = &ibp->sm_ah->ibah;
                ret = 0;
        }
        spin_unlock_irqrestore(&ibp->lock, flags);

        if (!ret)
                ret = ib_post_send_mad(send_buf, NULL);
        if (!ret) {
                /* 4.096 usec. */
                timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
                ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
        } else {
                ib_free_send_mad(send_buf);
                ibp->trap_timeout = 0;
        }
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
        struct ib_mad_notice_attr data;

        if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
                ibp->pkey_violations++;
        else
                ibp->qkey_violations++;
        ibp->n_pkt_drops++;

        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof data.details);
        data.details.ntc_257_258.lid1 = lid1;
        data.details.ntc_257_258.lid2 = lid2;
        data.details.ntc_257_258.key = cpu_to_be32(key);
        data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
        data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

        qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
        struct ib_mad_notice_attr data;

        /* Send violation trap */
        data.generic_type = IB_NOTICE_TYPE_SECURITY;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof data.details);
        data.details.ntc_256.lid = data.issuer_lid;
        data.details.ntc_256.method = smp->method;
        data.details.ntc_256.attr_id = smp->attr_id;
        data.details.ntc_256.attr_mod = smp->attr_mod;
        data.details.ntc_256.mkey = smp->mkey;
        if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                u8 hop_cnt;

                data.details.ntc_256.dr_slid = smp->dr_slid;
                data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
                hop_cnt = smp->hop_cnt;
                if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
                        data.details.ntc_256.dr_trunc_hop |=
                                IB_NOTICE_TRAP_DR_TRUNC;
                        hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
                }
                data.details.ntc_256.dr_trunc_hop |= hop_cnt;
                memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
                       hop_cnt);
        }

        qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct qib_ibport *ibp)
{
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof data.details);
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

        qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof data.details);
        data.details.ntc_145.lid = data.issuer_lid;
        data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

        qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
        struct ib_mad_notice_attr data;

        data.generic_type = IB_NOTICE_TYPE_INFO;
        data.prod_type_msb = 0;
        data.prod_type_lsb = IB_NOTICE_PROD_CA;
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
        memset(&data.details, 0, sizeof data.details);
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.local_changes = 1;
        data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

        qib_send_trap(ibp, &data, sizeof data);
}

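/*
 * Descriptive comment (added): reply with the NodeDescription string,
 * flagging any non-zero attribute modifier as invalid.
 */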
static int subn_get_nodedescription(struct ib_smp *smp,
                                    struct ib_device *ibdev)
{
        if (smp->attr_mod)
                smp->status |= IB_SMP_INVALID_FIELD;

        memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

        return reply(smp);
}

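/*
 * Descriptive comment (added): fill in the NodeInfo attribute from the
 * device data; a zero port GUID or out-of-range port is reported as an
 * invalid field.
 */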
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 vendor, majrev, minrev;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* GUID 0 is illegal */
        if (smp->attr_mod || pidx >= dd->num_pports ||
            dd->pport[pidx].guid == 0)
                smp->status |= IB_SMP_INVALID_FIELD;
        else
                nip->port_guid = dd->pport[pidx].guid;

        nip->base_version = 1;
        nip->class_version = 1;
        nip->node_type = 1;     /* channel adapter */
        nip->num_ports = ibdev->phys_port_cnt;
        /* This is already in network order */
        nip->sys_guid = ib_qib_sys_image_guid;
        nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
        nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
        nip->device_id = cpu_to_be16(dd->deviceid);
        majrev = dd->majrev;
        minrev = dd->minrev;
        nip->revision = cpu_to_be32((majrev << 16) | minrev);
        nip->local_port_num = port;
        vendor = dd->vendorid;
        nip->vendor_id[0] = QIB_SRC_OUI_1;
        nip->vendor_id[1] = QIB_SRC_OUI_2;
        nip->vendor_id[2] = QIB_SRC_OUI_3;

        return reply(smp);
}

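/*
 * Descriptive comment (added): return the GUID table block selected by
 * the attribute modifier.  Only block 0 is supported; entry 0 is the
 * read-only hardware GUID.
 */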
static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
        __be64 *p = (__be64 *) smp->data;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* 32 blocks of 8 64-bit GUIDs per block */

        memset(smp->data, 0, sizeof(smp->data));

        if (startgx == 0 && pidx < dd->num_pports) {
                struct qib_pportdata *ppd = dd->pport + pidx;
                struct qib_ibport *ibp = &ppd->ibport_data;
                __be64 g = ppd->guid;
                unsigned i;

                /* GUID 0 is illegal */
                if (g == 0)
                        smp->status |= IB_SMP_INVALID_FIELD;
                else {
                        /* The first is a copy of the read-only HW GUID. */
                        p[0] = g;
                        for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
                                p[i] = ibp->guids[i - 1];
                }
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
                                     (u32)n);
        return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
                                     (u32)n);
        return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
        return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
                IB_LINKINITCMD_SLEEP;
}

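/*
 * Descriptive comment (added): validate the M_Key of an incoming SMP
 * per the Portinfo:M_Key_protect_bits.  Returns 0 if the MAD may be
 * processed further, or IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED
 * after recording a violation and generating a bad M_Key trap.
 */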
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
        int ret = 0;

        /* Is the mkey in the process of expiring? */
        if (ibp->mkey_lease_timeout &&
            time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
                /* Clear timeout and mkey protection field. */
                ibp->mkey_lease_timeout = 0;
                ibp->mkeyprot = 0;
        }

        /* M_Key checking depends on Portinfo:M_Key_protect_bits */
        if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
            ibp->mkey != smp->mkey &&
            (smp->method == IB_MGMT_METHOD_SET ||
             smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
             (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
                if (ibp->mkey_violations != 0xFFFF)
                        ++ibp->mkey_violations;
                if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
                        ibp->mkey_lease_timeout = jiffies +
                                ibp->mkey_lease_period * HZ;
                /* Generate a trap notice. */
                qib_bad_mkey(ibp, smp);
                ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
        } else if (ibp->mkey_lease_timeout)
                ibp->mkey_lease_timeout = 0;

        return ret;
}

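/**
 * subn_get_portinfo - get port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Get PortInfo (see ch. 14.2.5.6).
 */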
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd;
        struct qib_pportdata *ppd;
        struct qib_ibport *ibp;
        struct ib_port_info *pip = (struct ib_port_info *)smp->data;
        u8 mtu;
        int ret;
        u32 state;
        u32 port_num = be32_to_cpu(smp->attr_mod);

        if (port_num == 0)
                port_num = port;
        else {
                if (port_num > ibdev->phys_port_cnt) {
                        smp->status |= IB_SMP_INVALID_FIELD;
                        ret = reply(smp);
                        goto bail;
                }
                if (port_num != port) {
                        ibp = to_iport(ibdev, port_num);
                        ret = check_mkey(ibp, smp, 0);
                        if (ret)
                                goto bail;
                }
        }

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hdw from 0 */
        ppd = dd->pport + (port_num - 1);
        ibp = &ppd->ibport_data;

        /* Clear all fields.  Only set the non-zero fields. */
        memset(smp->data, 0, sizeof(smp->data));

        /* Only return the mkey if the protection field allows it. */
        if (!(smp->method == IB_MGMT_METHOD_GET &&
              ibp->mkey != smp->mkey &&
              ibp->mkeyprot == 1))
                pip->mkey = ibp->mkey;
        pip->gid_prefix = ibp->gid_prefix;
        pip->lid = cpu_to_be16(ppd->lid);
        pip->sm_lid = cpu_to_be16(ibp->sm_lid);
        pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
        /* pip->diag_code; */
        pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
        pip->local_port_num = port;
        pip->link_width_enabled = ppd->link_width_enabled;
        pip->link_width_supported = ppd->link_width_supported;
        pip->link_width_active = ppd->link_width_active;
        state = dd->f_iblink_state(ppd->lastibcstat);
        pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

        pip->portphysstate_linkdown =
                (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
                (get_linkdowndefaultstate(ppd) ? 1 : 2);
        pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
        pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
                ppd->link_speed_enabled;
        switch (ppd->ibmtu) {
        default: /* something is wrong; fall through */
        case 4096:
                mtu = IB_MTU_4096;
                break;
        case 2048:
                mtu = IB_MTU_2048;
                break;
        case 1024:
                mtu = IB_MTU_1024;
                break;
        case 512:
                mtu = IB_MTU_512;
                break;
        case 256:
                mtu = IB_MTU_256;
                break;
        }
        pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
        pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
        pip->vl_high_limit = ibp->vl_high_limit;
        pip->vl_arb_high_cap =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
        pip->vl_arb_low_cap =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
        /* InitTypeReply = 0 */
        pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
        /* HCAs ignore VLStallCount and HOQLife */
        /* pip->vlstallcnt_hoqlife; */
        pip->operationalvl_pei_peo_fpi_fpo =
                dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
        pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
        /* P_KeyViolations are counted by hardware. */
        pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
        pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
        /* Only the hardware GUID is supported for now */
        pip->guid_cap = QIB_GUIDS_PER_PORT;
        pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
        /* 32.768 usec. response time (guessing) */
        pip->resv_resptimevalue = 3;
        pip->localphyerrors_overrunerrors =
                (get_phyerrthreshold(ppd) << 4) |
                get_overrunthreshold(ppd);
        /* pip->max_credit_hint; */
        if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
                u32 v;

                v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
                pip->link_roundtrip_latency[0] = v >> 16;
                pip->link_roundtrip_latency[1] = v >> 8;
                pip->link_roundtrip_latency[2] = v;
        }

        ret = reply(smp);

bail:
        return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
        struct qib_pportdata *ppd = dd->pport + port - 1;
        /*
         * always a kernel context, no locking needed.
         * If we get here with ppd setup, no need to check
         * that rcd is valid.
         */
        struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

        memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

        return 0;
}

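/*
 * Descriptive comment (added): return the P_Key block selected by the
 * attribute modifier.  Only block 0 is supported; the entries from
 * get_pkeys() are byte-swapped in place within smp->data.
 */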
static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
                              u8 port)
{
        u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
        u16 *p = (u16 *) smp->data;
        __be16 *q = (__be16 *) smp->data;

        /* 64 blocks of 32 16-bit P_Key entries */

        memset(smp->data, 0, sizeof(smp->data));
        if (startpx == 0) {
                struct qib_devdata *dd = dd_from_ibdev(ibdev);
                unsigned i, n = qib_get_npkeys(dd);

                get_pkeys(dd, port, p);

                for (i = 0; i < n; i++)
                        q[i] = cpu_to_be16(p[i]);
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

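/*
 * Descriptive comment (added): set the writable GUID table entries;
 * entry 0 is read-only, and only block 0 may be addressed.
 */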
static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
        __be64 *p = (__be64 *) smp->data;
        unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

        /* 32 blocks of 8 64-bit GUIDs per block */

        if (startgx == 0 && pidx < dd->num_pports) {
                struct qib_pportdata *ppd = dd->pport + pidx;
                struct qib_ibport *ibp = &ppd->ibport_data;
                unsigned i;

                /* The first entry is read-only. */
                for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
                        ibp->guids[i - 1] = p[i];
        } else
                smp->status |= IB_SMP_INVALID_FIELD;

        /* The only GUID we support is the first read-only entry. */
        return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct ib_port_info *pip = (struct ib_port_info *)smp->data;
        struct ib_event event;
        struct qib_devdata *dd;
        struct qib_pportdata *ppd;
        struct qib_ibport *ibp;
        char clientrereg = 0;
        unsigned long flags;
        u16 lid, smlid;
        u8 lwe;
        u8 lse;
        u8 state;
        u8 vls;
        u8 msl;
        u16 lstate;
        int ret, ore, mtu;
        u32 port_num = be32_to_cpu(smp->attr_mod);

        if (port_num == 0)
                port_num = port;
        else {
                if (port_num > ibdev->phys_port_cnt)
                        goto err;
                /* Port attributes can only be set on the receiving port */
                if (port_num != port)
                        goto get_only;
        }

        dd = dd_from_ibdev(ibdev);
        /* IB numbers ports from 1, hdw from 0 */
        ppd = dd->pport + (port_num - 1);
        ibp = &ppd->ibport_data;
        event.device = ibdev;
        event.element.port_num = port;

        ibp->mkey = pip->mkey;
        ibp->gid_prefix = pip->gid_prefix;
        ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

        lid = be16_to_cpu(pip->lid);
        /* Must be a valid unicast LID address. */
        if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
                smp->status |= IB_SMP_INVALID_FIELD;
        else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
                if (ppd->lid != lid)
                        qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
                if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
                        qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
                qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
                event.event = IB_EVENT_LID_CHANGE;
                ib_dispatch_event(&event);
        }

        smlid = be16_to_cpu(pip->sm_lid);
        msl = pip->neighbormtu_mastersmsl & 0xF;
        /* Must be a valid unicast LID address. */
        if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
                smp->status |= IB_SMP_INVALID_FIELD;
        else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
                spin_lock_irqsave(&ibp->lock, flags);
                if (ibp->sm_ah) {
                        if (smlid != ibp->sm_lid)
                                ibp->sm_ah->attr.dlid = smlid;
                        if (msl != ibp->sm_sl)
                                ibp->sm_ah->attr.sl = msl;
                }
                spin_unlock_irqrestore(&ibp->lock, flags);
                if (smlid != ibp->sm_lid)
                        ibp->sm_lid = smlid;
                if (msl != ibp->sm_sl)
                        ibp->sm_sl = msl;
                event.event = IB_EVENT_SM_CHANGE;
                ib_dispatch_event(&event);
        }

        /* Allow 1x or 4x to be set (see 14.2.6.6). */
        lwe = pip->link_width_enabled;
        if (lwe) {
                if (lwe == 0xFF)
                        set_link_width_enabled(ppd, ppd->link_width_supported);
                else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
                        smp->status |= IB_SMP_INVALID_FIELD;
                else if (lwe != ppd->link_width_enabled)
                        set_link_width_enabled(ppd, lwe);
        }

        lse = pip->linkspeedactive_enabled & 0xF;
        if (lse) {
                /*
                 * The IB 1.2 spec. only allows link speed values
                 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
                 * speeds.
                 */
                if (lse == 15)
                        set_link_speed_enabled(ppd,
                                               ppd->link_speed_supported);
                else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
                        smp->status |= IB_SMP_INVALID_FIELD;
                else if (lse != ppd->link_speed_enabled)
                        set_link_speed_enabled(ppd, lse);
        }

        /* Set link down default state. */
        switch (pip->portphysstate_linkdown & 0xF) {
        case 0: /* NOP */
                break;
        case 1: /* SLEEP */
                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
                                        IB_LINKINITCMD_SLEEP);
                break;
        case 2: /* POLL */
                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
                                        IB_LINKINITCMD_POLL);
                break;
        default:
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
        ibp->vl_high_limit = pip->vl_high_limit;
        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
                                ibp->vl_high_limit);

        mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
        if (mtu == -1)
                smp->status |= IB_SMP_INVALID_FIELD;
        else
                qib_set_mtu(ppd, mtu);

        /* Set operational VLs */
        vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
        if (vls) {
                if (vls > ppd->vls_supported)
                        smp->status |= IB_SMP_INVALID_FIELD;
                else
                        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
        }

        if (pip->mkey_violations == 0)
                ibp->mkey_violations = 0;

        if (pip->pkey_violations == 0)
                ibp->pkey_violations = 0;

        if (pip->qkey_violations == 0)
                ibp->qkey_violations = 0;

        ore = pip->localphyerrors_overrunerrors;
        if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
                smp->status |= IB_SMP_INVALID_FIELD;

        if (set_overrunthreshold(ppd, (ore & 0xF)))
                smp->status |= IB_SMP_INVALID_FIELD;

        ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

        if (pip->clientrereg_resv_subnetto & 0x80) {
                clientrereg = 1;
                event.event = IB_EVENT_CLIENT_REREGISTER;
                ib_dispatch_event(&event);
        }

        /*
         * Do the port state change now that the other link parameters
         * have been set.
         * Changing the port physical state only makes sense if the link
         * is down or is being set to down.
         */
        state = pip->linkspeed_portstate & 0xF;
        lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
        if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
                smp->status |= IB_SMP_INVALID_FIELD;

        /*
         * Only state changes of DOWN, ARM, and ACTIVE are valid
         * and must be in the correct state to take effect (see 7.2.6).
         */
        switch (state) {
        case IB_PORT_NOP:
                if (lstate == 0)
                        break;
                /* FALLTHROUGH */
        case IB_PORT_DOWN:
                if (lstate == 0)
                        lstate = QIB_IB_LINKDOWN_ONLY;
                else if (lstate == 1)
                        lstate = QIB_IB_LINKDOWN_SLEEP;
                else if (lstate == 2)
                        lstate = QIB_IB_LINKDOWN;
                else if (lstate == 3)
                        lstate = QIB_IB_LINKDOWN_DISABLE;
                else {
                        smp->status |= IB_SMP_INVALID_FIELD;
                        break;
                }
                spin_lock_irqsave(&ppd->lflags_lock, flags);
                ppd->lflags &= ~QIBL_LINKV;
                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                qib_set_linkstate(ppd, lstate);
                /*
                 * Don't send a reply if the response would be sent
                 * through the disabled port.
                 */
                if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
                        ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
                        goto done;
                }
                qib_wait_linkstate(ppd, QIBL_LINKV, 10);
                break;
        case IB_PORT_ARMED:
                qib_set_linkstate(ppd, QIB_IB_LINKARM);
                break;
        case IB_PORT_ACTIVE:
                qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
                break;
        default:
                smp->status |= IB_SMP_INVALID_FIELD;
        }

        ret = subn_get_portinfo(smp, ibdev, port);

        if (clientrereg)
                pip->clientrereg_resv_subnetto |= 0x80;

        goto done;

err:
        smp->status |= IB_SMP_INVALID_FIELD;
get_only:
        ret = subn_get_portinfo(smp, ibdev, port);
done:
        return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the physical port data
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
        int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (ppd->pkeys[i] != key)
                        continue;
                if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
                        ppd->pkeys[i] = 0;
                        ret = 1;
                        goto bail;
                }
                ret = 0;
                goto bail;
        }

        ret = 0;

bail:
        return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the physical port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
        int i;
        u16 lkey = key & 0x7FFF;
        int any = 0;
        int ret;

        if (lkey == 0x7FFF) {
                ret = 0;
                goto bail;
        }

        /* Look for an empty slot or a matching PKEY. */
        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i]) {
                        any++;
                        continue;
                }
                /* If it matches exactly, try to increment the ref count */
                if (ppd->pkeys[i] == key) {
                        if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
                                ret = 0;
                                goto bail;
                        }
                        /* Lost the race. Look for an empty slot below. */
                        atomic_dec(&ppd->pkeyrefs[i]);
                        any++;
                }
                /*
                 * It makes no sense to have both the limited and unlimited
                 * PKEY set at the same time since the unlimited one will
                 * disable the limited one.
                 */
                if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (!any) {
                ret = -EBUSY;
                goto bail;
        }
        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
                if (!ppd->pkeys[i] &&
                    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
                        /* for qibstats, etc. */
                        ppd->pkeys[i] = key;
                        ret = 1;
                        goto bail;
                }
        }
        ret = -EBUSY;

bail:
        return ret;
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
        struct qib_pportdata *ppd;
        struct qib_ctxtdata *rcd;
        int i;
        int changed = 0;

        /*
         * IB port one/two always maps to context zero/one,
         * always a kernel context, no locking needed
         * If we get here with ppd setup, no need to check
         * that rcd is valid.
         */
        ppd = dd->pport + (port - 1);
        rcd = dd->rcd[ppd->hw_pidx];

        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
                u16 key = pkeys[i];
                u16 okey = rcd->pkeys[i];

                if (key == okey)
                        continue;
                /*
                 * The value of this PKEY table entry is changing.
                 * Remove the old entry in the hardware's array of PKEYs.
                 */
                if (okey & 0x7FFF)
                        changed |= rm_pkey(ppd, okey);
                if (key & 0x7FFF) {
                        int ret = add_pkey(ppd, key);

                        if (ret < 0)
                                key = 0;
                        else
                                changed |= ret;
                }
                rcd->pkeys[i] = key;
        }
        if (changed) {
                struct ib_event event;

                (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

                event.event = IB_EVENT_PKEY_CHANGE;
                event.device = &dd->verbs_dev.ibdev;
                event.element.port_num = port;
                ib_dispatch_event(&event);
        }
        return 0;
}

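/*
 * Descriptive comment (added): write the P_Key table from the SMP
 * payload (block 0 only) and reply with the resulting table via
 * subn_get_pkeytable().
 */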
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
                              u8 port)
{
        u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
        __be16 *p = (__be16 *) smp->data;
        u16 *q = (u16 *) smp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        unsigned i, n = qib_get_npkeys(dd);

        for (i = 0; i < n; i++)
                q[i] = be16_to_cpu(p[i]);

        if (startpx != 0 || set_pkeys(dd, port, q) != 0)
                smp->status |= IB_SMP_INVALID_FIELD;

        return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_ibport *ibp = to_iport(ibdev, port);
        u8 *p = (u8 *) smp->data;
        unsigned i;

        memset(smp->data, 0, sizeof(smp->data));

        if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
                smp->status |= IB_SMP_UNSUP_METHOD;
        else
                for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
                        *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

        return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        struct qib_ibport *ibp = to_iport(ibdev, port);
        u8 *p = (u8 *) smp->data;
        unsigned i;

        if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
                smp->status |= IB_SMP_UNSUP_METHOD;
                return reply(smp);
        }

        for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
                ibp->sl_to_vl[i] = *p >> 4;
                ibp->sl_to_vl[i + 1] = *p & 0xF;
        }
        qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
                            _QIB_EVENT_SL2VL_CHANGE_BIT);

        return subn_get_sl_to_vl(smp, ibdev, port);
}

static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
                           u8 port)
{
        unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
        struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

        memset(smp->data, 0, sizeof(smp->data));

        if (ppd->vls_supported == IB_VL_VL0)
                smp->status |= IB_SMP_UNSUP_METHOD;
        else if (which == IB_VLARB_LOWPRI_0_31)
                (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
                                               smp->data);
        else if (which == IB_VLARB_HIGHPRI_0_31)
                (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
                                               smp->data);
        else
                smp->status |= IB_SMP_INVALID_FIELD;

        return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
                           u8 port)
{
        unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
        struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

        if (ppd->vls_supported == IB_VL_VL0)
                smp->status |= IB_SMP_UNSUP_METHOD;
        else if (which == IB_VLARB_LOWPRI_0_31)
                (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
                                               smp->data);
        else if (which == IB_VLARB_HIGHPRI_0_31)
                (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
                                               smp->data);
        else
                smp->status |= IB_SMP_INVALID_FIELD;

        return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
        /*
         * For now, we only send the trap once so no need to process this.
         * o13-6, o13-7,
         * o14-3.a4 The SMA shall not send any message in response to a valid
         * SubnTrapRepress() message.
         */
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

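/*
 * Descriptive comment (added): fill in the PMA ClassPortInfo reply,
 * advertising extended-width counter support and, when available,
 * congestion statistics (via the CM2 high bit).
 */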
static int pma_get_classportinfo(struct ib_pma_mad *pmp,
                                 struct ib_device *ibdev)
{
        struct ib_class_port_info *p =
                (struct ib_class_port_info *)pmp->data;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);

        memset(pmp->data, 0, sizeof(pmp->data));

        if (pmp->mad_hdr.attr_mod != 0)
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

        /* Note that AllPortSelect is not valid */
        p->base_version = 1;
        p->class_version = 1;
        p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
        /*
         * Set the most significant bit of CM2 to indicate support for
         * congestion statistics
         */
        p->reserved[0] = dd->psxmitwait_supported << 7;
        /*
         * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
         */
        p->resp_time_value = 18;

        return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplescontrol *p =
                (struct ib_pma_portsamplescontrol *)pmp->data;
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned long flags;
        u8 port_select = p->port_select;

        memset(pmp->data, 0, sizeof(pmp->data));

        p->port_select = port_select;
        if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                goto bail;
        }
        spin_lock_irqsave(&ibp->lock, flags);
        p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
        p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
        p->counter_width = 4;   /* 32 bit counters */
        p->counter_mask0_9 = COUNTER_MASK0_9;
        p->sample_start = cpu_to_be32(ibp->pma_sample_start);
        p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
        p->tag = cpu_to_be16(ibp->pma_tag);
        p->counter_select[0] = ibp->pma_counter_select[0];
        p->counter_select[1] = ibp->pma_counter_select[1];
        p->counter_select[2] = ibp->pma_counter_select[2];
        p->counter_select[3] = ibp->pma_counter_select[3];
        p->counter_select[4] = ibp->pma_counter_select[4];
        spin_unlock_irqrestore(&ibp->lock, flags);

bail:
        return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplescontrol *p =
                (struct ib_pma_portsamplescontrol *)pmp->data;
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned long flags;
        u8 status, xmit_flags;
        int ret;

        if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                ret = reply((struct ib_smp *) pmp);
                goto bail;
        }

        spin_lock_irqsave(&ibp->lock, flags);

        /* Port Sampling code owns the PS* HW counters */
        xmit_flags = ppd->cong_stats.flags;
        ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
        status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
        if (status == IB_PMA_SAMPLE_STATUS_DONE ||
            (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
             xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
                ibp->pma_sample_start = be32_to_cpu(p->sample_start);
                ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
                ibp->pma_tag = be16_to_cpu(p->tag);
                ibp->pma_counter_select[0] = p->counter_select[0];
                ibp->pma_counter_select[1] = p->counter_select[1];
                ibp->pma_counter_select[2] = p->counter_select[2];
                ibp->pma_counter_select[3] = p->counter_select[3];
                ibp->pma_counter_select[4] = p->counter_select[4];
                dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
                                      ibp->pma_sample_start);
        }
        spin_unlock_irqrestore(&ibp->lock, flags);

        ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
        return ret;
}

static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
                       __be16 sel)
{
        u64 ret;

        switch (sel) {
        case IB_PMA_PORT_XMIT_DATA:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
                break;
        case IB_PMA_PORT_RCV_DATA:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
                break;
        case IB_PMA_PORT_XMIT_PKTS:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
                break;
        case IB_PMA_PORT_RCV_PKTS:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
                break;
        case IB_PMA_PORT_XMIT_WAIT:
                ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
                break;
        default:
                ret = 0;
        }

        return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
        u32 delta;

        delta = get_counter(&ppd->ibport_data, ppd,
                            IB_PMA_PORT_XMIT_WAIT);
        return ppd->cong_stats.counter + delta;
}

static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
        struct qib_ibport *ibp = &ppd->ibport_data;

        ppd->cong_stats.counter_cache.psxmitdata =
                get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
        ppd->cong_stats.counter_cache.psrcvdata =
                get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
        ppd->cong_stats.counter_cache.psxmitpkts =
                get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
        ppd->cong_stats.counter_cache.psrcvpkts =
                get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
        ppd->cong_stats.counter_cache.psxmitwait =
                get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}

static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
                                        __be16 sel)
{
        u64 ret;

        switch (sel) {
        case IB_PMA_PORT_XMIT_DATA:
                ret = ppd->cong_stats.counter_cache.psxmitdata;
                break;
        case IB_PMA_PORT_RCV_DATA:
                ret = ppd->cong_stats.counter_cache.psrcvdata;
                break;
        case IB_PMA_PORT_XMIT_PKTS:
                ret = ppd->cong_stats.counter_cache.psxmitpkts;
                break;
        case IB_PMA_PORT_RCV_PKTS:
                ret = ppd->cong_stats.counter_cache.psrcvpkts;
                break;
        case IB_PMA_PORT_XMIT_WAIT:
                ret = ppd->cong_stats.counter_cache.psxmitwait;
                break;
        default:
                ret = 0;
        }

        return ret;
}

static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
                                     struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplesresult *p =
                (struct ib_pma_portsamplesresult *)pmp->data;
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned long flags;
        u8 status;
        int i;

        memset(pmp->data, 0, sizeof(pmp->data));
        spin_lock_irqsave(&ibp->lock, flags);
        p->tag = cpu_to_be16(ibp->pma_tag);
        if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
                p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
        else {
                status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
                p->sample_status = cpu_to_be16(status);
                if (status == IB_PMA_SAMPLE_STATUS_DONE) {
                        cache_hw_sample_counters(ppd);
                        ppd->cong_stats.counter =
                                xmit_wait_get_value_delta(ppd);
                        dd->f_set_cntr_sample(ppd,
                                              QIB_CONG_TIMER_PSINTERVAL, 0);
                        ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
                }
        }
        for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
                p->counter[i] = cpu_to_be32(
                        get_cache_hw_sample_counters(
                                ppd, ibp->pma_counter_select[i]));
        spin_unlock_irqrestore(&ibp->lock, flags);

        return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
                                         struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplesresult_ext *p =
                (struct ib_pma_portsamplesresult_ext *)pmp->data;
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        unsigned long flags;
        u8 status;
        int i;

        /* Port Sampling code owns the PS* HW counters */
        memset(pmp->data, 0, sizeof(pmp->data));
        spin_lock_irqsave(&ibp->lock, flags);
        p->tag = cpu_to_be16(ibp->pma_tag);
        if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
                p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
        else {
                status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
                p->sample_status = cpu_to_be16(status);
                /* 64 bits */
                p->extended_width = cpu_to_be32(0x80000000);
                if (status == IB_PMA_SAMPLE_STATUS_DONE) {
                        cache_hw_sample_counters(ppd);
                        ppd->cong_stats.counter =
                                xmit_wait_get_value_delta(ppd);
                        dd->f_set_cntr_sample(ppd,
                                              QIB_CONG_TIMER_PSINTERVAL, 0);
                        ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
                }
        }
        for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
                p->counter[i] = cpu_to_be64(
                        get_cache_hw_sample_counters(
                                ppd, ibp->pma_counter_select[i]));
        spin_unlock_irqrestore(&ibp->lock, flags);

        return reply((struct ib_smp *) pmp);
}

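/*
 * Descriptive comment (added): fill in the PortCounters attribute.
 * Counters are reported relative to the saved "z_" baselines and
 * saturate at the field widths the PMA defines for each counter.
 */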
static int pma_get_portcounters(struct ib_pma_mad *pmp,
                                struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
                pmp->data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_verbs_counters cntrs;
        u8 port_select = p->port_select;

        qib_get_counters(ppd, &cntrs);

        /* Adjust counters for any resets done. */
        cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
        cntrs.link_error_recovery_counter -=
                ibp->z_link_error_recovery_counter;
        cntrs.link_downed_counter -= ibp->z_link_downed_counter;
        cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
        cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
        cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
        cntrs.port_xmit_data -= ibp->z_port_xmit_data;
        cntrs.port_rcv_data -= ibp->z_port_rcv_data;
        cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
        cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
        cntrs.local_link_integrity_errors -=
                ibp->z_local_link_integrity_errors;
        cntrs.excessive_buffer_overrun_errors -=
                ibp->z_excessive_buffer_overrun_errors;
        cntrs.vl15_dropped -= ibp->z_vl15_dropped;
        cntrs.vl15_dropped += ibp->n_vl15_dropped;

        memset(pmp->data, 0, sizeof(pmp->data));

        p->port_select = port_select;
        if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

        if (cntrs.symbol_error_counter > 0xFFFFUL)
                p->symbol_error_counter = cpu_to_be16(0xFFFF);
        else
                p->symbol_error_counter =
                        cpu_to_be16((u16)cntrs.symbol_error_counter);
        if (cntrs.link_error_recovery_counter > 0xFFUL)
                p->link_error_recovery_counter = 0xFF;
        else
                p->link_error_recovery_counter =
                        (u8)cntrs.link_error_recovery_counter;
        if (cntrs.link_downed_counter > 0xFFUL)
                p->link_downed_counter = 0xFF;
        else
                p->link_downed_counter = (u8)cntrs.link_downed_counter;
        if (cntrs.port_rcv_errors > 0xFFFFUL)
                p->port_rcv_errors = cpu_to_be16(0xFFFF);
        else
                p->port_rcv_errors =
                        cpu_to_be16((u16) cntrs.port_rcv_errors);
        if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
                p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
        else
                p->port_rcv_remphys_errors =
                        cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
        if (cntrs.port_xmit_discards > 0xFFFFUL)
                p->port_xmit_discards = cpu_to_be16(0xFFFF);
        else
                p->port_xmit_discards =
                        cpu_to_be16((u16)cntrs.port_xmit_discards);
        if (cntrs.local_link_integrity_errors > 0xFUL)
                cntrs.local_link_integrity_errors = 0xFUL;
        if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
                cntrs.excessive_buffer_overrun_errors = 0xFUL;
        p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
                cntrs.excessive_buffer_overrun_errors;
        if (cntrs.vl15_dropped > 0xFFFFUL)
                p->vl15_dropped = cpu_to_be16(0xFFFF);
        else
                p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
        if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
                p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
        else
                p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
        if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
                p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
        else
                p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
        if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
                p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
        else
                p->port_xmit_packets =
                        cpu_to_be32((u32)cntrs.port_xmit_packets);
        if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
                p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
        else
                p->port_rcv_packets =
                        cpu_to_be32((u32) cntrs.port_rcv_packets);

        return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
                                     struct ib_device *ibdev, u8 port)
{
        /* Congestion PMA packets start at offset 24 not 64 */
        struct ib_pma_portcounters_cong *p =
                (struct ib_pma_portcounters_cong *)pmp->reserved;
        struct qib_verbs_counters cntrs;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_devdata *dd = dd_from_ppd(ppd);
        u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
        u64 xmit_wait_counter;
        unsigned long flags;

        /*
         * This check is performed only in the GET method because the
         * SET method ends up calling this anyway.
         */
        if (!dd->psxmitwait_supported)
                pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
        if (port_select != port)
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

        qib_get_counters(ppd, &cntrs);
        spin_lock_irqsave(&ppd->ibport_data.lock, flags);
        xmit_wait_counter = xmit_wait_get_value_delta(ppd);
        spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);

        /* Adjust counters for any resets done. */
        cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
        cntrs.link_error_recovery_counter -=
                ibp->z_link_error_recovery_counter;
        cntrs.link_downed_counter -= ibp->z_link_downed_counter;
        cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
        cntrs.port_rcv_remphys_errors -=
                ibp->z_port_rcv_remphys_errors;
        cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
        cntrs.local_link_integrity_errors -=
                ibp->z_local_link_integrity_errors;
        cntrs.excessive_buffer_overrun_errors -=
                ibp->z_excessive_buffer_overrun_errors;
        cntrs.vl15_dropped -= ibp->z_vl15_dropped;
        cntrs.vl15_dropped += ibp->n_vl15_dropped;
        cntrs.port_xmit_data -= ibp->z_port_xmit_data;
        cntrs.port_rcv_data -= ibp->z_port_rcv_data;
        cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
        cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

        memset(pmp->reserved, 0, sizeof(pmp->reserved) +
               sizeof(pmp->data));

        /*
         * Set top 3 bits to indicate interval in picoseconds in
         * remaining bits.
         */
        p->port_check_rate =
                cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
                            (dd->psxmitwait_check_rate &
                             ~(QIB_XMIT_RATE_PICO << 13)));
        p->port_adr_events = cpu_to_be64(0);
        p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
        p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
        p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
        p->port_xmit_packets =
                cpu_to_be64(cntrs.port_xmit_packets);
        p->port_rcv_packets =
                cpu_to_be64(cntrs.port_rcv_packets);
        if (cntrs.symbol_error_counter > 0xFFFFUL)
                p->symbol_error_counter = cpu_to_be16(0xFFFF);
        else
                p->symbol_error_counter =
                        cpu_to_be16(
                                (u16)cntrs.symbol_error_counter);
        if (cntrs.link_error_recovery_counter > 0xFFUL)
                p->link_error_recovery_counter = 0xFF;
        else
                p->link_error_recovery_counter =
                        (u8)cntrs.link_error_recovery_counter;
        if (cntrs.link_downed_counter > 0xFFUL)
                p->link_downed_counter = 0xFF;
        else
                p->link_downed_counter =
                        (u8)cntrs.link_downed_counter;
        if (cntrs.port_rcv_errors > 0xFFFFUL)
                p->port_rcv_errors = cpu_to_be16(0xFFFF);
        else
                p->port_rcv_errors =
                        cpu_to_be16((u16) cntrs.port_rcv_errors);
        if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
                p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
        else
                p->port_rcv_remphys_errors =
                        cpu_to_be16(
                                (u16)cntrs.port_rcv_remphys_errors);
        if (cntrs.port_xmit_discards > 0xFFFFUL)
                p->port_xmit_discards = cpu_to_be16(0xFFFF);
        else
                p->port_xmit_discards =
                        cpu_to_be16((u16)cntrs.port_xmit_discards);
        if (cntrs.local_link_integrity_errors > 0xFUL)
                cntrs.local_link_integrity_errors = 0xFUL;
        if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
                cntrs.excessive_buffer_overrun_errors = 0xFUL;
        p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
                cntrs.excessive_buffer_overrun_errors;
        if (cntrs.vl15_dropped > 0xFFFFUL)
                p->vl15_dropped = cpu_to_be16(0xFFFF);
        else
                p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

        return reply((struct ib_smp *)pmp);
}

static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
                                    struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portcounters_ext *p =
                (struct ib_pma_portcounters_ext *)pmp->data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        u64 swords, rwords, spkts, rpkts, xwait;
        u8 port_select = p->port_select;

        memset(pmp->data, 0, sizeof(pmp->data));

        p->port_select = port_select;
        if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                goto bail;
        }

        qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

        /* Adjust counters for any resets done. */
        swords -= ibp->z_port_xmit_data;
        rwords -= ibp->z_port_rcv_data;
        spkts -= ibp->z_port_xmit_packets;
        rpkts -= ibp->z_port_rcv_packets;

        p->port_xmit_data = cpu_to_be64(swords);
        p->port_rcv_data = cpu_to_be64(rwords);
        p->port_xmit_packets = cpu_to_be64(spkts);
        p->port_rcv_packets = cpu_to_be64(rpkts);
        p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
        p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
        p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
        p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);

bail:
        return reply((struct ib_smp *) pmp);
}

static int pma_set_portcounters(struct ib_pma_mad *pmp,
                                struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
                pmp->data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_verbs_counters cntrs;

        /*
         * Since the HW doesn't support clearing counters, we save the
         * current count and subtract it from future responses.
         */
        qib_get_counters(ppd, &cntrs);

        if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
                ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

        if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
                ibp->z_link_error_recovery_counter =
                        cntrs.link_error_recovery_counter;

        if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
                ibp->z_link_downed_counter = cntrs.link_downed_counter;

        if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
                ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

        if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
                ibp->z_port_rcv_remphys_errors =
                        cntrs.port_rcv_remphys_errors;

        if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
                ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

        if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
                ibp->z_local_link_integrity_errors =
                        cntrs.local_link_integrity_errors;

        if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
                ibp->z_excessive_buffer_overrun_errors =
                        cntrs.excessive_buffer_overrun_errors;

        if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
                ibp->n_vl15_dropped = 0;
                ibp->z_vl15_dropped = cntrs.vl15_dropped;
        }

        if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
                ibp->z_port_xmit_data = cntrs.port_xmit_data;

        if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
                ibp->z_port_rcv_data = cntrs.port_rcv_data;

        if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
                ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

        if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
                ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

        return pma_get_portcounters(pmp, ibdev, port);
}

static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
                                     struct ib_device *ibdev, u8 port)
{
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_devdata *dd = dd_from_ppd(ppd);
        struct qib_verbs_counters cntrs;
        u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
        int ret = 0;
        unsigned long flags;

        qib_get_counters(ppd, &cntrs);
        /* Get counter values before we save them */
        ret = pma_get_portcounters_cong(pmp, ibdev, port);

        if (counter_select & IB_PMA_SEL_CONG_XMIT) {
                spin_lock_irqsave(&ppd->ibport_data.lock, flags);
                ppd->cong_stats.counter = 0;
                dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
                                      0x0);
                spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
        }
        if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
                ibp->z_port_xmit_data = cntrs.port_xmit_data;
                ibp->z_port_rcv_data = cntrs.port_rcv_data;
                ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
                ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
        }
        if (counter_select & IB_PMA_SEL_CONG_ALL) {
                ibp->z_symbol_error_counter =
                        cntrs.symbol_error_counter;
                ibp->z_link_error_recovery_counter =
                        cntrs.link_error_recovery_counter;
                ibp->z_link_downed_counter =
                        cntrs.link_downed_counter;
                ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
                ibp->z_port_rcv_remphys_errors =
                        cntrs.port_rcv_remphys_errors;
                ibp->z_port_xmit_discards =
                        cntrs.port_xmit_discards;
                ibp->z_local_link_integrity_errors =
                        cntrs.local_link_integrity_errors;
                ibp->z_excessive_buffer_overrun_errors =
                        cntrs.excessive_buffer_overrun_errors;
                ibp->n_vl15_dropped = 0;
                ibp->z_vl15_dropped = cntrs.vl15_dropped;
        }

        return ret;
}

static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
                                    struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
                pmp->data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        u64 swords, rwords, spkts, rpkts, xwait;

        qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

        if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
                ibp->z_port_xmit_data = swords;

        if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
                ibp->z_port_rcv_data = rwords;

        if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
                ibp->z_port_xmit_packets = spkts;

        if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
                ibp->z_port_rcv_packets = rpkts;

        if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
                ibp->n_unicast_xmit = 0;

        if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
                ibp->n_unicast_rcv = 0;

        if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
                ibp->n_multicast_xmit = 0;

        if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
                ibp->n_multicast_rcv = 0;

        return pma_get_portcounters_ext(pmp, ibdev, port);
}

static int process_subn(struct ib_device *ibdev, int mad_flags,
                        u8 port, struct ib_mad *in_mad,
                        struct ib_mad *out_mad)
{
        struct ib_smp *smp = (struct ib_smp *)out_mad;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        int ret;

        *out_mad = *in_mad;
        if (smp->class_version != 1) {
                smp->status |= IB_SMP_UNSUP_VERSION;
                ret = reply(smp);
                goto bail;
        }

        ret = check_mkey(ibp, smp, mad_flags);
        if (ret) {
                u32 port_num = be32_to_cpu(smp->attr_mod);

                /*
                 * If this is a get/set portinfo, we already check the
                 * M_Key if the MAD is for another port and the M_Key
                 * is OK on the receiving port. This check is needed
                 * to increment the error counters when the M_Key
                 * fails to match on *both* ports.
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
                    (smp->method == IB_MGMT_METHOD_GET ||
                     smp->method == IB_MGMT_METHOD_SET) &&
                    port_num && port_num <= ibdev->phys_port_cnt &&
                    port != port_num)
                        (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
                goto bail;
        }

        switch (smp->method) {
        case IB_MGMT_METHOD_GET:
                switch (smp->attr_id) {
                case IB_SMP_ATTR_NODE_DESC:
                        ret = subn_get_nodedescription(smp, ibdev);
                        goto bail;
                case IB_SMP_ATTR_NODE_INFO:
                        ret = subn_get_nodeinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_GUID_INFO:
                        ret = subn_get_guidinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PORT_INFO:
                        ret = subn_get_portinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PKEY_TABLE:
                        ret = subn_get_pkeytable(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SL_TO_VL_TABLE:
                        ret = subn_get_sl_to_vl(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_VL_ARB_TABLE:
                        ret = subn_get_vl_arb(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SM_INFO:
                        if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
                                ret = IB_MAD_RESULT_SUCCESS |
                                        IB_MAD_RESULT_CONSUMED;
                                goto bail;
                        }
                        if (ibp->port_cap_flags & IB_PORT_SM) {
                                ret = IB_MAD_RESULT_SUCCESS;
                                goto bail;
                        }
                        /* FALLTHROUGH */
                default:
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (smp->attr_id) {
                case IB_SMP_ATTR_GUID_INFO:
                        ret = subn_set_guidinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PORT_INFO:
                        ret = subn_set_portinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PKEY_TABLE:
                        ret = subn_set_pkeytable(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SL_TO_VL_TABLE:
                        ret = subn_set_sl_to_vl(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_VL_ARB_TABLE:
                        ret = subn_set_vl_arb(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SM_INFO:
                        if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
                                ret = IB_MAD_RESULT_SUCCESS |
                                        IB_MAD_RESULT_CONSUMED;
                                goto bail;
                        }
                        if (ibp->port_cap_flags & IB_PORT_SM) {
                                ret = IB_MAD_RESULT_SUCCESS;
                                goto bail;
                        }
                        /* FALLTHROUGH */
                default:
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                        goto bail;
                }

        case IB_MGMT_METHOD_TRAP_REPRESS:
                if (smp->attr_id == IB_SMP_ATTR_NOTICE)
                        ret = subn_trap_repress(smp, ibdev, port);
                else {
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                }
                goto bail;

        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_REPORT:
        case IB_MGMT_METHOD_REPORT_RESP:
        case IB_MGMT_METHOD_GET_RESP:
                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        case IB_MGMT_METHOD_SEND:
                if (ib_get_smp_direction(smp) &&
                    smp->attr_id == QIB_VENDOR_IPG) {
                        ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
                                              smp->data[0]);
                        ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
                } else
                        ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        default:
                smp->status |= IB_SMP_UNSUP_METHOD;
                ret = reply(smp);
        }

bail:
        return ret;
}

static int process_perf(struct ib_device *ibdev, u8 port,
                        struct ib_mad *in_mad,
                        struct ib_mad *out_mad)
{
        struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
        int ret;

        *out_mad = *in_mad;
        if (pmp->mad_hdr.class_version != 1) {
                pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
                ret = reply((struct ib_smp *) pmp);
                goto bail;
        }

        switch (pmp->mad_hdr.method) {
        case IB_MGMT_METHOD_GET:
                switch (pmp->mad_hdr.attr_id) {
                case IB_PMA_CLASS_PORT_INFO:
                        ret = pma_get_classportinfo(pmp, ibdev);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_CONTROL:
                        ret = pma_get_portsamplescontrol(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_RESULT:
                        ret = pma_get_portsamplesresult(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_RESULT_EXT:
                        ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS:
                        ret = pma_get_portcounters(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_EXT:
                        ret = pma_get_portcounters_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_CONG:
                        ret = pma_get_portcounters_cong(pmp, ibdev, port);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_smp *) pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (pmp->mad_hdr.attr_id) {
                case IB_PMA_PORT_SAMPLES_CONTROL:
                        ret = pma_set_portsamplescontrol(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS:
                        ret = pma_set_portcounters(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_EXT:
                        ret = pma_set_portcounters_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_CONG:
                        ret = pma_set_portcounters_cong(pmp, ibdev, port);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_smp *) pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_GET_RESP:
                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        default:
                pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
                ret = reply((struct ib_smp *) pmp);
        }

bail:
        return ret;
}

/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                    struct ib_wc *in_wc, struct ib_grh *in_grh,
                    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        int ret;

        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
                goto bail;

        case IB_MGMT_CLASS_PERF_MGMT:
                ret = process_perf(ibdev, port, in_mad, out_mad);
                goto bail;

        default:
                ret = IB_MAD_RESULT_SUCCESS;
        }

bail:
        return ret;
}

static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        ib_free_send_mad(mad_send_wc->send_buf);
}

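/*
 * Descriptive comment (added): timer callback, re-armed every HZ
 * jiffies, that accumulates the PortXmitWait hardware sample counter
 * into ppd->cong_stats.counter while the timer owns the PS* counters.
 */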
static void xmit_wait_timer_func(unsigned long opaque)
{
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
        struct qib_devdata *dd = dd_from_ppd(ppd);
        unsigned long flags;
        u8 status;

        spin_lock_irqsave(&ppd->ibport_data.lock, flags);
        if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
                status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
                if (status == IB_PMA_SAMPLE_STATUS_DONE) {
                        /* save counter cache */
                        cache_hw_sample_counters(ppd);
                        ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
                } else
                        goto done;
        }
        ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
        dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
        spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
        mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}

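/*
 * Descriptive comment (added): register a SMI MAD agent for each port
 * and arm the per-port congestion-statistics timer; on failure, all
 * agents registered so far are unregistered again.
 */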
int qib_create_agents(struct qib_ibdev *dev)
{
        struct qib_devdata *dd = dd_from_dev(dev);
        struct ib_mad_agent *agent;
        struct qib_ibport *ibp;
        int p;
        int ret;

        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
                                              NULL, 0, send_handler,
                                              NULL, NULL);
                if (IS_ERR(agent)) {
                        ret = PTR_ERR(agent);
                        goto err;
                }

                /* Initialize xmit_wait structure */
                dd->pport[p].cong_stats.counter = 0;
                init_timer(&dd->pport[p].cong_stats.timer);
                dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
                dd->pport[p].cong_stats.timer.data =
                        (unsigned long)(&dd->pport[p]);
                dd->pport[p].cong_stats.timer.expires = 0;
                add_timer(&dd->pport[p].cong_stats.timer);

                ibp->send_agent = agent;
        }

        return 0;

err:
        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                if (ibp->send_agent) {
                        agent = ibp->send_agent;
                        ibp->send_agent = NULL;
                        ib_unregister_mad_agent(agent);
                }
        }

        return ret;
}

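/*
 * Descriptive comment (added): tear down the per-port MAD agents, the
 * cached SM address handles, and the congestion-statistics timers.
 */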
void qib_free_agents(struct qib_ibdev *dev)
{
        struct qib_devdata *dd = dd_from_dev(dev);
        struct ib_mad_agent *agent;
        struct qib_ibport *ibp;
        int p;

        for (p = 0; p < dd->num_pports; p++) {
                ibp = &dd->pport[p].ibport_data;
                if (ibp->send_agent) {
                        agent = ibp->send_agent;
                        ibp->send_agent = NULL;
                        ib_unregister_mad_agent(agent);
                }
                if (ibp->sm_ah) {
                        ib_destroy_ah(&ibp->sm_ah->ibah);
                        ibp->sm_ah = NULL;
                }
                if (dd->pport[p].cong_stats.timer.data)
                        del_timer_sync(&dd->pport[p].cong_stats.timer);
        }
}