/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
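/*
 * reply() / reply_failure() below convert a consumed SMP into a GetResp
 * so the MAD layer returns it to the requester; for directed-route MADs
 * the direction bit in the status field must also be set.
 */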
static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int reply_failure(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
}
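/*
 * qib_send_trap() builds a SubnTrap() SMP on the port's QP0 send agent
 * (GFP_ATOMIC, LID-routed to the SM) and rate-limits resends: further
 * traps are suppressed until trap_timeout, which is rearmed from the
 * 4.096 usec * 2^subnet_timeout window after a successful post.
 */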
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	struct ib_ah *ah;
	unsigned long flags;
	unsigned long timeout;
	int ret;

	agent = ibp->rvp.send_agent;
	if (!agent)
		return;

	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	if (ibp->rvp.trap_timeout &&
	    time_before(jiffies, ibp->rvp.trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
				      IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->rvp.tid++;
	smp->tid = cpu_to_be64(ibp->rvp.tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->rvp.sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
			if (IS_ERR(ah))
				ret = PTR_ERR(ah);
			else {
				send_buf->ah = ah;
				ibp->rvp.sm_ah = ibah_to_rvtah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->rvp.sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->rvp.trap_timeout = 0;
	}
}
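/*
 * The helpers below fill in struct ib_mad_notice_attr for the specific
 * trap numbers this SMA generates (bad P_Key/Q_Key, bad M_Key, capability
 * mask change, system image GUID change, node description change) and
 * hand the notice to qib_send_trap().
 */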
/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
		ibp->rvp.pkey_violations++;
	else
		ibp->rvp.qkey_violations++;
	ibp->rvp.n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = dd_from_dev(ibdev);
	struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask =
		cpu_to_be32(ibp->rvp.port_cap_flags);
	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof(data));
}
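/*
 * subn_get_*() / subn_set_*() below implement the individual SMA
 * attributes (NodeDescription, NodeInfo, GUIDInfo, PortInfo, P_Key table,
 * SL-to-VL map, VL arbitration).  Each handler fills smp->data and
 * finishes through reply(), so the MAD is returned as a GetResp.
 */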
static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 vendor, majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;     /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->vendorid;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}
static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
				     (u32)n);
	return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
				     (u32)n);
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}
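/*
 * check_mkey() enforces M_Key protection: an expired lease clears the
 * protection level, a mismatch on a protected port bumps the violation
 * counter, (re)arms the lease timer and raises a bad-M_Key trap, and the
 * non-zero return value tells the caller to fail the MAD.
 */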
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->rvp.mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->rvp.mkey_lease_timeout = 0;
		ibp->rvp.mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
	    ibp->rvp.mkey == smp->mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
	    (smp->method == IB_MGMT_METHOD_GET ||
	     smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->rvp.mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (smp->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->rvp.mkeyprot < 2)
				break;
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->rvp.mkey_violations != 0xFFFF)
				++ibp->rvp.mkey_violations;
			if (!ibp->rvp.mkey_lease_timeout &&
			    ibp->rvp.mkey_lease_period)
				ibp->rvp.mkey_lease_timeout = jiffies +
					ibp->rvp.mkey_lease_period * HZ;
			/* Generate a trap notice. */
			qib_bad_mkey(ibp, smp);
			ret = 1;
		}
	}

	return ret;
}
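/*
 * Get(PortInfo): packs the current link state, widths, speeds, MTU,
 * VL/threshold settings and violation counters into struct ib_port_info.
 * The M_Key is only hidden from a Get when it does not match and the
 * protection level is exactly 1.
 */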
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret) {
				ret = IB_MAD_RESULT_FAILURE;
				goto bail;
			}
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->rvp.mkey != smp->mkey &&
	      ibp->rvp.mkeyprot == 1))
		pip->mkey = ibp->rvp.mkey;
	pip->gid_prefix = ibp->rvp.gid_prefix;
	pip->lid = cpu_to_be16(ppd->lid);
	pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
	pip->vl_high_limit = ibp->rvp.vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}
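/*
 * P_Key handling: the table visible to the SM lives in the receive
 * context (rcd->pkeys); changes are pushed to the chip via
 * QIB_IB_CFG_PKEYS and reference-counted through add_pkey()/rm_pkey()
 * further below.
 */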
/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}

static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct qib_devdata *dd = dd_from_ibdev(ibdev);
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		unsigned i;

		/* The first entry is read-only. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = p[i];
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	/* The only GUID we support is the first read-only entry. */
	return subn_get_guidinfo(smp, ibdev, port);
}
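/*
 * Set(PortInfo): each field is validated independently; anything out of
 * range sets IB_SMP_INVALID_FIELD in the reply while the remaining fields
 * are still applied, and the response is built by re-running
 * subn_get_portinfo() on the updated state (unless the port was just
 * disabled and the reply would have to go out through it).
 */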
/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->rvp.mkey = pip->mkey;
	ibp->rvp.gid_prefix = pip->gid_prefix;
	ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		if (ibp->rvp.sm_ah) {
			if (smlid != ibp->rvp.sm_lid)
				ibp->rvp.sm_ah->attr.dlid = smlid;
			if (msl != ibp->rvp.sm_sl)
				ibp->rvp.sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		if (smlid != ibp->rvp.sm_lid)
			ibp->rvp.sm_lid = smlid;
		if (msl != ibp->rvp.sm_sl)
			ibp->rvp.sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lwe != ppd->link_width_enabled)
			set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			set_link_speed_enabled(ppd,
					       ppd->link_speed_supported);
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lse != ppd->link_speed_enabled)
			set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->rvp.vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				ibp->rvp.vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	if (pip->mkey_violations == 0)
		ibp->rvp.mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->rvp.pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->rvp.qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		smp->status |= IB_SMP_INVALID_FIELD;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		smp->status |= IB_SMP_INVALID_FIELD;

	ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		smp->status |= IB_SMP_INVALID_FIELD;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else {
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	ret = subn_get_portinfo(smp, ibdev, port);

	/* restore re-reg bit per o14-12.2.1 */
	pip->clientrereg_resv_subnetto |= clientrereg;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}
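/*
 * The pkeyrefs[] atomics let several consumers share one hardware P_Key
 * slot: rm_pkey() reports when the last reference goes away and the slot
 * can be cleared, while add_pkey() reuses an existing slot or claims a
 * free one and reports whether the hardware register must be rewritten.
 */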
/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the qlogic_ib device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		ret = 0;
		goto bail;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the qlogic_ib device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.rdi.ibdev;
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}
static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}
static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}
static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
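/*
 * Performance-management (PMA) handlers.  The hardware cannot clear its
 * counters, so "clearing" is emulated: the current values are saved in
 * the per-port z_* baselines and subtracted from every subsequent read.
 */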
static int pma_get_classportinfo(struct ib_pma_mad *pmp,
				 struct ib_device *ibdev)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	ib_set_cpi_capmask2(p,
			    dd->psxmitwait_supported <<
			    (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	ib_set_cpi_resp_time(p, 18);

	return reply((struct ib_smp *) pmp);
}
static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;   /* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	p->counter_select[0] = ibp->rvp.pma_counter_select[0];
	p->counter_select[1] = ibp->rvp.pma_counter_select[1];
	p->counter_select[2] = ibp->rvp.pma_counter_select[2];
	p->counter_select[3] = ibp->rvp.pma_counter_select[3];
	p->counter_select[4] = ibp->rvp.pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}
static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->rvp.lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->rvp.pma_tag = be16_to_cpu(p->tag);
		ibp->rvp.pma_counter_select[0] = p->counter_select[0];
		ibp->rvp.pma_counter_select[1] = p->counter_select[1];
		ibp->rvp.pma_counter_select[2] = p->counter_select[2];
		ibp->rvp.pma_counter_select[3] = p->counter_select[3];
		ibp->rvp.pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
				      ibp->rvp.pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}
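/*
 * Sampling support: while a sample sweep runs, the PS* hardware counters
 * belong to the Port Sampling code; once a sweep completes the values are
 * cached in cong_stats.counter_cache so later SamplesResult queries and
 * the congestion counters read consistent data.
 */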
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
	u32 delta;

	delta = get_counter(&ppd->ibport_data, ppd,
			    IB_PMA_PORT_XMIT_WAIT);
	return ppd->cong_stats.counter + delta;
}

static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;

	ppd->cong_stats.counter_cache.psxmitdata =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
	ppd->cong_stats.counter_cache.psrcvdata =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
	ppd->cong_stats.counter_cache.psxmitpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
	ppd->cong_stats.counter_cache.psrcvpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
	ppd->cong_stats.counter_cache.psxmitwait =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}

static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
					__be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->cong_stats.counter_cache.psxmitdata;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->cong_stats.counter_cache.psrcvdata;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->cong_stats.counter_cache.psxmitpkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->cong_stats.counter_cache.psrcvpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->cong_stats.counter_cache.psxmitwait;
		break;
	default:
		ret = 0;
	}

	return ret;
}
static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->rvp.pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->rvp.pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return reply((struct ib_smp *) pmp);
}
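/*
 * PortCounters: values are reported relative to the z_* baselines and
 * saturated to the field widths defined by the PMA attribute (16/8-bit
 * error counters, 32-bit data/packet counters).
 */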
static int pma_get_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;
	u8 port_select = p->port_select;

	qib_get_counters(ppd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}
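/*
 * The vendor congestion counters reuse the PMA MAD but start at offset 24
 * (pmp->reserved) instead of 64, report 64-bit data/packet/wait values,
 * and carry the XmitWait check-rate unit in the top bits of
 * port_check_rate.
 */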
static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
	       sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16(
				(u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16(
				(u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}
static void qib_snapshot_pmacounters(
	struct qib_ibport *ibp,
	struct qib_pma_counters *pmacounters)
{
	struct qib_pma_counters *p;
	int cpu;

	memset(pmacounters, 0, sizeof(*pmacounters));
	for_each_possible_cpu(cpu) {
		p = per_cpu_ptr(ibp->pmastats, cpu);
		pmacounters->n_unicast_xmit += p->n_unicast_xmit;
		pmacounters->n_unicast_rcv += p->n_unicast_rcv;
		pmacounters->n_multicast_xmit += p->n_multicast_xmit;
		pmacounters->n_multicast_rcv += p->n_multicast_rcv;
	}
}

static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);

	qib_snapshot_pmacounters(ibp, &pma);

	p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
		- ibp->z_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
		- ibp->z_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
		- ibp->z_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
		- ibp->z_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}
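/*
 * The Set() side of the counter attributes: nothing is written to the
 * hardware, the selected counters are simply re-baselined into the z_*
 * snapshot fields and the matching Get() handler builds the response.
 */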
static int pma_set_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		ibp->rvp.n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	return pma_get_portcounters(pmp, ibdev, port);
}
static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

	qib_get_counters(ppd, &cntrs);
	/* Get counter values before we save them */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->rvp.n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	return ret;
}
static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rpkts;

	qib_snapshot_pmacounters(ibp, &pma);

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->z_unicast_xmit = pma.n_unicast_xmit;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->z_unicast_rcv = pma.n_unicast_rcv;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->z_multicast_xmit = pma.n_multicast_xmit;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->z_multicast_rcv = pma.n_multicast_rcv;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}
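/*
 * Top-level SMA dispatcher: copies the request into the response buffer,
 * validates the class version and M_Key, then switches on method and
 * attribute to the subn_*() handlers above.
 */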
static int process_subn(struct ib_device *ibdev, int mad_flags,
                        u8 port, const struct ib_mad *in_mad,
                        struct ib_mad *out_mad)
{
        struct ib_smp *smp = (struct ib_smp *)out_mad;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        int ret;

        *out_mad = *in_mad;
        if (smp->class_version != 1) {
                smp->status |= IB_SMP_UNSUP_VERSION;
                ret = reply(smp);
                goto bail;
        }

        ret = check_mkey(ibp, smp, mad_flags);
        if (ret) {
                u32 port_num = be32_to_cpu(smp->attr_mod);

                /*
                 * If this is a get/set portinfo, we already check the
                 * M_Key if the MAD is for another port and the M_Key
                 * is OK on the receiving port. This check is needed
                 * to increment the error counters when the M_Key
                 * fails to match on *both* ports.
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
                    (smp->method == IB_MGMT_METHOD_GET ||
                     smp->method == IB_MGMT_METHOD_SET) &&
                    port_num && port_num <= ibdev->phys_port_cnt &&
                    port != port_num)
                        (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
                ret = IB_MAD_RESULT_FAILURE;
                goto bail;
        }

        switch (smp->method) {
        case IB_MGMT_METHOD_GET:
                switch (smp->attr_id) {
                case IB_SMP_ATTR_NODE_DESC:
                        ret = subn_get_nodedescription(smp, ibdev);
                        goto bail;
                case IB_SMP_ATTR_NODE_INFO:
                        ret = subn_get_nodeinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_GUID_INFO:
                        ret = subn_get_guidinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PORT_INFO:
                        ret = subn_get_portinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PKEY_TABLE:
                        ret = subn_get_pkeytable(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SL_TO_VL_TABLE:
                        ret = subn_get_sl_to_vl(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_VL_ARB_TABLE:
                        ret = subn_get_vl_arb(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SM_INFO:
                        if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
                                ret = IB_MAD_RESULT_SUCCESS |
                                        IB_MAD_RESULT_CONSUMED;
                                goto bail;
                        }
                        if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
                                ret = IB_MAD_RESULT_SUCCESS;
                                goto bail;
                        }
                        /* FALLTHROUGH */
                default:
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (smp->attr_id) {
                case IB_SMP_ATTR_GUID_INFO:
                        ret = subn_set_guidinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PORT_INFO:
                        ret = subn_set_portinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PKEY_TABLE:
                        ret = subn_set_pkeytable(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SL_TO_VL_TABLE:
                        ret = subn_set_sl_to_vl(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_VL_ARB_TABLE:
                        ret = subn_set_vl_arb(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SM_INFO:
                        if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
                                ret = IB_MAD_RESULT_SUCCESS |
                                        IB_MAD_RESULT_CONSUMED;
                                goto bail;
                        }
                        if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
                                ret = IB_MAD_RESULT_SUCCESS;
                                goto bail;
                        }
                        /* FALLTHROUGH */
                default:
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                        goto bail;
                }

        case IB_MGMT_METHOD_TRAP_REPRESS:
                if (smp->attr_id == IB_SMP_ATTR_NOTICE)
                        ret = subn_trap_repress(smp, ibdev, port);
                else {
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                }
                goto bail;

        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_REPORT:
        case IB_MGMT_METHOD_REPORT_RESP:
        case IB_MGMT_METHOD_GET_RESP:
                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        case IB_MGMT_METHOD_SEND:
                if (ib_get_smp_direction(smp) &&
                    smp->attr_id == QIB_VENDOR_IPG) {
                        ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
                                              smp->data[0]);
                        ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
                } else
                        ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        default:
                smp->status |= IB_SMP_UNSUP_METHOD;
                ret = reply(smp);
        }

bail:
        return ret;
}
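
/*
 * Dispatch an incoming performance-management (PMA) MAD to the
 * pma_get_* or pma_set_* handler selected by its method and attribute ID.
 */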
static int process_perf(struct ib_device *ibdev, u8 port,
                        const struct ib_mad *in_mad,
                        struct ib_mad *out_mad)
{
        struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
        int ret;

        *out_mad = *in_mad;
        if (pmp->mad_hdr.class_version != 1) {
                pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
                ret = reply((struct ib_smp *) pmp);
                goto bail;
        }

        switch (pmp->mad_hdr.method) {
        case IB_MGMT_METHOD_GET:
                switch (pmp->mad_hdr.attr_id) {
                case IB_PMA_CLASS_PORT_INFO:
                        ret = pma_get_classportinfo(pmp, ibdev);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_CONTROL:
                        ret = pma_get_portsamplescontrol(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_RESULT:
                        ret = pma_get_portsamplesresult(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_RESULT_EXT:
                        ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS:
                        ret = pma_get_portcounters(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_EXT:
                        ret = pma_get_portcounters_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_CONG:
                        ret = pma_get_portcounters_cong(pmp, ibdev, port);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_smp *) pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (pmp->mad_hdr.attr_id) {
                case IB_PMA_PORT_SAMPLES_CONTROL:
                        ret = pma_set_portsamplescontrol(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS:
                        ret = pma_set_portcounters(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_EXT:
                        ret = pma_set_portcounters_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_CONG:
                        ret = pma_set_portcounters_cong(pmp, ibdev, port);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_smp *) pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_GET_RESP:
                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        default:
                pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
                ret = reply((struct ib_smp *) pmp);
        }

bail:
        return ret;
}
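
/* Report the congestion-control ClassPortInfo attribute. */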
static int cc_get_classportinfo(struct ib_cc_mad *ccp,
                                struct ib_device *ibdev)
{
        struct ib_cc_classportinfo_attr *p =
                (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;

        memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

        p->base_version = 1;
        p->class_version = 1;

        /*
         * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
         */
        p->resp_time_value = 18;

        return reply((struct ib_smp *) ccp);
}
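
/*
 * Report the CongestionInfo attribute: no optional capabilities and the
 * size of the congestion control table supported by this port.
 */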
static int cc_get_congestion_info(struct ib_cc_mad *ccp,
                                  struct ib_device *ibdev, u8 port)
{
        struct ib_cc_info_attr *p =
                (struct ib_cc_info_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);

        memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

        p->congestion_info = 0;
        p->control_table_cap = ppd->cc_max_table_entries;

        return reply((struct ib_smp *) ccp);
}
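
/*
 * Return the per-SL congestion settings from the shadow copy maintained
 * under cc_shadow_lock.
 */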
static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
                                     struct ib_device *ibdev, u8 port)
{
        int i;
        struct ib_cc_congestion_setting_attr *p =
                (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct ib_cc_congestion_entry_shadow *entries;

        memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

        spin_lock(&ppd->cc_shadow_lock);

        entries = ppd->congestion_entries_shadow->entries;
        p->port_control = cpu_to_be16(
                ppd->congestion_entries_shadow->port_control);
        p->control_map = cpu_to_be16(
                ppd->congestion_entries_shadow->control_map);
        for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
                p->entries[i].ccti_increase = entries[i].ccti_increase;
                p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
                p->entries[i].trigger_threshold = entries[i].trigger_threshold;
                p->entries[i].ccti_min = entries[i].ccti_min;
        }

        spin_unlock(&ppd->cc_shadow_lock);

        return reply((struct ib_smp *) ccp);
}
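
/*
 * Return one IB_CCT_ENTRIES-sized block of the congestion control table;
 * the block index is carried in the attribute modifier.
 */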
static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
                                           struct ib_device *ibdev, u8 port)
{
        struct ib_cc_table_attr *p =
                (struct ib_cc_table_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
        u32 max_cct_block;
        u32 cct_entry;
        struct ib_cc_table_entry_shadow *entries;
        int i;

        /* Is the table index more than what is supported? */
        if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
                goto bail;

        memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

        spin_lock(&ppd->cc_shadow_lock);

        max_cct_block =
                (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
        max_cct_block = max_cct_block ? max_cct_block - 1 : 0;

        if (cct_block_index > max_cct_block) {
                spin_unlock(&ppd->cc_shadow_lock);
                goto bail;
        }

        ccp->attr_mod = cpu_to_be32(cct_block_index);

        cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);

        cct_entry--;

        p->ccti_limit = cpu_to_be16(cct_entry);

        entries = &ppd->ccti_entries_shadow->
                        entries[IB_CCT_ENTRIES * cct_block_index];
        cct_entry %= IB_CCT_ENTRIES;

        for (i = 0; i <= cct_entry; i++)
                p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);

        spin_unlock(&ppd->cc_shadow_lock);

        return reply((struct ib_smp *) ccp);

bail:
        return reply_failure((struct ib_smp *) ccp);
}
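
/* Apply the per-SL congestion settings supplied by the congestion manager. */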
static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
                                     struct ib_device *ibdev, u8 port)
{
        struct ib_cc_congestion_setting_attr *p =
                (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        int i;

        ppd->cc_sl_control_map = be16_to_cpu(p->control_map);

        for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
                ppd->congestion_entries[i].ccti_increase =
                        p->entries[i].ccti_increase;

                ppd->congestion_entries[i].ccti_timer =
                        be16_to_cpu(p->entries[i].ccti_timer);

                ppd->congestion_entries[i].trigger_threshold =
                        p->entries[i].trigger_threshold;

                ppd->congestion_entries[i].ccti_min =
                        p->entries[i].ccti_min;
        }

        return reply((struct ib_smp *) ccp);
}
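
/*
 * Install one block of the congestion control table and publish the
 * result to the shadow copies used by the Get handlers.
 */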
static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
                                           struct ib_device *ibdev, u8 port)
{
        struct ib_cc_table_attr *p =
                (struct ib_cc_table_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
        u32 cct_entry;
        struct ib_cc_table_entry_shadow *entries;
        int i;

        /* Is the table index more than what is supported? */
        if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
                goto bail;

        /* If this packet is the first in the sequence then
         * zero the total table entry count.
         */
        if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
                ppd->total_cct_entry = 0;

        cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;

        /* ccti_limit is 0 to 63 */
        ppd->total_cct_entry += (cct_entry + 1);

        if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
                goto bail;

        ppd->ccti_limit = be16_to_cpu(p->ccti_limit);

        entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);

        for (i = 0; i <= cct_entry; i++)
                entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);

        spin_lock(&ppd->cc_shadow_lock);

        ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
        memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
                (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));

        ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
        ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
        memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
                IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));

        spin_unlock(&ppd->cc_shadow_lock);

        return reply((struct ib_smp *) ccp);

bail:
        return reply_failure((struct ib_smp *) ccp);
}
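
/* Validate the CC_Key of an incoming congestion-control MAD. */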
static int check_cc_key(struct qib_ibport *ibp,
                        struct ib_cc_mad *ccp, int mad_flags)
{
        return 0;
}
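
/*
 * Dispatch an incoming congestion-control (CC) MAD to the cc_get_* or
 * cc_set_* handler selected by its method and attribute ID.
 */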
static int process_cc(struct ib_device *ibdev, int mad_flags,
                      u8 port, const struct ib_mad *in_mad,
                      struct ib_mad *out_mad)
{
        struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        int ret;

        *out_mad = *in_mad;

        if (ccp->class_version != 2) {
                ccp->status |= IB_SMP_UNSUP_VERSION;
                ret = reply((struct ib_smp *)ccp);
                goto bail;
        }

        ret = check_cc_key(ibp, ccp, mad_flags);
        if (ret)
                goto bail;

        switch (ccp->method) {
        case IB_MGMT_METHOD_GET:
                switch (ccp->attr_id) {
                case IB_CC_ATTR_CLASSPORTINFO:
                        ret = cc_get_classportinfo(ccp, ibdev);
                        goto bail;

                case IB_CC_ATTR_CONGESTION_INFO:
                        ret = cc_get_congestion_info(ccp, ibdev, port);
                        goto bail;

                case IB_CC_ATTR_CA_CONGESTION_SETTING:
                        ret = cc_get_congestion_setting(ccp, ibdev, port);
                        goto bail;

                case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
                        ret = cc_get_congestion_control_table(ccp, ibdev, port);
                        goto bail;

                default:
                        ccp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_smp *) ccp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (ccp->attr_id) {
                case IB_CC_ATTR_CA_CONGESTION_SETTING:
                        ret = cc_set_congestion_setting(ccp, ibdev, port);
                        goto bail;

                case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
                        ret = cc_set_congestion_control_table(ccp, ibdev, port);
                        goto bail;

                default:
                        ccp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_smp *) ccp);
                        goto bail;
                }

        case IB_MGMT_METHOD_GET_RESP:
                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        case IB_MGMT_METHOD_TRAP:
        default:
                ccp->status |= IB_SMP_UNSUP_METHOD;
                ret = reply((struct ib_smp *) ccp);
        }

bail:
        return ret;
}

/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                    const struct ib_mad_hdr *in, size_t in_mad_size,
                    struct ib_mad_hdr *out, size_t *out_mad_size,
                    u16 *out_mad_pkey_index)
{
        int ret;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;

        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;

        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
                goto bail;

        case IB_MGMT_CLASS_PERF_MGMT:
                ret = process_perf(ibdev, port, in_mad, out_mad);
                goto bail;

        case IB_MGMT_CLASS_CONG_MGMT:
                if (!ppd->congestion_entries_shadow ||
                    !qib_cc_table_size) {
                        ret = IB_MAD_RESULT_SUCCESS;
                        goto bail;
                }
                ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
                goto bail;

        default:
                ret = IB_MAD_RESULT_SUCCESS;
        }

bail:
        return ret;
}
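
/*
 * Timer callback that polls the hardware transmit-wait sample and
 * re-arms congestion-statistics sampling once per second.
 */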
static void xmit_wait_timer_func(unsigned long opaque)
{
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
        struct qib_devdata *dd = dd_from_ppd(ppd);
        unsigned long flags;
        u8 status;

        spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
        if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
                status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
                if (status == IB_PMA_SAMPLE_STATUS_DONE) {
                        /* save counter cache */
                        cache_hw_sample_counters(ppd);
                        ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
                } else
                        goto done;
        }
        ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
        dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
        spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
        mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}
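
/*
 * rdmavt callback invoked when the port's MAD agent is created: set up
 * and start the per-port congestion-statistics (xmit_wait) timer.
 */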
void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
        struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(ibdev,
                                              struct qib_devdata, verbs_dev);

        /* Initialize xmit_wait structure */
        dd->pport[port_idx].cong_stats.counter = 0;
        init_timer(&dd->pport[port_idx].cong_stats.timer);
        dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func;
        dd->pport[port_idx].cong_stats.timer.data =
                (unsigned long)(&dd->pport[port_idx]);
        dd->pport[port_idx].cong_stats.timer.expires = 0;
        add_timer(&dd->pport[port_idx].cong_stats.timer);
}
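
/*
 * rdmavt callback invoked when the port's MAD agent is freed: stop the
 * congestion-statistics timer and destroy the cached SMI address handle.
 */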
void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
        struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(ibdev,
                                              struct qib_devdata, verbs_dev);

        if (dd->pport[port_idx].cong_stats.timer.data)
                del_timer_sync(&dd->pport[port_idx].cong_stats.timer);

        if (dd->pport[port_idx].ibport_data.smi_ah)
                ib_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah);
}