/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"
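/*
 * The helpers below turn the request SMP (already copied into the reply
 * buffer) into a GetResp in place: IB_MAD_RESULT_REPLY tells the ib_mad
 * layer to send the modified MAD back, and IB_SMP_DIRECTION sets the
 * direction bit required for directed-route responses.
 */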
static int reply(struct ib_smp *smp)
{
    /*
     * The verbs framework will handle the directed/LID route
     * packet changes.
     */
    smp->method = IB_MGMT_METHOD_GET_RESP;
    if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
        smp->status |= IB_SMP_DIRECTION;
    return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int reply_failure(struct ib_smp *smp)
{
    /*
     * The verbs framework will handle the directed/LID route
     * packet changes.
     */
    smp->method = IB_MGMT_METHOD_GET_RESP;
    if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
        smp->status |= IB_SMP_DIRECTION;
    return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
}
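/*
 * qib_send_trap() builds a LID-routed SubnTrap() SMP on QP0 and posts it
 * through the port's MAD send agent.  Sends are rate-limited by
 * ibp->trap_timeout, and the address handle for the SM is cached in
 * ibp->sm_ah so repeated traps do not have to recreate it.
 */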
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
    struct ib_mad_send_buf *send_buf;
    struct ib_mad_agent *agent;
    struct ib_smp *smp;
    int ret;
    unsigned long flags;
    unsigned long timeout;

    agent = ibp->send_agent;
    if (!agent)
        return;

    if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
        return;

    if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
        return;

    send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
                                  IB_MGMT_MAD_DATA, GFP_ATOMIC,
                                  IB_MGMT_BASE_VERSION);
    if (IS_ERR(send_buf))
        return;

    smp = send_buf->mad;
    smp->base_version = IB_MGMT_BASE_VERSION;
    smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
    smp->class_version = 1;
    smp->method = IB_MGMT_METHOD_TRAP;
    ibp->tid++;
    smp->tid = cpu_to_be64(ibp->tid);
    smp->attr_id = IB_SMP_ATTR_NOTICE;
    /* o14-1: smp->mkey = 0; */
    memcpy(smp->data, data, len);

    spin_lock_irqsave(&ibp->lock, flags);
    if (!ibp->sm_ah) {
        if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
            struct ib_ah *ah;

            ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
            if (IS_ERR(ah))
                ret = PTR_ERR(ah);
            else {
                send_buf->ah = ah;
                ibp->sm_ah = to_iah(ah);
                ret = 0;
            }
        } else
            ret = -EINVAL;
    } else {
        send_buf->ah = &ibp->sm_ah->ibah;
        ret = 0;
    }
    spin_unlock_irqrestore(&ibp->lock, flags);

    if (!ret)
        ret = ib_post_send_mad(send_buf, NULL);
    if (!ret) {
        /* 4.096 usec. */
        timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
        ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
    } else {
        ib_free_send_mad(send_buf);
        ibp->trap_timeout = 0;
    }
}
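/*
 * The wrappers that follow fill in an ib_mad_notice_attr for the standard
 * SMA notices (bad P_Key/Q_Key, bad M_Key, capability mask change, system
 * image GUID change, node description change) and hand it to
 * qib_send_trap().
 */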
/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
    struct ib_mad_notice_attr data;

    if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
        ibp->pkey_violations++;
    else
        ibp->qkey_violations++;

    /* Send violation trap */
    data.generic_type = IB_NOTICE_TYPE_SECURITY;
    data.prod_type_msb = 0;
    data.prod_type_lsb = IB_NOTICE_PROD_CA;
    data.trap_num = trap_num;
    data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
    data.toggle_count = 0;
    memset(&data.details, 0, sizeof(data.details));
    data.details.ntc_257_258.lid1 = lid1;
    data.details.ntc_257_258.lid2 = lid2;
    data.details.ntc_257_258.key = cpu_to_be32(key);
    data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
    data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

    qib_send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
    struct ib_mad_notice_attr data;

    /* Send violation trap */
    data.generic_type = IB_NOTICE_TYPE_SECURITY;
    data.prod_type_msb = 0;
    data.prod_type_lsb = IB_NOTICE_PROD_CA;
    data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
    data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
    data.toggle_count = 0;
    memset(&data.details, 0, sizeof(data.details));
    data.details.ntc_256.lid = data.issuer_lid;
    data.details.ntc_256.method = smp->method;
    data.details.ntc_256.attr_id = smp->attr_id;
    data.details.ntc_256.attr_mod = smp->attr_mod;
    data.details.ntc_256.mkey = smp->mkey;
    if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
        u8 hop_cnt;

        data.details.ntc_256.dr_slid = smp->dr_slid;
        data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
        hop_cnt = smp->hop_cnt;
        if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
            data.details.ntc_256.dr_trunc_hop |= IB_NOTICE_TRAP_DR_TRUNC;
            hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
        }
        data.details.ntc_256.dr_trunc_hop |= hop_cnt;
        memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
               hop_cnt);
    }

    qib_send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct qib_ibport *ibp)
{
    struct ib_mad_notice_attr data;

    data.generic_type = IB_NOTICE_TYPE_INFO;
    data.prod_type_msb = 0;
    data.prod_type_lsb = IB_NOTICE_PROD_CA;
    data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
    data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
    data.toggle_count = 0;
    memset(&data.details, 0, sizeof(data.details));
    data.details.ntc_144.lid = data.issuer_lid;
    data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

    qib_send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
    struct ib_mad_notice_attr data;

    data.generic_type = IB_NOTICE_TYPE_INFO;
    data.prod_type_msb = 0;
    data.prod_type_lsb = IB_NOTICE_PROD_CA;
    data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
    data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
    data.toggle_count = 0;
    memset(&data.details, 0, sizeof(data.details));
    data.details.ntc_145.lid = data.issuer_lid;
    data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

    qib_send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
    struct ib_mad_notice_attr data;

    data.generic_type = IB_NOTICE_TYPE_INFO;
    data.prod_type_msb = 0;
    data.prod_type_lsb = IB_NOTICE_PROD_CA;
    data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
    data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
    data.toggle_count = 0;
    memset(&data.details, 0, sizeof(data.details));
    data.details.ntc_144.lid = data.issuer_lid;
    data.details.ntc_144.local_changes = 1;
    data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

    qib_send_trap(ibp, &data, sizeof(data));
}
static int subn_get_nodedescription(struct ib_smp *smp,
                                    struct ib_device *ibdev)
{
    if (smp->attr_mod)
        smp->status |= IB_SMP_INVALID_FIELD;

    memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

    return reply(smp);
}
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
    struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
    struct qib_devdata *dd = dd_from_ibdev(ibdev);
    u32 vendor, majrev, minrev;
    unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

    /* GUID 0 is illegal */
    if (smp->attr_mod || pidx >= dd->num_pports ||
        dd->pport[pidx].guid == 0)
        smp->status |= IB_SMP_INVALID_FIELD;
    else
        nip->port_guid = dd->pport[pidx].guid;

    nip->base_version = 1;
    nip->class_version = 1;
    nip->node_type = 1;     /* channel adapter */
    nip->num_ports = ibdev->phys_port_cnt;
    /* This is already in network order */
    nip->sys_guid = ib_qib_sys_image_guid;
    nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
    nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
    nip->device_id = cpu_to_be16(dd->deviceid);
    majrev = dd->majrev;
    minrev = dd->minrev;
    nip->revision = cpu_to_be32((majrev << 16) | minrev);
    nip->local_port_num = port;
    vendor = dd->vendorid;
    nip->vendor_id[0] = QIB_SRC_OUI_1;
    nip->vendor_id[1] = QIB_SRC_OUI_2;
    nip->vendor_id[2] = QIB_SRC_OUI_3;

    return reply(smp);
}
static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
    struct qib_devdata *dd = dd_from_ibdev(ibdev);
    u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
    __be64 *p = (__be64 *) smp->data;
    unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

    /* 32 blocks of 8 64-bit GUIDs per block */

    memset(smp->data, 0, sizeof(smp->data));

    if (startgx == 0 && pidx < dd->num_pports) {
        struct qib_pportdata *ppd = dd->pport + pidx;
        struct qib_ibport *ibp = &ppd->ibport_data;
        __be64 g = ppd->guid;
        unsigned i;

        /* GUID 0 is illegal */
        if (g == 0)
            smp->status |= IB_SMP_INVALID_FIELD;
        else {
            /* The first is a copy of the read-only HW GUID. */
            p[0] = g;
            for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
                p[i] = ibp->guids[i - 1];
        }
    } else
        smp->status |= IB_SMP_INVALID_FIELD;

    return reply(smp);
}
static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
    (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
    (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
    return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
    (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
                                 (u32)n);
    return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
    return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
    (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
                                 (u32)n);
    return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
    return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
        IB_LINKINITCMD_SLEEP;
}
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
    int valid_mkey = 0;
    int ret = 0;

    /* Is the mkey in the process of expiring? */
    if (ibp->mkey_lease_timeout &&
        time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
        /* Clear timeout and mkey protection field. */
        ibp->mkey_lease_timeout = 0;
        ibp->mkeyprot = 0;
    }

    if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
        ibp->mkey == smp->mkey)
        valid_mkey = 1;

    /* Unset lease timeout on any valid Get/Set/TrapRepress */
    if (valid_mkey && ibp->mkey_lease_timeout &&
        (smp->method == IB_MGMT_METHOD_GET ||
         smp->method == IB_MGMT_METHOD_SET ||
         smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
        ibp->mkey_lease_timeout = 0;

    if (!valid_mkey) {
        switch (smp->method) {
        case IB_MGMT_METHOD_GET:
            /* Bad mkey not a violation below level 2 */
            if (ibp->mkeyprot < 2)
                break;
        case IB_MGMT_METHOD_SET:
        case IB_MGMT_METHOD_TRAP_REPRESS:
            if (ibp->mkey_violations != 0xFFFF)
                ++ibp->mkey_violations;
            if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
                ibp->mkey_lease_timeout = jiffies +
                    ibp->mkey_lease_period * HZ;
            /* Generate a trap notice. */
            qib_bad_mkey(ibp, smp);
            ret = 1;
        }
    }

    return ret;
}
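/*
 * subn_get_portinfo() answers SubnGet(PortInfo): most fields come from the
 * qib_pportdata/qib_ibport shadow state, the link and physical states come
 * from the f_iblink_state/f_ibphys_portstate chip callbacks, and the M_Key
 * is only disclosed when the protection level permits it.
 */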
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
    struct qib_devdata *dd;
    struct qib_pportdata *ppd;
    struct qib_ibport *ibp;
    struct ib_port_info *pip = (struct ib_port_info *)smp->data;
    u8 mtu;
    int ret;
    u32 state;
    u32 port_num = be32_to_cpu(smp->attr_mod);

    if (port_num == 0)
        port_num = port;
    else {
        if (port_num > ibdev->phys_port_cnt) {
            smp->status |= IB_SMP_INVALID_FIELD;
            ret = reply(smp);
            goto bail;
        }
        if (port_num != port) {
            ibp = to_iport(ibdev, port_num);
            ret = check_mkey(ibp, smp, 0);
            if (ret) {
                ret = IB_MAD_RESULT_FAILURE;
                goto bail;
            }
        }
    }

    dd = dd_from_ibdev(ibdev);
    /* IB numbers ports from 1, hdw from 0 */
    ppd = dd->pport + (port_num - 1);
    ibp = &ppd->ibport_data;

    /* Clear all fields.  Only set the non-zero fields. */
    memset(smp->data, 0, sizeof(smp->data));

    /* Only return the mkey if the protection field allows it. */
    if (!(smp->method == IB_MGMT_METHOD_GET &&
          ibp->mkey != smp->mkey &&
          ibp->mkeyprot == 1))
        pip->mkey = ibp->mkey;
    pip->gid_prefix = ibp->gid_prefix;
    pip->lid = cpu_to_be16(ppd->lid);
    pip->sm_lid = cpu_to_be16(ibp->sm_lid);
    pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
    /* pip->diag_code; */
    pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
    pip->local_port_num = port;
    pip->link_width_enabled = ppd->link_width_enabled;
    pip->link_width_supported = ppd->link_width_supported;
    pip->link_width_active = ppd->link_width_active;
    state = dd->f_iblink_state(ppd->lastibcstat);
    pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

    pip->portphysstate_linkdown =
        (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
        (get_linkdowndefaultstate(ppd) ? 1 : 2);
    pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
    pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
        ppd->link_speed_enabled;
    switch (ppd->ibmtu) {
    default: /* something is wrong; fall through */
    case 4096:
        mtu = IB_MTU_4096;
        break;
    case 2048:
        mtu = IB_MTU_2048;
        break;
    case 1024:
        mtu = IB_MTU_1024;
        break;
    case 512:
        mtu = IB_MTU_512;
        break;
    case 256:
        mtu = IB_MTU_256;
        break;
    }
    pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
    pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
    pip->vl_high_limit = ibp->vl_high_limit;
    pip->vl_arb_high_cap = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
    pip->vl_arb_low_cap = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
    /* InitTypeReply = 0 */
    pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
    /* HCAs ignore VLStallCount and HOQLife */
    /* pip->vlstallcnt_hoqlife; */
    pip->operationalvl_pei_peo_fpi_fpo =
        dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
    pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
    /* P_KeyViolations are counted by hardware. */
    pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
    pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
    /* Only the hardware GUID is supported for now */
    pip->guid_cap = QIB_GUIDS_PER_PORT;
    pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
    /* 32.768 usec. response time (guessing) */
    pip->resv_resptimevalue = 3;
    pip->localphyerrors_overrunerrors =
        (get_phyerrthreshold(ppd) << 4) |
        get_overrunthreshold(ppd);
    /* pip->max_credit_hint; */
    if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
        u32 v;

        v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
        pip->link_roundtrip_latency[0] = v >> 16;
        pip->link_roundtrip_latency[1] = v >> 8;
        pip->link_roundtrip_latency[2] = v;
    }

    ret = reply(smp);

bail:
    return ret;
}
/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
    struct qib_pportdata *ppd = dd->pport + port - 1;
    /*
     * always a kernel context, no locking needed.
     * If we get here with ppd setup, no need to check.
     */
    struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

    memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

    return 0;
}
static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
                              u8 port)
{
    u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
    u16 *p = (u16 *) smp->data;
    __be16 *q = (__be16 *) smp->data;

    /* 64 blocks of 32 16-bit P_Key entries */

    memset(smp->data, 0, sizeof(smp->data));
    if (startpx == 0) {
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        unsigned i, n = qib_get_npkeys(dd);

        get_pkeys(dd, port, p);

        for (i = 0; i < n; i++)
            q[i] = cpu_to_be16(p[i]);
    } else
        smp->status |= IB_SMP_INVALID_FIELD;

    return reply(smp);
}
static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
    struct qib_devdata *dd = dd_from_ibdev(ibdev);
    u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
    __be64 *p = (__be64 *) smp->data;
    unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

    /* 32 blocks of 8 64-bit GUIDs per block */

    if (startgx == 0 && pidx < dd->num_pports) {
        struct qib_pportdata *ppd = dd->pport + pidx;
        struct qib_ibport *ibp = &ppd->ibport_data;
        unsigned i;

        /* The first entry is read-only. */
        for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
            ibp->guids[i - 1] = p[i];
    } else
        smp->status |= IB_SMP_INVALID_FIELD;

    /* The only GUID we support is the first read-only entry. */
    return subn_get_guidinfo(smp, ibdev, port);
}
/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
    struct ib_port_info *pip = (struct ib_port_info *)smp->data;
    struct ib_event event;
    struct qib_devdata *dd;
    struct qib_pportdata *ppd;
    struct qib_ibport *ibp;
    u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
    unsigned long flags;
    u16 lid, smlid;
    u8 lwe;
    u8 lse;
    u8 state;
    u8 vls;
    u8 msl;
    u16 lstate;
    int ret, ore, mtu;
    u32 port_num = be32_to_cpu(smp->attr_mod);

    if (port_num == 0)
        port_num = port;
    else {
        if (port_num > ibdev->phys_port_cnt)
            goto err;
        /* Port attributes can only be set on the receiving port */
        if (port_num != port)
            goto get_only;
    }

    dd = dd_from_ibdev(ibdev);
    /* IB numbers ports from 1, hdw from 0 */
    ppd = dd->pport + (port_num - 1);
    ibp = &ppd->ibport_data;
    event.device = ibdev;
    event.element.port_num = port;

    ibp->mkey = pip->mkey;
    ibp->gid_prefix = pip->gid_prefix;
    ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

    lid = be16_to_cpu(pip->lid);
    /* Must be a valid unicast LID address. */
    if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
        smp->status |= IB_SMP_INVALID_FIELD;
    else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
        if (ppd->lid != lid)
            qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
        if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
            qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
        qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
        event.event = IB_EVENT_LID_CHANGE;
        ib_dispatch_event(&event);
    }

    smlid = be16_to_cpu(pip->sm_lid);
    msl = pip->neighbormtu_mastersmsl & 0xF;
    /* Must be a valid unicast LID address. */
    if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
        smp->status |= IB_SMP_INVALID_FIELD;
    else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
        spin_lock_irqsave(&ibp->lock, flags);
        if (ibp->sm_ah) {
            if (smlid != ibp->sm_lid)
                ibp->sm_ah->attr.dlid = smlid;
            if (msl != ibp->sm_sl)
                ibp->sm_ah->attr.sl = msl;
        }
        spin_unlock_irqrestore(&ibp->lock, flags);
        if (smlid != ibp->sm_lid)
            ibp->sm_lid = smlid;
        if (msl != ibp->sm_sl)
            ibp->sm_sl = msl;
        event.event = IB_EVENT_SM_CHANGE;
        ib_dispatch_event(&event);
    }

    /* Allow 1x or 4x to be set (see 14.2.6.6). */
    lwe = pip->link_width_enabled;
    if (lwe) {
        if (lwe == 0xFF)
            set_link_width_enabled(ppd, ppd->link_width_supported);
        else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
            smp->status |= IB_SMP_INVALID_FIELD;
        else if (lwe != ppd->link_width_enabled)
            set_link_width_enabled(ppd, lwe);
    }

    lse = pip->linkspeedactive_enabled & 0xF;
    if (lse) {
        /*
         * The IB 1.2 spec. only allows link speed values
         * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
         * speeds.
         */
        if (lse == 15)
            set_link_speed_enabled(ppd, ppd->link_speed_supported);
        else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
            smp->status |= IB_SMP_INVALID_FIELD;
        else if (lse != ppd->link_speed_enabled)
            set_link_speed_enabled(ppd, lse);
    }

    /* Set link down default state. */
    switch (pip->portphysstate_linkdown & 0xF) {
    case 0: /* NOP */
        break;
    case 1: /* SLEEP */
        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
                                IB_LINKINITCMD_SLEEP);
        break;
    case 2: /* POLL */
        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
                                IB_LINKINITCMD_POLL);
        break;
    default:
        smp->status |= IB_SMP_INVALID_FIELD;
    }

    ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
    ibp->vl_high_limit = pip->vl_high_limit;
    (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
                            ibp->vl_high_limit);

    mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
    if (mtu == -1)
        smp->status |= IB_SMP_INVALID_FIELD;
    else
        qib_set_mtu(ppd, mtu);

    /* Set operational VLs */
    vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
    if (vls) {
        if (vls > ppd->vls_supported)
            smp->status |= IB_SMP_INVALID_FIELD;
        else
            (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
    }

    if (pip->mkey_violations == 0)
        ibp->mkey_violations = 0;

    if (pip->pkey_violations == 0)
        ibp->pkey_violations = 0;

    if (pip->qkey_violations == 0)
        ibp->qkey_violations = 0;

    ore = pip->localphyerrors_overrunerrors;
    if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
        smp->status |= IB_SMP_INVALID_FIELD;

    if (set_overrunthreshold(ppd, (ore & 0xF)))
        smp->status |= IB_SMP_INVALID_FIELD;

    ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

    /*
     * Do the port state change now that the other link parameters
     * have been set.
     * Changing the port physical state only makes sense if the link
     * is down or is being set to down.
     */
    state = pip->linkspeed_portstate & 0xF;
    lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
    if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
        smp->status |= IB_SMP_INVALID_FIELD;

    /*
     * Only state changes of DOWN, ARM, and ACTIVE are valid
     * and must be in the correct state to take effect (see 7.2.6).
     */
    switch (state) {
    case IB_PORT_NOP:
        if (lstate == 0)
            break;
        /* FALLTHROUGH */
    case IB_PORT_DOWN:
        if (lstate == 0)
            lstate = QIB_IB_LINKDOWN_ONLY;
        else if (lstate == 1)
            lstate = QIB_IB_LINKDOWN_SLEEP;
        else if (lstate == 2)
            lstate = QIB_IB_LINKDOWN;
        else if (lstate == 3)
            lstate = QIB_IB_LINKDOWN_DISABLE;
        else {
            smp->status |= IB_SMP_INVALID_FIELD;
            break;
        }
        spin_lock_irqsave(&ppd->lflags_lock, flags);
        ppd->lflags &= ~QIBL_LINKV;
        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
        qib_set_linkstate(ppd, lstate);
        /*
         * Don't send a reply if the response would be sent
         * through the disabled port.
         */
        if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
            ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
            goto done;
        }
        qib_wait_linkstate(ppd, QIBL_LINKV, 10);
        break;
    case IB_PORT_ARMED:
        qib_set_linkstate(ppd, QIB_IB_LINKARM);
        break;
    case IB_PORT_ACTIVE:
        qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
        break;
    default:
        smp->status |= IB_SMP_INVALID_FIELD;
    }

    if (clientrereg) {
        event.event = IB_EVENT_CLIENT_REREGISTER;
        ib_dispatch_event(&event);
    }

    ret = subn_get_portinfo(smp, ibdev, port);

    /* restore re-reg bit per o14-12.2.1 */
    pip->clientrereg_resv_subnetto |= clientrereg;

    goto done;

err:
    smp->status |= IB_SMP_INVALID_FIELD;
get_only:
    ret = subn_get_portinfo(smp, ibdev, port);
done:
    return ret;
}
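/*
 * The hardware P_Key table is shared, so rm_pkey()/add_pkey() keep a
 * per-entry reference count (ppd->pkeyrefs) and only report a change when
 * the physical table actually needs to be rewritten.
 */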
/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the qlogic_ib device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
    int i;
    int ret;

    for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
        if (ppd->pkeys[i] != key)
            continue;
        if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
            ppd->pkeys[i] = 0;
            ret = 1;
            goto bail;
        }
        ret = 0;
        goto bail;
    }

    ret = 0;

bail:
    return ret;
}
/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the qlogic_ib device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
    int i;
    u16 lkey = key & 0x7FFF;
    int any = 0;
    int ret;

    if (lkey == 0x7FFF) {
        ret = 0;
        goto bail;
    }

    /* Look for an empty slot or a matching PKEY. */
    for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
        if (!ppd->pkeys[i]) {
            any++;
            continue;
        }
        /* If it matches exactly, try to increment the ref count */
        if (ppd->pkeys[i] == key) {
            if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
                ret = 0;
                goto bail;
            }
            /* Lost the race. Look for an empty slot below. */
            atomic_dec(&ppd->pkeyrefs[i]);
            any++;
        }
        /*
         * It makes no sense to have both the limited and unlimited
         * PKEY set at the same time since the unlimited one will
         * disable the limited one.
         */
        if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
            ret = -EEXIST;
            goto bail;
        }
    }
    if (!any) {
        ret = -EBUSY;
        goto bail;
    }
    for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
        if (!ppd->pkeys[i] &&
            atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
            /* for qibstats, etc. */
            ppd->pkeys[i] = key;
            ret = 1;
            goto bail;
        }
    }
    ret = -EBUSY;

bail:
    return ret;
}
/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
    struct qib_pportdata *ppd;
    struct qib_ctxtdata *rcd;
    unsigned i;
    int changed = 0;

    /*
     * IB port one/two always maps to context zero/one,
     * always a kernel context, no locking needed
     * If we get here with ppd setup, no need to check.
     */
    ppd = dd->pport + (port - 1);
    rcd = dd->rcd[ppd->hw_pidx];

    for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
        u16 key = pkeys[i];
        u16 okey = rcd->pkeys[i];

        if (key == okey)
            continue;
        /*
         * The value of this PKEY table entry is changing.
         * Remove the old entry in the hardware's array of PKEYs.
         */
        if (okey & 0x7FFF)
            changed |= rm_pkey(ppd, okey);
        if (key & 0x7FFF) {
            int ret = add_pkey(ppd, key);

            if (ret < 0)
                key = 0;
            else
                changed |= ret;
        }
        rcd->pkeys[i] = key;
    }
    if (changed) {
        struct ib_event event;

        (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

        event.event = IB_EVENT_PKEY_CHANGE;
        event.device = &dd->verbs_dev.ibdev;
        event.element.port_num = port;
        ib_dispatch_event(&event);
    }
    return 0;
}
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
                              u8 port)
{
    u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
    __be16 *p = (__be16 *) smp->data;
    u16 *q = (u16 *) smp->data;
    struct qib_devdata *dd = dd_from_ibdev(ibdev);
    unsigned i, n = qib_get_npkeys(dd);

    for (i = 0; i < n; i++)
        q[i] = be16_to_cpu(p[i]);

    if (startpx != 0 || set_pkeys(dd, port, q) != 0)
        smp->status |= IB_SMP_INVALID_FIELD;

    return subn_get_pkeytable(smp, ibdev, port);
}
static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
    struct qib_ibport *ibp = to_iport(ibdev, port);
    u8 *p = (u8 *) smp->data;
    unsigned i;

    memset(smp->data, 0, sizeof(smp->data));

    if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
        smp->status |= IB_SMP_UNSUP_METHOD;
    else
        for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
            *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

    return reply(smp);
}
static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
    struct qib_ibport *ibp = to_iport(ibdev, port);
    u8 *p = (u8 *) smp->data;
    unsigned i;

    if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
        smp->status |= IB_SMP_UNSUP_METHOD;
        return reply(smp);
    }

    for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
        ibp->sl_to_vl[i] = *p >> 4;
        ibp->sl_to_vl[i + 1] = *p & 0xF;
    }
    qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
                        _QIB_EVENT_SL2VL_CHANGE_BIT);

    return subn_get_sl_to_vl(smp, ibdev, port);
}
static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
                           u8 port)
{
    unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
    struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

    memset(smp->data, 0, sizeof(smp->data));

    if (ppd->vls_supported == IB_VL_VL0)
        smp->status |= IB_SMP_UNSUP_METHOD;
    else if (which == IB_VLARB_LOWPRI_0_31)
        (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
                                       smp->data);
    else if (which == IB_VLARB_HIGHPRI_0_31)
        (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
                                       smp->data);
    else
        smp->status |= IB_SMP_INVALID_FIELD;

    return reply(smp);
}
static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
                           u8 port)
{
    unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
    struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

    if (ppd->vls_supported == IB_VL_VL0)
        smp->status |= IB_SMP_UNSUP_METHOD;
    else if (which == IB_VLARB_LOWPRI_0_31)
        (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
                                       smp->data);
    else if (which == IB_VLARB_HIGHPRI_0_31)
        (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
                                       smp->data);
    else
        smp->status |= IB_SMP_INVALID_FIELD;

    return subn_get_vl_arb(smp, ibdev, port);
}
static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
                             u8 port)
{
    /*
     * For now, we only send the trap once so no need to process this.
     * o14-3.a4 The SMA shall not send any message in response to a valid
     * SubnTrapRepress() message.
     */
    return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
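/*
 * Performance management (PMA) attributes follow.  The chip counters
 * cannot be cleared, so a PortCounters Set simply snapshots the current
 * values into the ibp->z_* baselines, which every subsequent Get subtracts
 * before reporting.
 */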
static int pma_get_classportinfo(struct ib_pma_mad *pmp,
                                 struct ib_device *ibdev)
{
    struct ib_class_port_info *p =
        (struct ib_class_port_info *)pmp->data;
    struct qib_devdata *dd = dd_from_ibdev(ibdev);

    memset(pmp->data, 0, sizeof(pmp->data));

    if (pmp->mad_hdr.attr_mod != 0)
        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

    /* Note that AllPortSelect is not valid */
    p->base_version = 1;
    p->class_version = 1;
    p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
    /*
     * Set the most significant bit of CM2 to indicate support for
     * congestion statistics
     */
    p->reserved[0] = dd->psxmitwait_supported << 7;
    /*
     * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
     */
    p->resp_time_value = 18;

    return reply((struct ib_smp *) pmp);
}
static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
{
    struct ib_pma_portsamplescontrol *p =
        (struct ib_pma_portsamplescontrol *)pmp->data;
    struct qib_ibdev *dev = to_idev(ibdev);
    struct qib_devdata *dd = dd_from_dev(dev);
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    unsigned long flags;
    u8 port_select = p->port_select;

    memset(pmp->data, 0, sizeof(pmp->data));

    p->port_select = port_select;
    if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
        goto bail;
    }
    spin_lock_irqsave(&ibp->lock, flags);
    p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
    p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
    p->counter_width = 4;   /* 32 bit counters */
    p->counter_mask0_9 = COUNTER_MASK0_9;
    p->sample_start = cpu_to_be32(ibp->pma_sample_start);
    p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
    p->tag = cpu_to_be16(ibp->pma_tag);
    p->counter_select[0] = ibp->pma_counter_select[0];
    p->counter_select[1] = ibp->pma_counter_select[1];
    p->counter_select[2] = ibp->pma_counter_select[2];
    p->counter_select[3] = ibp->pma_counter_select[3];
    p->counter_select[4] = ibp->pma_counter_select[4];
    spin_unlock_irqrestore(&ibp->lock, flags);

bail:
    return reply((struct ib_smp *) pmp);
}
static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
                                      struct ib_device *ibdev, u8 port)
{
    struct ib_pma_portsamplescontrol *p =
        (struct ib_pma_portsamplescontrol *)pmp->data;
    struct qib_ibdev *dev = to_idev(ibdev);
    struct qib_devdata *dd = dd_from_dev(dev);
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    unsigned long flags;
    u8 status, xmit_flags;
    int ret;

    if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
        ret = reply((struct ib_smp *) pmp);
        goto bail;
    }

    spin_lock_irqsave(&ibp->lock, flags);

    /* Port Sampling code owns the PS* HW counters */
    xmit_flags = ppd->cong_stats.flags;
    ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
    status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
    if (status == IB_PMA_SAMPLE_STATUS_DONE ||
        (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
         xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
        ibp->pma_sample_start = be32_to_cpu(p->sample_start);
        ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
        ibp->pma_tag = be16_to_cpu(p->tag);
        ibp->pma_counter_select[0] = p->counter_select[0];
        ibp->pma_counter_select[1] = p->counter_select[1];
        ibp->pma_counter_select[2] = p->counter_select[2];
        ibp->pma_counter_select[3] = p->counter_select[3];
        ibp->pma_counter_select[4] = p->counter_select[4];
        dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
                              ibp->pma_sample_start);
    }
    spin_unlock_irqrestore(&ibp->lock, flags);

    ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
    return ret;
}
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
                       __be16 sel)
{
    u64 ret;

    switch (sel) {
    case IB_PMA_PORT_XMIT_DATA:
        ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
        break;
    case IB_PMA_PORT_RCV_DATA:
        ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
        break;
    case IB_PMA_PORT_XMIT_PKTS:
        ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
        break;
    case IB_PMA_PORT_RCV_PKTS:
        ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
        break;
    case IB_PMA_PORT_XMIT_WAIT:
        ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
        break;
    default:
        ret = 0;
    }

    return ret;
}
/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
    u32 delta;

    delta = get_counter(&ppd->ibport_data, ppd,
                        IB_PMA_PORT_XMIT_WAIT);
    return ppd->cong_stats.counter + delta;
}
static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
    struct qib_ibport *ibp = &ppd->ibport_data;

    ppd->cong_stats.counter_cache.psxmitdata =
        get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
    ppd->cong_stats.counter_cache.psrcvdata =
        get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
    ppd->cong_stats.counter_cache.psxmitpkts =
        get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
    ppd->cong_stats.counter_cache.psrcvpkts =
        get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
    ppd->cong_stats.counter_cache.psxmitwait =
        get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}
static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
                                        __be16 sel)
{
    u64 ret;

    switch (sel) {
    case IB_PMA_PORT_XMIT_DATA:
        ret = ppd->cong_stats.counter_cache.psxmitdata;
        break;
    case IB_PMA_PORT_RCV_DATA:
        ret = ppd->cong_stats.counter_cache.psrcvdata;
        break;
    case IB_PMA_PORT_XMIT_PKTS:
        ret = ppd->cong_stats.counter_cache.psxmitpkts;
        break;
    case IB_PMA_PORT_RCV_PKTS:
        ret = ppd->cong_stats.counter_cache.psrcvpkts;
        break;
    case IB_PMA_PORT_XMIT_WAIT:
        ret = ppd->cong_stats.counter_cache.psxmitwait;
        break;
    default:
        ret = 0;
    }

    return ret;
}
static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
                                     struct ib_device *ibdev, u8 port)
{
    struct ib_pma_portsamplesresult *p =
        (struct ib_pma_portsamplesresult *)pmp->data;
    struct qib_ibdev *dev = to_idev(ibdev);
    struct qib_devdata *dd = dd_from_dev(dev);
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    unsigned long flags;
    u8 status;
    int i;

    memset(pmp->data, 0, sizeof(pmp->data));
    spin_lock_irqsave(&ibp->lock, flags);
    p->tag = cpu_to_be16(ibp->pma_tag);
    if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
        p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
    else {
        status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
        p->sample_status = cpu_to_be16(status);
        if (status == IB_PMA_SAMPLE_STATUS_DONE) {
            cache_hw_sample_counters(ppd);
            ppd->cong_stats.counter =
                xmit_wait_get_value_delta(ppd);
            dd->f_set_cntr_sample(ppd,
                                  QIB_CONG_TIMER_PSINTERVAL, 0);
            ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
        }
    }
    for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
        p->counter[i] = cpu_to_be32(
            get_cache_hw_sample_counters(
                ppd, ibp->pma_counter_select[i]));
    spin_unlock_irqrestore(&ibp->lock, flags);

    return reply((struct ib_smp *) pmp);
}
static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
                                         struct ib_device *ibdev, u8 port)
{
    struct ib_pma_portsamplesresult_ext *p =
        (struct ib_pma_portsamplesresult_ext *)pmp->data;
    struct qib_ibdev *dev = to_idev(ibdev);
    struct qib_devdata *dd = dd_from_dev(dev);
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    unsigned long flags;
    u8 status;
    int i;

    /* Port Sampling code owns the PS* HW counters */
    memset(pmp->data, 0, sizeof(pmp->data));
    spin_lock_irqsave(&ibp->lock, flags);
    p->tag = cpu_to_be16(ibp->pma_tag);
    if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
        p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
    else {
        status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
        p->sample_status = cpu_to_be16(status);
        /* 64 bits */
        p->extended_width = cpu_to_be32(0x80000000);
        if (status == IB_PMA_SAMPLE_STATUS_DONE) {
            cache_hw_sample_counters(ppd);
            ppd->cong_stats.counter =
                xmit_wait_get_value_delta(ppd);
            dd->f_set_cntr_sample(ppd,
                                  QIB_CONG_TIMER_PSINTERVAL, 0);
            ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
        }
    }
    for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
        p->counter[i] = cpu_to_be64(
            get_cache_hw_sample_counters(
                ppd, ibp->pma_counter_select[i]));
    spin_unlock_irqrestore(&ibp->lock, flags);

    return reply((struct ib_smp *) pmp);
}
static int pma_get_portcounters(struct ib_pma_mad *pmp,
                                struct ib_device *ibdev, u8 port)
{
    struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
        pmp->data;
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    struct qib_verbs_counters cntrs;
    u8 port_select = p->port_select;

    qib_get_counters(ppd, &cntrs);

    /* Adjust counters for any resets done. */
    cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
    cntrs.link_error_recovery_counter -=
        ibp->z_link_error_recovery_counter;
    cntrs.link_downed_counter -= ibp->z_link_downed_counter;
    cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
    cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
    cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
    cntrs.port_xmit_data -= ibp->z_port_xmit_data;
    cntrs.port_rcv_data -= ibp->z_port_rcv_data;
    cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
    cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
    cntrs.local_link_integrity_errors -=
        ibp->z_local_link_integrity_errors;
    cntrs.excessive_buffer_overrun_errors -=
        ibp->z_excessive_buffer_overrun_errors;
    cntrs.vl15_dropped -= ibp->z_vl15_dropped;
    cntrs.vl15_dropped += ibp->n_vl15_dropped;

    memset(pmp->data, 0, sizeof(pmp->data));

    p->port_select = port_select;
    if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

    if (cntrs.symbol_error_counter > 0xFFFFUL)
        p->symbol_error_counter = cpu_to_be16(0xFFFF);
    else
        p->symbol_error_counter =
            cpu_to_be16((u16)cntrs.symbol_error_counter);
    if (cntrs.link_error_recovery_counter > 0xFFUL)
        p->link_error_recovery_counter = 0xFF;
    else
        p->link_error_recovery_counter =
            (u8)cntrs.link_error_recovery_counter;
    if (cntrs.link_downed_counter > 0xFFUL)
        p->link_downed_counter = 0xFF;
    else
        p->link_downed_counter = (u8)cntrs.link_downed_counter;
    if (cntrs.port_rcv_errors > 0xFFFFUL)
        p->port_rcv_errors = cpu_to_be16(0xFFFF);
    else
        p->port_rcv_errors =
            cpu_to_be16((u16) cntrs.port_rcv_errors);
    if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
        p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
    else
        p->port_rcv_remphys_errors =
            cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
    if (cntrs.port_xmit_discards > 0xFFFFUL)
        p->port_xmit_discards = cpu_to_be16(0xFFFF);
    else
        p->port_xmit_discards =
            cpu_to_be16((u16)cntrs.port_xmit_discards);
    if (cntrs.local_link_integrity_errors > 0xFUL)
        cntrs.local_link_integrity_errors = 0xFUL;
    if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
        cntrs.excessive_buffer_overrun_errors = 0xFUL;
    p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
        cntrs.excessive_buffer_overrun_errors;
    if (cntrs.vl15_dropped > 0xFFFFUL)
        p->vl15_dropped = cpu_to_be16(0xFFFF);
    else
        p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
    if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
        p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
    else
        p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
    if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
        p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
    else
        p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
    if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
        p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
    else
        p->port_xmit_packets =
            cpu_to_be32((u32)cntrs.port_xmit_packets);
    if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
        p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
    else
        p->port_rcv_packets =
            cpu_to_be32((u32) cntrs.port_rcv_packets);

    return reply((struct ib_smp *) pmp);
}
static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
                                     struct ib_device *ibdev, u8 port)
{
    /* Congestion PMA packets start at offset 24 not 64 */
    struct ib_pma_portcounters_cong *p =
        (struct ib_pma_portcounters_cong *)pmp->reserved;
    struct qib_verbs_counters cntrs;
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    struct qib_devdata *dd = dd_from_ppd(ppd);
    u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
    u64 xmit_wait_counter;
    unsigned long flags;

    /*
     * This check is performed only in the GET method because the
     * SET method ends up calling this anyway.
     */
    if (!dd->psxmitwait_supported)
        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
    if (port_select != port)
        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

    qib_get_counters(ppd, &cntrs);
    spin_lock_irqsave(&ppd->ibport_data.lock, flags);
    xmit_wait_counter = xmit_wait_get_value_delta(ppd);
    spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);

    /* Adjust counters for any resets done. */
    cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
    cntrs.link_error_recovery_counter -=
        ibp->z_link_error_recovery_counter;
    cntrs.link_downed_counter -= ibp->z_link_downed_counter;
    cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
    cntrs.port_rcv_remphys_errors -=
        ibp->z_port_rcv_remphys_errors;
    cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
    cntrs.local_link_integrity_errors -=
        ibp->z_local_link_integrity_errors;
    cntrs.excessive_buffer_overrun_errors -=
        ibp->z_excessive_buffer_overrun_errors;
    cntrs.vl15_dropped -= ibp->z_vl15_dropped;
    cntrs.vl15_dropped += ibp->n_vl15_dropped;
    cntrs.port_xmit_data -= ibp->z_port_xmit_data;
    cntrs.port_rcv_data -= ibp->z_port_rcv_data;
    cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
    cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

    memset(pmp->reserved, 0, sizeof(pmp->reserved) +
           sizeof(pmp->data));

    /*
     * Set top 3 bits to indicate interval in picoseconds in
     * remaining bits.
     */
    p->port_check_rate =
        cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
                    (dd->psxmitwait_check_rate &
                     ~(QIB_XMIT_RATE_PICO << 13)));
    p->port_adr_events = cpu_to_be64(0);
    p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
    p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
    p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
    p->port_xmit_packets =
        cpu_to_be64(cntrs.port_xmit_packets);
    p->port_rcv_packets =
        cpu_to_be64(cntrs.port_rcv_packets);
    if (cntrs.symbol_error_counter > 0xFFFFUL)
        p->symbol_error_counter = cpu_to_be16(0xFFFF);
    else
        p->symbol_error_counter =
            cpu_to_be16((u16)cntrs.symbol_error_counter);
    if (cntrs.link_error_recovery_counter > 0xFFUL)
        p->link_error_recovery_counter = 0xFF;
    else
        p->link_error_recovery_counter =
            (u8)cntrs.link_error_recovery_counter;
    if (cntrs.link_downed_counter > 0xFFUL)
        p->link_downed_counter = 0xFF;
    else
        p->link_downed_counter =
            (u8)cntrs.link_downed_counter;
    if (cntrs.port_rcv_errors > 0xFFFFUL)
        p->port_rcv_errors = cpu_to_be16(0xFFFF);
    else
        p->port_rcv_errors =
            cpu_to_be16((u16) cntrs.port_rcv_errors);
    if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
        p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
    else
        p->port_rcv_remphys_errors =
            cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
    if (cntrs.port_xmit_discards > 0xFFFFUL)
        p->port_xmit_discards = cpu_to_be16(0xFFFF);
    else
        p->port_xmit_discards =
            cpu_to_be16((u16)cntrs.port_xmit_discards);
    if (cntrs.local_link_integrity_errors > 0xFUL)
        cntrs.local_link_integrity_errors = 0xFUL;
    if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
        cntrs.excessive_buffer_overrun_errors = 0xFUL;
    p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
        cntrs.excessive_buffer_overrun_errors;
    if (cntrs.vl15_dropped > 0xFFFFUL)
        p->vl15_dropped = cpu_to_be16(0xFFFF);
    else
        p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

    return reply((struct ib_smp *)pmp);
}
static void qib_snapshot_pmacounters(
    struct qib_ibport *ibp,
    struct qib_pma_counters *pmacounters)
{
    struct qib_pma_counters *p;
    int cpu;

    memset(pmacounters, 0, sizeof(*pmacounters));
    for_each_possible_cpu(cpu) {
        p = per_cpu_ptr(ibp->pmastats, cpu);
        pmacounters->n_unicast_xmit += p->n_unicast_xmit;
        pmacounters->n_unicast_rcv += p->n_unicast_rcv;
        pmacounters->n_multicast_xmit += p->n_multicast_xmit;
        pmacounters->n_multicast_rcv += p->n_multicast_rcv;
    }
}
static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
                                    struct ib_device *ibdev, u8 port)
{
    struct ib_pma_portcounters_ext *p =
        (struct ib_pma_portcounters_ext *)pmp->data;
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    u64 swords, rwords, spkts, rpkts, xwait;
    struct qib_pma_counters pma;
    u8 port_select = p->port_select;

    memset(pmp->data, 0, sizeof(pmp->data));

    p->port_select = port_select;
    if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
        pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
        goto bail;
    }

    qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

    /* Adjust counters for any resets done. */
    swords -= ibp->z_port_xmit_data;
    rwords -= ibp->z_port_rcv_data;
    spkts -= ibp->z_port_xmit_packets;
    rpkts -= ibp->z_port_rcv_packets;

    p->port_xmit_data = cpu_to_be64(swords);
    p->port_rcv_data = cpu_to_be64(rwords);
    p->port_xmit_packets = cpu_to_be64(spkts);
    p->port_rcv_packets = cpu_to_be64(rpkts);

    qib_snapshot_pmacounters(ibp, &pma);

    p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
        - ibp->z_unicast_xmit);
    p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
        - ibp->z_unicast_rcv);
    p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
        - ibp->z_multicast_xmit);
    p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
        - ibp->z_multicast_rcv);

bail:
    return reply((struct ib_smp *) pmp);
}
static int pma_set_portcounters(struct ib_pma_mad *pmp,
                                struct ib_device *ibdev, u8 port)
{
    struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
        pmp->data;
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    struct qib_verbs_counters cntrs;

    /*
     * Since the HW doesn't support clearing counters, we save the
     * current count and subtract it from future responses.
     */
    qib_get_counters(ppd, &cntrs);

    if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
        ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

    if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
        ibp->z_link_error_recovery_counter =
            cntrs.link_error_recovery_counter;

    if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
        ibp->z_link_downed_counter = cntrs.link_downed_counter;

    if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
        ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

    if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
        ibp->z_port_rcv_remphys_errors =
            cntrs.port_rcv_remphys_errors;

    if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
        ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

    if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
        ibp->z_local_link_integrity_errors =
            cntrs.local_link_integrity_errors;

    if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
        ibp->z_excessive_buffer_overrun_errors =
            cntrs.excessive_buffer_overrun_errors;

    if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
        ibp->n_vl15_dropped = 0;
        ibp->z_vl15_dropped = cntrs.vl15_dropped;
    }

    if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
        ibp->z_port_xmit_data = cntrs.port_xmit_data;

    if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
        ibp->z_port_rcv_data = cntrs.port_rcv_data;

    if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
        ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

    if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
        ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

    return pma_get_portcounters(pmp, ibdev, port);
}
static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
                                     struct ib_device *ibdev, u8 port)
{
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    struct qib_devdata *dd = dd_from_ppd(ppd);
    struct qib_verbs_counters cntrs;
    u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
    int ret = 0;
    unsigned long flags;

    qib_get_counters(ppd, &cntrs);
    /* Get counter values before we save them */
    ret = pma_get_portcounters_cong(pmp, ibdev, port);

    if (counter_select & IB_PMA_SEL_CONG_XMIT) {
        spin_lock_irqsave(&ppd->ibport_data.lock, flags);
        ppd->cong_stats.counter = 0;
        dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
                              0x0);
        spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
    }
    if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
        ibp->z_port_xmit_data = cntrs.port_xmit_data;
        ibp->z_port_rcv_data = cntrs.port_rcv_data;
        ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
        ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
    }
    if (counter_select & IB_PMA_SEL_CONG_ALL) {
        ibp->z_symbol_error_counter =
            cntrs.symbol_error_counter;
        ibp->z_link_error_recovery_counter =
            cntrs.link_error_recovery_counter;
        ibp->z_link_downed_counter =
            cntrs.link_downed_counter;
        ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
        ibp->z_port_rcv_remphys_errors =
            cntrs.port_rcv_remphys_errors;
        ibp->z_port_xmit_discards =
            cntrs.port_xmit_discards;
        ibp->z_local_link_integrity_errors =
            cntrs.local_link_integrity_errors;
        ibp->z_excessive_buffer_overrun_errors =
            cntrs.excessive_buffer_overrun_errors;
        ibp->n_vl15_dropped = 0;
        ibp->z_vl15_dropped = cntrs.vl15_dropped;
    }

    return ret;
}
static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
                                    struct ib_device *ibdev, u8 port)
{
    struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
        pmp->data;
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    u64 swords, rwords, spkts, rpkts, xwait;
    struct qib_pma_counters pma;

    qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

    if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
        ibp->z_port_xmit_data = swords;

    if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
        ibp->z_port_rcv_data = rwords;

    if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
        ibp->z_port_xmit_packets = spkts;

    if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
        ibp->z_port_rcv_packets = rpkts;

    qib_snapshot_pmacounters(ibp, &pma);

    if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
        ibp->z_unicast_xmit = pma.n_unicast_xmit;

    if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
        ibp->z_unicast_rcv = pma.n_unicast_rcv;

    if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
        ibp->z_multicast_xmit = pma.n_multicast_xmit;

    if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
        ibp->z_multicast_rcv = pma.n_multicast_rcv;

    return pma_get_portcounters_ext(pmp, ibdev, port);
}
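/*
 * process_subn() and process_perf() are the per-class dispatchers invoked
 * from the driver's MAD entry point: they validate the class version,
 * check the M_Key where required, and route each method/attribute pair to
 * one of the handlers above.
 */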
static int process_subn(struct ib_device *ibdev, int mad_flags,
                        u8 port, const struct ib_mad *in_mad,
                        struct ib_mad *out_mad)
{
    struct ib_smp *smp = (struct ib_smp *)out_mad;
    struct qib_ibport *ibp = to_iport(ibdev, port);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    int ret;

    *out_mad = *in_mad;
    if (smp->class_version != 1) {
        smp->status |= IB_SMP_UNSUP_VERSION;
        ret = reply(smp);
        goto bail;
    }

    ret = check_mkey(ibp, smp, mad_flags);
    if (ret) {
        u32 port_num = be32_to_cpu(smp->attr_mod);

        /*
         * If this is a get/set portinfo, we already check the
         * M_Key if the MAD is for another port and the M_Key
         * is OK on the receiving port. This check is needed
         * to increment the error counters when the M_Key
         * fails to match on *both* ports.
         */
        if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
            (smp->method == IB_MGMT_METHOD_GET ||
             smp->method == IB_MGMT_METHOD_SET) &&
            port_num && port_num <= ibdev->phys_port_cnt &&
            port != port_num)
            (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
        ret = IB_MAD_RESULT_FAILURE;
        goto bail;
    }

    switch (smp->method) {
    case IB_MGMT_METHOD_GET:
        switch (smp->attr_id) {
        case IB_SMP_ATTR_NODE_DESC:
            ret = subn_get_nodedescription(smp, ibdev);
            goto bail;
        case IB_SMP_ATTR_NODE_INFO:
            ret = subn_get_nodeinfo(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_GUID_INFO:
            ret = subn_get_guidinfo(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_PORT_INFO:
            ret = subn_get_portinfo(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_PKEY_TABLE:
            ret = subn_get_pkeytable(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_SL_TO_VL_TABLE:
            ret = subn_get_sl_to_vl(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_VL_ARB_TABLE:
            ret = subn_get_vl_arb(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_SM_INFO:
            if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
                ret = IB_MAD_RESULT_SUCCESS |
                    IB_MAD_RESULT_CONSUMED;
                goto bail;
            }
            if (ibp->port_cap_flags & IB_PORT_SM) {
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;
            }
            /* FALLTHROUGH */
        default:
            smp->status |= IB_SMP_UNSUP_METH_ATTR;
            ret = reply(smp);
            goto bail;
        }

    case IB_MGMT_METHOD_SET:
        switch (smp->attr_id) {
        case IB_SMP_ATTR_GUID_INFO:
            ret = subn_set_guidinfo(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_PORT_INFO:
            ret = subn_set_portinfo(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_PKEY_TABLE:
            ret = subn_set_pkeytable(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_SL_TO_VL_TABLE:
            ret = subn_set_sl_to_vl(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_VL_ARB_TABLE:
            ret = subn_set_vl_arb(smp, ibdev, port);
            goto bail;
        case IB_SMP_ATTR_SM_INFO:
            if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
                ret = IB_MAD_RESULT_SUCCESS |
                    IB_MAD_RESULT_CONSUMED;
                goto bail;
            }
            if (ibp->port_cap_flags & IB_PORT_SM) {
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;
            }
            /* FALLTHROUGH */
        default:
            smp->status |= IB_SMP_UNSUP_METH_ATTR;
            ret = reply(smp);
            goto bail;
        }

    case IB_MGMT_METHOD_TRAP_REPRESS:
        if (smp->attr_id == IB_SMP_ATTR_NOTICE)
            ret = subn_trap_repress(smp, ibdev, port);
        else {
            smp->status |= IB_SMP_UNSUP_METH_ATTR;
            ret = reply(smp);
        }
        goto bail;

    case IB_MGMT_METHOD_TRAP:
    case IB_MGMT_METHOD_REPORT:
    case IB_MGMT_METHOD_REPORT_RESP:
    case IB_MGMT_METHOD_GET_RESP:
        /*
         * The ib_mad module will call us to process responses
         * before checking for other consumers.
         * Just tell the caller to process it normally.
         */
        ret = IB_MAD_RESULT_SUCCESS;
        goto bail;

    case IB_MGMT_METHOD_SEND:
        if (ib_get_smp_direction(smp) &&
            smp->attr_id == QIB_VENDOR_IPG) {
            ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
                                  smp->data[0]);
            ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
        } else
            ret = IB_MAD_RESULT_SUCCESS;
        goto bail;

    default:
        smp->status |= IB_SMP_UNSUP_METHOD;
        ret = reply(smp);
    }

bail:
    return ret;
}

static int process_perf(struct ib_device *ibdev, u8 port,
			const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}

static int cc_get_classportinfo(struct ib_cc_mad *ccp,
				struct ib_device *ibdev)
{
	struct ib_cc_classportinfo_attr *p =
		(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	p->base_version = 1;
	p->class_version = 1;

	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) ccp);
}
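
/*
 * Illustrative sketch, not part of the driver: the ClassPortInfo response
 * time above is encoded as 4.096 usec * 2^N, so resp_time_value == 18
 * advertises roughly 1.07 seconds.  The helper name below is hypothetical;
 * it just spells out that arithmetic.
 */
static inline u64 cc_resp_time_to_usec(u8 resp_time_value)
{
	/* 4.096 usec == 4096 nsec: shift in nanoseconds, then convert */
	return (4096ULL << resp_time_value) / 1000;
}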

static int cc_get_congestion_info(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_info_attr *p =
		(struct ib_cc_info_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	p->congestion_info = 0;
	p->control_table_cap = ppd->cc_max_table_entries;

	return reply((struct ib_smp *) ccp);
}

static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	int i;
	struct ib_cc_congestion_setting_attr *p =
		(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_cc_congestion_entry_shadow *entries;

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	spin_lock(&ppd->cc_shadow_lock);

	entries = ppd->congestion_entries_shadow->entries;
	p->port_control = cpu_to_be16(
		ppd->congestion_entries_shadow->port_control);
	p->control_map = cpu_to_be16(
		ppd->congestion_entries_shadow->control_map);
	for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
		p->entries[i].ccti_increase = entries[i].ccti_increase;
		p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
		p->entries[i].trigger_threshold = entries[i].trigger_threshold;
		p->entries[i].ccti_min = entries[i].ccti_min;
	}

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);
}

static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_table_attr *p =
		(struct ib_cc_table_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
	u32 max_cct_block;
	u32 cct_entry;
	struct ib_cc_table_entry_shadow *entries;
	int i;

	/* Is the table index more than what is supported? */
	if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
		goto bail;

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	spin_lock(&ppd->cc_shadow_lock);

	max_cct_block =
		(ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
	max_cct_block = max_cct_block ? max_cct_block - 1 : 0;

	if (cct_block_index > max_cct_block) {
		spin_unlock(&ppd->cc_shadow_lock);
		goto bail;
	}

	ccp->attr_mod = cpu_to_be32(cct_block_index);

	cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);

	cct_entry--;

	p->ccti_limit = cpu_to_be16(cct_entry);

	entries = &ppd->ccti_entries_shadow->
			entries[IB_CCT_ENTRIES * cct_block_index];
	cct_entry %= IB_CCT_ENTRIES;

	for (i = 0; i <= cct_entry; i++)
		p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);

bail:
	return reply_failure((struct ib_smp *) ccp);
}
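
/*
 * Illustrative sketch, not part of the driver: a congestion control table
 * MAD carries IB_CCT_ENTRIES entries per block, selected by the block
 * index in attr_mod, and ccti_limit reports the last valid entry index.
 * The hypothetical helper below shows which absolute entry indices one
 * block covers, mirroring the arithmetic in
 * cc_get_congestion_control_table() above.
 */
static inline void cct_block_entry_range(u32 cct_block_index,
					 u32 *first, u32 *last)
{
	*first = IB_CCT_ENTRIES * cct_block_index;
	*last = IB_CCT_ENTRIES * (cct_block_index + 1) - 1;
}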

static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_congestion_setting_attr *p =
		(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int i;

	ppd->cc_sl_control_map = be16_to_cpu(p->control_map);

	for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
		ppd->congestion_entries[i].ccti_increase =
			p->entries[i].ccti_increase;

		ppd->congestion_entries[i].ccti_timer =
			be16_to_cpu(p->entries[i].ccti_timer);

		ppd->congestion_entries[i].trigger_threshold =
			p->entries[i].trigger_threshold;

		ppd->congestion_entries[i].ccti_min =
			p->entries[i].ccti_min;
	}

	return reply((struct ib_smp *) ccp);
}

static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_table_attr *p =
		(struct ib_cc_table_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
	u32 cct_entry;
	struct ib_cc_table_entry_shadow *entries;
	int i;

	/* Is the table index more than what is supported? */
	if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
		goto bail;

	/* If this packet is the first in the sequence then
	 * zero the total table entry count.
	 */
	if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
		ppd->total_cct_entry = 0;

	cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;

	/* ccti_limit is 0 to 63 */
	ppd->total_cct_entry += (cct_entry + 1);

	if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
		goto bail;

	ppd->ccti_limit = be16_to_cpu(p->ccti_limit);

	entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);

	for (i = 0; i <= cct_entry; i++)
		entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);

	spin_lock(&ppd->cc_shadow_lock);

	ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
	memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
		(ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));

	ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
	ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
	memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
		IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);

bail:
	return reply_failure((struct ib_smp *) ccp);
}
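
/*
 * Illustrative sketch, not part of the driver: Set requests for the
 * congestion control table arrive one block at a time, and total_cct_entry
 * accumulates how many entries have been programmed so far.  Under that
 * assumption, the number of entries contributed by a single block follows
 * from ccti_limit as in cc_set_congestion_control_table() above; the
 * helper name below is hypothetical.
 */
static inline u32 cct_entries_in_block(u16 ccti_limit)
{
	/*
	 * ccti_limit identifies the last valid entry, so the count within
	 * the block is the remainder modulo IB_CCT_ENTRIES, plus one.
	 */
	return (ccti_limit % IB_CCT_ENTRIES) + 1;
}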

static int check_cc_key(struct qib_ibport *ibp,
			struct ib_cc_mad *ccp, int mad_flags)
{
	return 0;
}

static int process_cc(struct ib_device *ibdev, int mad_flags,
			u8 port, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	int ret;

	*out_mad = *in_mad;

	if (ccp->class_version != 2) {
		ccp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *)ccp);
		goto bail;
	}

	ret = check_cc_key(ibp, ccp, mad_flags);
	if (ret)
		goto bail;

	switch (ccp->method) {
	case IB_MGMT_METHOD_GET:
		switch (ccp->attr_id) {
		case IB_CC_ATTR_CLASSPORTINFO:
			ret = cc_get_classportinfo(ccp, ibdev);
			goto bail;

		case IB_CC_ATTR_CONGESTION_INFO:
			ret = cc_get_congestion_info(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CA_CONGESTION_SETTING:
			ret = cc_get_congestion_setting(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
			ret = cc_get_congestion_control_table(ccp, ibdev, port);
			goto bail;

		default:
			ccp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) ccp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (ccp->attr_id) {
		case IB_CC_ATTR_CA_CONGESTION_SETTING:
			ret = cc_set_congestion_setting(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
			ret = cc_set_congestion_control_table(ccp, ibdev, port);
			goto bail;

		default:
			ccp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) ccp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	default:
		ccp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) ccp);
	}

bail:
	return ret;
}

/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		    const struct ib_mad_hdr *in, size_t in_mad_size,
		    struct ib_mad_hdr *out, size_t *out_mad_size,
		    u16 *out_mad_pkey_index)
{
	int ret;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_CONG_MGMT:
		if (!ppd->congestion_entries_shadow ||
			 !qib_cc_table_size) {
			ret = IB_MAD_RESULT_SUCCESS;
			goto bail;
		}
		ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}
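
/*
 * Illustrative sketch, not part of the driver: the ib_mad core acts on the
 * flag bits in the value qib_process_mad() returns.  A rough reading of
 * the combinations used in this file, stated as a hypothetical helper:
 * SUCCESS alone means "processed, nothing to send", SUCCESS | REPLY means
 * "send out_mad back to the requester", and FAILURE means "drop it".
 */
static inline bool qib_mad_result_wants_reply(int ret)
{
	return (ret & IB_MAD_RESULT_SUCCESS) && (ret & IB_MAD_RESULT_REPLY);
}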

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			goto done;
	}
	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}

int qib_create_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;
	int ret;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
					      NULL, 0, send_handler,
					      NULL, NULL, 0);
		if (IS_ERR(agent)) {
			ret = PTR_ERR(agent);
			goto err;
		}

		/* Initialize xmit_wait structure */
		dd->pport[p].cong_stats.counter = 0;
		init_timer(&dd->pport[p].cong_stats.timer);
		dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
		dd->pport[p].cong_stats.timer.data =
			(unsigned long)(&dd->pport[p]);
		dd->pport[p].cong_stats.timer.expires = 0;
		add_timer(&dd->pport[p].cong_stats.timer);

		ibp->send_agent = agent;
	}

	return 0;

err:
	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
	}

	return ret;
}

void qib_free_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
		if (ibp->sm_ah) {
			ib_destroy_ah(&ibp->sm_ah->ibah);
			ibp->sm_ah = NULL;
		}
		if (dd->pport[p].cong_stats.timer.data)
			del_timer_sync(&dd->pport[p].cong_stats.timer);
	}
}