/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
	/* VF DMFS mbox with port flipped */
	void			*mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32			mirr_mbox_size;
	struct list_head	mirr_list;
	u64			mirr_rule_id;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
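
/*
 * Each resource type gets its own rb-tree (res_tracker.res_tree[type]),
 * keyed by res_id, so the lookup and insert above are O(log n) per type.
 * On a duplicate id the insert bails out with -EEXIST rather than
 * overwriting the existing entry.
 */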
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EDQUOT;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
				(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
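
/*
 * Worked example for the accounting above (numbers are illustrative only):
 * with quota[slave] = 10, guaranteed[slave] = 4 and allocated = 3, a request
 * for count = 3 takes from_rsvd = 1 unit out of the reserved (guaranteed)
 * pool and from_free = 2 out of the shared pool; it is refused only if the
 * shared pool would have to dip below what is still reserved for other
 * functions (free - from_free < reserved).
 */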
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
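
/*
 * Illustrative example of the quota formula above: with num_instances = 64
 * and num_vfs = 3, every function gets guaranteed = 64 / (2 * 4) = 8 and
 * quota = 64 / 2 + 8 = 40, i.e. half of the pool stays shared on a
 * first-come basis while each function keeps a small guaranteed slice.
 */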
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_VF_COUNTERS_PER_PORT;
}
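
/*
 * The helper above answers "how many VFs can still be promised a counter
 * per port" once the sink counter and the PF's MLX4_PF_COUNTERS_PER_PORT
 * counters are set aside.  Illustrative numbers: 64 counters on a dual-port
 * device leave (64 - 1 - 2 * 2) / MLX4_VF_COUNTERS_PER_PORT = 59 such VFs.
 */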
481 int mlx4_init_resource_tracker(struct mlx4_dev
*dev
)
483 struct mlx4_priv
*priv
= mlx4_priv(dev
);
486 int max_vfs_guarantee_counter
= get_max_gauranteed_vfs_counter(dev
);
488 priv
->mfunc
.master
.res_tracker
.slave_list
=
489 kzalloc(dev
->num_slaves
* sizeof(struct slave_list
),
491 if (!priv
->mfunc
.master
.res_tracker
.slave_list
)
494 for (i
= 0 ; i
< dev
->num_slaves
; i
++) {
495 for (t
= 0; t
< MLX4_NUM_OF_RESOURCE_TYPE
; ++t
)
496 INIT_LIST_HEAD(&priv
->mfunc
.master
.res_tracker
.
497 slave_list
[i
].res_list
[t
]);
498 mutex_init(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
501 mlx4_dbg(dev
, "Started init_resource_tracker: %ld slaves\n",
503 for (i
= 0 ; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++)
504 priv
->mfunc
.master
.res_tracker
.res_tree
[i
] = RB_ROOT
;
506 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
507 struct resource_allocator
*res_alloc
=
508 &priv
->mfunc
.master
.res_tracker
.res_alloc
[i
];
509 res_alloc
->quota
= kmalloc((dev
->persist
->num_vfs
+ 1) *
510 sizeof(int), GFP_KERNEL
);
511 res_alloc
->guaranteed
= kmalloc((dev
->persist
->num_vfs
+ 1) *
512 sizeof(int), GFP_KERNEL
);
513 if (i
== RES_MAC
|| i
== RES_VLAN
)
514 res_alloc
->allocated
= kzalloc(MLX4_MAX_PORTS
*
515 (dev
->persist
->num_vfs
517 sizeof(int), GFP_KERNEL
);
519 res_alloc
->allocated
= kzalloc((dev
->persist
->
521 sizeof(int), GFP_KERNEL
);
522 /* Reduce the sink counter */
523 if (i
== RES_COUNTER
)
524 res_alloc
->res_free
= dev
->caps
.max_counters
- 1;
526 if (!res_alloc
->quota
|| !res_alloc
->guaranteed
||
527 !res_alloc
->allocated
)
530 spin_lock_init(&res_alloc
->alloc_lock
);
531 for (t
= 0; t
< dev
->persist
->num_vfs
+ 1; t
++) {
532 struct mlx4_active_ports actv_ports
=
533 mlx4_get_active_ports(dev
, t
);
536 initialize_res_quotas(dev
, res_alloc
, RES_QP
,
537 t
, dev
->caps
.num_qps
-
538 dev
->caps
.reserved_qps
-
539 mlx4_num_reserved_sqps(dev
));
542 initialize_res_quotas(dev
, res_alloc
, RES_CQ
,
543 t
, dev
->caps
.num_cqs
-
544 dev
->caps
.reserved_cqs
);
547 initialize_res_quotas(dev
, res_alloc
, RES_SRQ
,
548 t
, dev
->caps
.num_srqs
-
549 dev
->caps
.reserved_srqs
);
552 initialize_res_quotas(dev
, res_alloc
, RES_MPT
,
553 t
, dev
->caps
.num_mpts
-
554 dev
->caps
.reserved_mrws
);
557 initialize_res_quotas(dev
, res_alloc
, RES_MTT
,
558 t
, dev
->caps
.num_mtts
-
559 dev
->caps
.reserved_mtts
);
562 if (t
== mlx4_master_func_num(dev
)) {
563 int max_vfs_pport
= 0;
564 /* Calculate the max vfs per port for */
566 for (j
= 0; j
< dev
->caps
.num_ports
;
568 struct mlx4_slaves_pport slaves_pport
=
569 mlx4_phys_to_slaves_pport(dev
, j
+ 1);
570 unsigned current_slaves
=
571 bitmap_weight(slaves_pport
.slaves
,
572 dev
->caps
.num_ports
) - 1;
573 if (max_vfs_pport
< current_slaves
)
577 res_alloc
->quota
[t
] =
580 res_alloc
->guaranteed
[t
] = 2;
581 for (j
= 0; j
< MLX4_MAX_PORTS
; j
++)
582 res_alloc
->res_port_free
[j
] =
585 res_alloc
->quota
[t
] = MLX4_MAX_MAC_NUM
;
586 res_alloc
->guaranteed
[t
] = 2;
590 if (t
== mlx4_master_func_num(dev
)) {
591 res_alloc
->quota
[t
] = MLX4_MAX_VLAN_NUM
;
592 res_alloc
->guaranteed
[t
] = MLX4_MAX_VLAN_NUM
/ 2;
593 for (j
= 0; j
< MLX4_MAX_PORTS
; j
++)
594 res_alloc
->res_port_free
[j
] =
597 res_alloc
->quota
[t
] = MLX4_MAX_VLAN_NUM
/ 2;
598 res_alloc
->guaranteed
[t
] = 0;
602 res_alloc
->quota
[t
] = dev
->caps
.max_counters
;
603 if (t
== mlx4_master_func_num(dev
))
604 res_alloc
->guaranteed
[t
] =
605 MLX4_PF_COUNTERS_PER_PORT
*
607 else if (t
<= max_vfs_guarantee_counter
)
608 res_alloc
->guaranteed
[t
] =
609 MLX4_VF_COUNTERS_PER_PORT
*
612 res_alloc
->guaranteed
[t
] = 0;
613 res_alloc
->res_free
-= res_alloc
->guaranteed
[t
];
618 if (i
== RES_MAC
|| i
== RES_VLAN
) {
619 for (j
= 0; j
< dev
->caps
.num_ports
; j
++)
620 if (test_bit(j
, actv_ports
.ports
))
621 res_alloc
->res_port_rsvd
[j
] +=
622 res_alloc
->guaranteed
[t
];
624 res_alloc
->res_reserved
+= res_alloc
->guaranteed
[t
];
628 spin_lock_init(&priv
->mfunc
.master
.res_tracker
.lock
);
632 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
633 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
);
634 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
= NULL
;
635 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
);
636 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
= NULL
;
637 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
);
638 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
= NULL
;
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);
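
/*
 * Note on the helpers above: they rewrite the QP context that a slave
 * passed in its command mailbox before it reaches the device.
 * update_pkey_index() maps the slave's virtual P_Key index to the physical
 * one through priv->virt2phys_pkey, and update_gid() rebases mgid_index via
 * mlx4_get_base_gid_ix() so each function only addresses its own slice of
 * the GID table.
 */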
737 static int update_vport_qp_param(struct mlx4_dev
*dev
,
738 struct mlx4_cmd_mailbox
*inbox
,
741 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
742 struct mlx4_vport_oper_state
*vp_oper
;
743 struct mlx4_priv
*priv
;
747 port
= (qpc
->pri_path
.sched_queue
& 0x40) ? 2 : 1;
748 priv
= mlx4_priv(dev
);
749 vp_oper
= &priv
->mfunc
.master
.vf_oper
[slave
].vport
[port
];
750 qp_type
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
752 err
= handle_counter(dev
, qpc
, slave
, port
);
756 if (MLX4_VGT
!= vp_oper
->state
.default_vlan
) {
757 /* the reserved QPs (special, proxy, tunnel)
758 * do not operate over vlans
760 if (mlx4_is_qp_reserved(dev
, qpn
))
763 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
764 if (qp_type
== MLX4_QP_ST_UD
||
765 (qp_type
== MLX4_QP_ST_MLX
&& mlx4_is_eth(dev
, port
))) {
766 if (dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_VSD_INIT2RTR
) {
767 *(__be32
*)inbox
->buf
=
768 cpu_to_be32(be32_to_cpu(*(__be32
*)inbox
->buf
) |
769 MLX4_QP_OPTPAR_VLAN_STRIPPING
);
770 qpc
->param3
&= ~cpu_to_be32(MLX4_STRIP_VLAN
);
772 struct mlx4_update_qp_params params
= {.flags
= 0};
774 err
= mlx4_update_qp(dev
, qpn
, MLX4_UPDATE_QP_VSD
, ¶ms
);
780 /* preserve IF_COUNTER flag */
781 qpc
->pri_path
.vlan_control
&=
782 MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER
;
783 if (vp_oper
->state
.link_state
== IFLA_VF_LINK_STATE_DISABLE
&&
784 dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_UPDATE_QP
) {
785 qpc
->pri_path
.vlan_control
|=
786 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
787 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED
|
788 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED
|
789 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
790 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
|
791 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
792 } else if (0 != vp_oper
->state
.default_vlan
) {
793 if (vp_oper
->state
.vlan_proto
== htons(ETH_P_8021AD
)) {
794 /* vst QinQ should block untagged on TX,
795 * but cvlan is in payload and phv is set so
796 * hw see it as untagged. Block tagged instead.
798 qpc
->pri_path
.vlan_control
|=
799 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED
|
800 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
801 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
802 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
;
803 } else { /* vst 802.1Q */
804 qpc
->pri_path
.vlan_control
|=
805 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
806 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
807 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
;
809 } else { /* priority tagged */
810 qpc
->pri_path
.vlan_control
|=
811 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
812 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
815 qpc
->pri_path
.fvl_rx
|= MLX4_FVL_RX_FORCE_ETH_VLAN
;
816 qpc
->pri_path
.vlan_index
= vp_oper
->vlan_idx
;
817 qpc
->pri_path
.fl
|= MLX4_FL_ETH_HIDE_CQE_VLAN
;
818 if (vp_oper
->state
.vlan_proto
== htons(ETH_P_8021AD
))
819 qpc
->pri_path
.fl
|= MLX4_FL_SV
;
821 qpc
->pri_path
.fl
|= MLX4_FL_CV
;
822 qpc
->pri_path
.feup
|= MLX4_FEUP_FORCE_ETH_UP
| MLX4_FVL_FORCE_ETH_VLAN
;
823 qpc
->pri_path
.sched_queue
&= 0xC7;
824 qpc
->pri_path
.sched_queue
|= (vp_oper
->state
.default_qos
) << 3;
825 qpc
->qos_vport
= vp_oper
->state
.qos_vport
;
827 if (vp_oper
->state
.spoofchk
) {
828 qpc
->pri_path
.feup
|= MLX4_FSM_FORCE_ETH_SRC_MAC
;
829 qpc
->pri_path
.grh_mylmc
= (0x80 & qpc
->pri_path
.grh_mylmc
) + vp_oper
->mac_idx
;
835 static int mpt_mask(struct mlx4_dev
*dev
)
837 return dev
->caps
.num_mpts
- 1;
840 static void *find_res(struct mlx4_dev
*dev
, u64 res_id
,
841 enum mlx4_resource type
)
843 struct mlx4_priv
*priv
= mlx4_priv(dev
);
845 return res_tracker_lookup(&priv
->mfunc
.master
.res_tracker
.res_tree
[type
],
849 static int get_res(struct mlx4_dev
*dev
, int slave
, u64 res_id
,
850 enum mlx4_resource type
,
853 struct res_common
*r
;
856 spin_lock_irq(mlx4_tlock(dev
));
857 r
= find_res(dev
, res_id
, type
);
863 if (r
->state
== RES_ANY_BUSY
) {
868 if (r
->owner
!= slave
) {
873 r
->from_state
= r
->state
;
874 r
->state
= RES_ANY_BUSY
;
877 *((struct res_common
**)res
) = r
;
880 spin_unlock_irq(mlx4_tlock(dev
));
884 int mlx4_get_slave_from_resource_id(struct mlx4_dev
*dev
,
885 enum mlx4_resource type
,
886 u64 res_id
, int *slave
)
889 struct res_common
*r
;
895 spin_lock(mlx4_tlock(dev
));
897 r
= find_res(dev
, id
, type
);
902 spin_unlock(mlx4_tlock(dev
));
907 static void put_res(struct mlx4_dev
*dev
, int slave
, u64 res_id
,
908 enum mlx4_resource type
)
910 struct res_common
*r
;
912 spin_lock_irq(mlx4_tlock(dev
));
913 r
= find_res(dev
, res_id
, type
);
915 r
->state
= r
->from_state
;
916 spin_unlock_irq(mlx4_tlock(dev
));
919 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
920 u64 in_param
, u64
*out_param
, int port
);
922 static int handle_existing_counter(struct mlx4_dev
*dev
, u8 slave
, int port
,
925 struct res_common
*r
;
926 struct res_counter
*counter
;
929 if (counter_index
== MLX4_SINK_COUNTER_INDEX(dev
))
932 spin_lock_irq(mlx4_tlock(dev
));
933 r
= find_res(dev
, counter_index
, RES_COUNTER
);
934 if (!r
|| r
->owner
!= slave
) {
937 counter
= container_of(r
, struct res_counter
, com
);
939 counter
->port
= port
;
942 spin_unlock_irq(mlx4_tlock(dev
));
946 static int handle_unexisting_counter(struct mlx4_dev
*dev
,
947 struct mlx4_qp_context
*qpc
, u8 slave
,
950 struct mlx4_priv
*priv
= mlx4_priv(dev
);
951 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
952 struct res_common
*tmp
;
953 struct res_counter
*counter
;
954 u64 counter_idx
= MLX4_SINK_COUNTER_INDEX(dev
);
957 spin_lock_irq(mlx4_tlock(dev
));
958 list_for_each_entry(tmp
,
959 &tracker
->slave_list
[slave
].res_list
[RES_COUNTER
],
961 counter
= container_of(tmp
, struct res_counter
, com
);
962 if (port
== counter
->port
) {
963 qpc
->pri_path
.counter_index
= counter
->com
.res_id
;
964 spin_unlock_irq(mlx4_tlock(dev
));
968 spin_unlock_irq(mlx4_tlock(dev
));
970 /* No existing counter, need to allocate a new counter */
971 err
= counter_alloc_res(dev
, slave
, RES_OP_RESERVE
, 0, 0, &counter_idx
,
973 if (err
== -ENOENT
) {
975 } else if (err
&& err
!= -ENOSPC
) {
976 mlx4_err(dev
, "%s: failed to create new counter for slave %d err %d\n",
977 __func__
, slave
, err
);
979 qpc
->pri_path
.counter_index
= counter_idx
;
980 mlx4_dbg(dev
, "%s: alloc new counter for slave %d index %d\n",
981 __func__
, slave
, qpc
->pri_path
.counter_index
);
988 static int handle_counter(struct mlx4_dev
*dev
, struct mlx4_qp_context
*qpc
,
991 if (qpc
->pri_path
.counter_index
!= MLX4_SINK_COUNTER_INDEX(dev
))
992 return handle_existing_counter(dev
, slave
, port
,
993 qpc
->pri_path
.counter_index
);
995 return handle_unexisting_counter(dev
, qpc
, slave
, port
);
998 static struct res_common
*alloc_qp_tr(int id
)
1002 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1006 ret
->com
.res_id
= id
;
1007 ret
->com
.state
= RES_QP_RESERVED
;
1008 ret
->local_qpn
= id
;
1009 INIT_LIST_HEAD(&ret
->mcg_list
);
1010 spin_lock_init(&ret
->mcg_spl
);
1011 atomic_set(&ret
->ref_count
, 0);
1016 static struct res_common
*alloc_mtt_tr(int id
, int order
)
1018 struct res_mtt
*ret
;
1020 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1024 ret
->com
.res_id
= id
;
1026 ret
->com
.state
= RES_MTT_ALLOCATED
;
1027 atomic_set(&ret
->ref_count
, 0);
1032 static struct res_common
*alloc_mpt_tr(int id
, int key
)
1034 struct res_mpt
*ret
;
1036 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1040 ret
->com
.res_id
= id
;
1041 ret
->com
.state
= RES_MPT_RESERVED
;
1047 static struct res_common
*alloc_eq_tr(int id
)
1051 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1055 ret
->com
.res_id
= id
;
1056 ret
->com
.state
= RES_EQ_RESERVED
;
1061 static struct res_common
*alloc_cq_tr(int id
)
1065 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1069 ret
->com
.res_id
= id
;
1070 ret
->com
.state
= RES_CQ_ALLOCATED
;
1071 atomic_set(&ret
->ref_count
, 0);
1076 static struct res_common
*alloc_srq_tr(int id
)
1078 struct res_srq
*ret
;
1080 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1084 ret
->com
.res_id
= id
;
1085 ret
->com
.state
= RES_SRQ_ALLOCATED
;
1086 atomic_set(&ret
->ref_count
, 0);
1091 static struct res_common
*alloc_counter_tr(int id
, int port
)
1093 struct res_counter
*ret
;
1095 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1099 ret
->com
.res_id
= id
;
1100 ret
->com
.state
= RES_COUNTER_ALLOCATED
;
1106 static struct res_common
*alloc_xrcdn_tr(int id
)
1108 struct res_xrcdn
*ret
;
1110 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1114 ret
->com
.res_id
= id
;
1115 ret
->com
.state
= RES_XRCD_ALLOCATED
;
1120 static struct res_common
*alloc_fs_rule_tr(u64 id
, int qpn
)
1122 struct res_fs_rule
*ret
;
1124 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1128 ret
->com
.res_id
= id
;
1129 ret
->com
.state
= RES_FS_RULE_ALLOCATED
;
1134 static struct res_common
*alloc_tr(u64 id
, enum mlx4_resource type
, int slave
,
1137 struct res_common
*ret
;
1141 ret
= alloc_qp_tr(id
);
1144 ret
= alloc_mpt_tr(id
, extra
);
1147 ret
= alloc_mtt_tr(id
, extra
);
1150 ret
= alloc_eq_tr(id
);
1153 ret
= alloc_cq_tr(id
);
1156 ret
= alloc_srq_tr(id
);
1159 pr_err("implementation missing\n");
1162 ret
= alloc_counter_tr(id
, extra
);
1165 ret
= alloc_xrcdn_tr(id
);
1168 ret
= alloc_fs_rule_tr(id
, extra
);
1179 int mlx4_calc_vf_counters(struct mlx4_dev
*dev
, int slave
, int port
,
1180 struct mlx4_counter
*data
)
1182 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1183 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1184 struct res_common
*tmp
;
1185 struct res_counter
*counter
;
1189 memset(data
, 0, sizeof(*data
));
1191 counters_arr
= kmalloc_array(dev
->caps
.max_counters
,
1192 sizeof(*counters_arr
), GFP_KERNEL
);
1196 spin_lock_irq(mlx4_tlock(dev
));
1197 list_for_each_entry(tmp
,
1198 &tracker
->slave_list
[slave
].res_list
[RES_COUNTER
],
1200 counter
= container_of(tmp
, struct res_counter
, com
);
1201 if (counter
->port
== port
) {
1202 counters_arr
[i
] = (int)tmp
->res_id
;
1206 spin_unlock_irq(mlx4_tlock(dev
));
1207 counters_arr
[i
] = -1;
1211 while (counters_arr
[i
] != -1) {
1212 err
= mlx4_get_counter_stats(dev
, counters_arr
[i
], data
,
1215 memset(data
, 0, sizeof(*data
));
1222 kfree(counters_arr
);
1226 static int add_res_range(struct mlx4_dev
*dev
, int slave
, u64 base
, int count
,
1227 enum mlx4_resource type
, int extra
)
1231 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1232 struct res_common
**res_arr
;
1233 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1234 struct rb_root
*root
= &tracker
->res_tree
[type
];
1236 res_arr
= kzalloc(count
* sizeof *res_arr
, GFP_KERNEL
);
1240 for (i
= 0; i
< count
; ++i
) {
1241 res_arr
[i
] = alloc_tr(base
+ i
, type
, slave
, extra
);
1243 for (--i
; i
>= 0; --i
)
1251 spin_lock_irq(mlx4_tlock(dev
));
1252 for (i
= 0; i
< count
; ++i
) {
1253 if (find_res(dev
, base
+ i
, type
)) {
1257 err
= res_tracker_insert(root
, res_arr
[i
]);
1260 list_add_tail(&res_arr
[i
]->list
,
1261 &tracker
->slave_list
[slave
].res_list
[type
]);
1263 spin_unlock_irq(mlx4_tlock(dev
));
1269 for (--i
; i
>= 0; --i
) {
1270 rb_erase(&res_arr
[i
]->node
, root
);
1271 list_del_init(&res_arr
[i
]->list
);
1274 spin_unlock_irq(mlx4_tlock(dev
));
1276 for (i
= 0; i
< count
; ++i
)
1284 static int remove_qp_ok(struct res_qp
*res
)
1286 if (res
->com
.state
== RES_QP_BUSY
|| atomic_read(&res
->ref_count
) ||
1287 !list_empty(&res
->mcg_list
)) {
1288 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1289 res
->com
.state
, atomic_read(&res
->ref_count
));
1291 } else if (res
->com
.state
!= RES_QP_RESERVED
) {
1298 static int remove_mtt_ok(struct res_mtt
*res
, int order
)
1300 if (res
->com
.state
== RES_MTT_BUSY
||
1301 atomic_read(&res
->ref_count
)) {
1302 pr_devel("%s-%d: state %s, ref_count %d\n",
1304 mtt_states_str(res
->com
.state
),
1305 atomic_read(&res
->ref_count
));
1307 } else if (res
->com
.state
!= RES_MTT_ALLOCATED
)
1309 else if (res
->order
!= order
)
1315 static int remove_mpt_ok(struct res_mpt
*res
)
1317 if (res
->com
.state
== RES_MPT_BUSY
)
1319 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1325 static int remove_eq_ok(struct res_eq
*res
)
1327 if (res
->com
.state
== RES_MPT_BUSY
)
1329 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1335 static int remove_counter_ok(struct res_counter
*res
)
1337 if (res
->com
.state
== RES_COUNTER_BUSY
)
1339 else if (res
->com
.state
!= RES_COUNTER_ALLOCATED
)
1345 static int remove_xrcdn_ok(struct res_xrcdn
*res
)
1347 if (res
->com
.state
== RES_XRCD_BUSY
)
1349 else if (res
->com
.state
!= RES_XRCD_ALLOCATED
)
1355 static int remove_fs_rule_ok(struct res_fs_rule
*res
)
1357 if (res
->com
.state
== RES_FS_RULE_BUSY
)
1359 else if (res
->com
.state
!= RES_FS_RULE_ALLOCATED
)
1365 static int remove_cq_ok(struct res_cq
*res
)
1367 if (res
->com
.state
== RES_CQ_BUSY
)
1369 else if (res
->com
.state
!= RES_CQ_ALLOCATED
)
1375 static int remove_srq_ok(struct res_srq
*res
)
1377 if (res
->com
.state
== RES_SRQ_BUSY
)
1379 else if (res
->com
.state
!= RES_SRQ_ALLOCATED
)
1385 static int remove_ok(struct res_common
*res
, enum mlx4_resource type
, int extra
)
1389 return remove_qp_ok((struct res_qp
*)res
);
1391 return remove_cq_ok((struct res_cq
*)res
);
1393 return remove_srq_ok((struct res_srq
*)res
);
1395 return remove_mpt_ok((struct res_mpt
*)res
);
1397 return remove_mtt_ok((struct res_mtt
*)res
, extra
);
1401 return remove_eq_ok((struct res_eq
*)res
);
1403 return remove_counter_ok((struct res_counter
*)res
);
1405 return remove_xrcdn_ok((struct res_xrcdn
*)res
);
1407 return remove_fs_rule_ok((struct res_fs_rule
*)res
);
1413 static int rem_res_range(struct mlx4_dev
*dev
, int slave
, u64 base
, int count
,
1414 enum mlx4_resource type
, int extra
)
1418 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1419 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1420 struct res_common
*r
;
1422 spin_lock_irq(mlx4_tlock(dev
));
1423 for (i
= base
; i
< base
+ count
; ++i
) {
1424 r
= res_tracker_lookup(&tracker
->res_tree
[type
], i
);
1429 if (r
->owner
!= slave
) {
1433 err
= remove_ok(r
, type
, extra
);
1438 for (i
= base
; i
< base
+ count
; ++i
) {
1439 r
= res_tracker_lookup(&tracker
->res_tree
[type
], i
);
1440 rb_erase(&r
->node
, &tracker
->res_tree
[type
]);
1447 spin_unlock_irq(mlx4_tlock(dev
));
1452 static int qp_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int qpn
,
1453 enum res_qp_states state
, struct res_qp
**qp
,
1456 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1457 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1461 spin_lock_irq(mlx4_tlock(dev
));
1462 r
= res_tracker_lookup(&tracker
->res_tree
[RES_QP
], qpn
);
1465 else if (r
->com
.owner
!= slave
)
1470 mlx4_dbg(dev
, "%s: failed RES_QP, 0x%llx\n",
1471 __func__
, r
->com
.res_id
);
1475 case RES_QP_RESERVED
:
1476 if (r
->com
.state
== RES_QP_MAPPED
&& !alloc
)
1479 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n", r
->com
.res_id
);
1484 if ((r
->com
.state
== RES_QP_RESERVED
&& alloc
) ||
1485 r
->com
.state
== RES_QP_HW
)
1488 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n",
1496 if (r
->com
.state
!= RES_QP_MAPPED
)
1504 r
->com
.from_state
= r
->com
.state
;
1505 r
->com
.to_state
= state
;
1506 r
->com
.state
= RES_QP_BUSY
;
1512 spin_unlock_irq(mlx4_tlock(dev
));
1517 static int mr_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1518 enum res_mpt_states state
, struct res_mpt
**mpt
)
1520 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1521 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1525 spin_lock_irq(mlx4_tlock(dev
));
1526 r
= res_tracker_lookup(&tracker
->res_tree
[RES_MPT
], index
);
1529 else if (r
->com
.owner
!= slave
)
1537 case RES_MPT_RESERVED
:
1538 if (r
->com
.state
!= RES_MPT_MAPPED
)
1542 case RES_MPT_MAPPED
:
1543 if (r
->com
.state
!= RES_MPT_RESERVED
&&
1544 r
->com
.state
!= RES_MPT_HW
)
1549 if (r
->com
.state
!= RES_MPT_MAPPED
)
1557 r
->com
.from_state
= r
->com
.state
;
1558 r
->com
.to_state
= state
;
1559 r
->com
.state
= RES_MPT_BUSY
;
1565 spin_unlock_irq(mlx4_tlock(dev
));
1570 static int eq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1571 enum res_eq_states state
, struct res_eq
**eq
)
1573 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1574 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1578 spin_lock_irq(mlx4_tlock(dev
));
1579 r
= res_tracker_lookup(&tracker
->res_tree
[RES_EQ
], index
);
1582 else if (r
->com
.owner
!= slave
)
1590 case RES_EQ_RESERVED
:
1591 if (r
->com
.state
!= RES_EQ_HW
)
1596 if (r
->com
.state
!= RES_EQ_RESERVED
)
1605 r
->com
.from_state
= r
->com
.state
;
1606 r
->com
.to_state
= state
;
1607 r
->com
.state
= RES_EQ_BUSY
;
1611 spin_unlock_irq(mlx4_tlock(dev
));
1619 static int cq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int cqn
,
1620 enum res_cq_states state
, struct res_cq
**cq
)
1622 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1623 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1627 spin_lock_irq(mlx4_tlock(dev
));
1628 r
= res_tracker_lookup(&tracker
->res_tree
[RES_CQ
], cqn
);
1631 } else if (r
->com
.owner
!= slave
) {
1633 } else if (state
== RES_CQ_ALLOCATED
) {
1634 if (r
->com
.state
!= RES_CQ_HW
)
1636 else if (atomic_read(&r
->ref_count
))
1640 } else if (state
!= RES_CQ_HW
|| r
->com
.state
!= RES_CQ_ALLOCATED
) {
1647 r
->com
.from_state
= r
->com
.state
;
1648 r
->com
.to_state
= state
;
1649 r
->com
.state
= RES_CQ_BUSY
;
1654 spin_unlock_irq(mlx4_tlock(dev
));
1659 static int srq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1660 enum res_srq_states state
, struct res_srq
**srq
)
1662 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1663 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1667 spin_lock_irq(mlx4_tlock(dev
));
1668 r
= res_tracker_lookup(&tracker
->res_tree
[RES_SRQ
], index
);
1671 } else if (r
->com
.owner
!= slave
) {
1673 } else if (state
== RES_SRQ_ALLOCATED
) {
1674 if (r
->com
.state
!= RES_SRQ_HW
)
1676 else if (atomic_read(&r
->ref_count
))
1678 } else if (state
!= RES_SRQ_HW
|| r
->com
.state
!= RES_SRQ_ALLOCATED
) {
1683 r
->com
.from_state
= r
->com
.state
;
1684 r
->com
.to_state
= state
;
1685 r
->com
.state
= RES_SRQ_BUSY
;
1690 spin_unlock_irq(mlx4_tlock(dev
));
1695 static void res_abort_move(struct mlx4_dev
*dev
, int slave
,
1696 enum mlx4_resource type
, int id
)
1698 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1699 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1700 struct res_common
*r
;
1702 spin_lock_irq(mlx4_tlock(dev
));
1703 r
= res_tracker_lookup(&tracker
->res_tree
[type
], id
);
1704 if (r
&& (r
->owner
== slave
))
1705 r
->state
= r
->from_state
;
1706 spin_unlock_irq(mlx4_tlock(dev
));
1709 static void res_end_move(struct mlx4_dev
*dev
, int slave
,
1710 enum mlx4_resource type
, int id
)
1712 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1713 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1714 struct res_common
*r
;
1716 spin_lock_irq(mlx4_tlock(dev
));
1717 r
= res_tracker_lookup(&tracker
->res_tree
[type
], id
);
1718 if (r
&& (r
->owner
== slave
))
1719 r
->state
= r
->to_state
;
1720 spin_unlock_irq(mlx4_tlock(dev
));
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
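
/*
 * QPNs below the firmware-reserved region are never ICM-mapped or freed by
 * the wrappers below (see the !fw_reserved() checks), and valid_reserved()
 * limits which reserved proxy/tunnel QPNs a given slave may register.
 */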
1734 static int qp_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1735 u64 in_param
, u64
*out_param
)
1745 case RES_OP_RESERVE
:
1746 count
= get_param_l(&in_param
) & 0xffffff;
1747 /* Turn off all unsupported QP allocation flags that the
1748 * slave tries to set.
1750 flags
= (get_param_l(&in_param
) >> 24) & dev
->caps
.alloc_res_qp_mask
;
1751 align
= get_param_h(&in_param
);
1752 err
= mlx4_grant_resource(dev
, slave
, RES_QP
, count
, 0);
1756 err
= __mlx4_qp_reserve_range(dev
, count
, align
, &base
, flags
);
1758 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1762 err
= add_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
1764 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1765 __mlx4_qp_release_range(dev
, base
, count
);
1768 set_param_l(out_param
, base
);
1770 case RES_OP_MAP_ICM
:
1771 qpn
= get_param_l(&in_param
) & 0x7fffff;
1772 if (valid_reserved(dev
, slave
, qpn
)) {
1773 err
= add_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
1778 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
,
1783 if (!fw_reserved(dev
, qpn
)) {
1784 err
= __mlx4_qp_alloc_icm(dev
, qpn
, GFP_KERNEL
);
1786 res_abort_move(dev
, slave
, RES_QP
, qpn
);
1791 res_end_move(dev
, slave
, RES_QP
, qpn
);
1801 static int mtt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1802 u64 in_param
, u64
*out_param
)
1808 if (op
!= RES_OP_RESERVE_AND_MAP
)
1811 order
= get_param_l(&in_param
);
1813 err
= mlx4_grant_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1817 base
= __mlx4_alloc_mtt_range(dev
, order
);
1819 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1823 err
= add_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
1825 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1826 __mlx4_free_mtt_range(dev
, base
, order
);
1828 set_param_l(out_param
, base
);
1834 static int mpt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1835 u64 in_param
, u64
*out_param
)
1840 struct res_mpt
*mpt
;
1843 case RES_OP_RESERVE
:
1844 err
= mlx4_grant_resource(dev
, slave
, RES_MPT
, 1, 0);
1848 index
= __mlx4_mpt_reserve(dev
);
1850 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1853 id
= index
& mpt_mask(dev
);
1855 err
= add_res_range(dev
, slave
, id
, 1, RES_MPT
, index
);
1857 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1858 __mlx4_mpt_release(dev
, index
);
1861 set_param_l(out_param
, index
);
1863 case RES_OP_MAP_ICM
:
1864 index
= get_param_l(&in_param
);
1865 id
= index
& mpt_mask(dev
);
1866 err
= mr_res_start_move_to(dev
, slave
, id
,
1867 RES_MPT_MAPPED
, &mpt
);
1871 err
= __mlx4_mpt_alloc_icm(dev
, mpt
->key
, GFP_KERNEL
);
1873 res_abort_move(dev
, slave
, RES_MPT
, id
);
1877 res_end_move(dev
, slave
, RES_MPT
, id
);
1883 static int cq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1884 u64 in_param
, u64
*out_param
)
1890 case RES_OP_RESERVE_AND_MAP
:
1891 err
= mlx4_grant_resource(dev
, slave
, RES_CQ
, 1, 0);
1895 err
= __mlx4_cq_alloc_icm(dev
, &cqn
);
1897 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1901 err
= add_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
1903 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1904 __mlx4_cq_free_icm(dev
, cqn
);
1908 set_param_l(out_param
, cqn
);
1918 static int srq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1919 u64 in_param
, u64
*out_param
)
1925 case RES_OP_RESERVE_AND_MAP
:
1926 err
= mlx4_grant_resource(dev
, slave
, RES_SRQ
, 1, 0);
1930 err
= __mlx4_srq_alloc_icm(dev
, &srqn
);
1932 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1936 err
= add_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
1938 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1939 __mlx4_srq_free_icm(dev
, srqn
);
1943 set_param_l(out_param
, srqn
);
1953 static int mac_find_smac_ix_in_slave(struct mlx4_dev
*dev
, int slave
, int port
,
1954 u8 smac_index
, u64
*mac
)
1956 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1957 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1958 struct list_head
*mac_list
=
1959 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1960 struct mac_res
*res
, *tmp
;
1962 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1963 if (res
->smac_index
== smac_index
&& res
->port
== (u8
) port
) {
1971 static int mac_add_to_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
, int port
, u8 smac_index
)
1973 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1974 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1975 struct list_head
*mac_list
=
1976 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1977 struct mac_res
*res
, *tmp
;
1979 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1980 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
1981 /* mac found. update ref count */
1987 if (mlx4_grant_resource(dev
, slave
, RES_MAC
, 1, port
))
1989 res
= kzalloc(sizeof *res
, GFP_KERNEL
);
1991 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
1995 res
->port
= (u8
) port
;
1996 res
->smac_index
= smac_index
;
1998 list_add_tail(&res
->list
,
1999 &tracker
->slave_list
[slave
].res_list
[RES_MAC
]);
2003 static void mac_del_from_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
,
2006 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2007 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2008 struct list_head
*mac_list
=
2009 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
2010 struct mac_res
*res
, *tmp
;
2012 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
2013 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
2014 if (!--res
->ref_count
) {
2015 list_del(&res
->list
);
2016 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
2024 static void rem_slave_macs(struct mlx4_dev
*dev
, int slave
)
2026 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2027 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2028 struct list_head
*mac_list
=
2029 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
2030 struct mac_res
*res
, *tmp
;
2033 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
2034 list_del(&res
->list
);
2035 /* dereference the mac the num times the slave referenced it */
2036 for (i
= 0; i
< res
->ref_count
; i
++)
2037 __mlx4_unregister_mac(dev
, res
->port
, res
->mac
);
2038 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, res
->port
);
2043 static int mac_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2044 u64 in_param
, u64
*out_param
, int in_port
)
2051 if (op
!= RES_OP_RESERVE_AND_MAP
)
2054 port
= !in_port
? get_param_l(out_param
) : in_port
;
2055 port
= mlx4_slave_convert_port(
2062 err
= __mlx4_register_mac(dev
, port
, mac
);
2065 set_param_l(out_param
, err
);
2070 err
= mac_add_to_slave(dev
, slave
, mac
, port
, smac_index
);
2072 __mlx4_unregister_mac(dev
, port
, mac
);
2077 static int vlan_add_to_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2078 int port
, int vlan_index
)
2080 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2081 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2082 struct list_head
*vlan_list
=
2083 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2084 struct vlan_res
*res
, *tmp
;
2086 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2087 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2088 /* vlan found. update ref count */
2094 if (mlx4_grant_resource(dev
, slave
, RES_VLAN
, 1, port
))
2096 res
= kzalloc(sizeof(*res
), GFP_KERNEL
);
2098 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, port
);
2102 res
->port
= (u8
) port
;
2103 res
->vlan_index
= vlan_index
;
2105 list_add_tail(&res
->list
,
2106 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
]);
2111 static void vlan_del_from_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2114 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2115 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2116 struct list_head
*vlan_list
=
2117 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2118 struct vlan_res
*res
, *tmp
;
2120 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2121 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2122 if (!--res
->ref_count
) {
2123 list_del(&res
->list
);
2124 mlx4_release_resource(dev
, slave
, RES_VLAN
,
2133 static void rem_slave_vlans(struct mlx4_dev
*dev
, int slave
)
2135 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2136 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2137 struct list_head
*vlan_list
=
2138 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2139 struct vlan_res
*res
, *tmp
;
2142 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2143 list_del(&res
->list
);
2144 /* dereference the vlan the num times the slave referenced it */
2145 for (i
= 0; i
< res
->ref_count
; i
++)
2146 __mlx4_unregister_vlan(dev
, res
->port
, res
->vlan
);
2147 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, res
->port
);
2152 static int vlan_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2153 u64 in_param
, u64
*out_param
, int in_port
)
2155 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2156 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2162 port
= !in_port
? get_param_l(out_param
) : in_port
;
2164 if (!port
|| op
!= RES_OP_RESERVE_AND_MAP
)
2167 port
= mlx4_slave_convert_port(
2172 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2173 if (!in_port
&& port
> 0 && port
<= dev
->caps
.num_ports
) {
2174 slave_state
[slave
].old_vlan_api
= true;
2178 vlan
= (u16
) in_param
;
2180 err
= __mlx4_register_vlan(dev
, port
, vlan
, &vlan_index
);
2182 set_param_l(out_param
, (u32
) vlan_index
);
2183 err
= vlan_add_to_slave(dev
, slave
, vlan
, port
, vlan_index
);
2185 __mlx4_unregister_vlan(dev
, port
, vlan
);
2190 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2191 u64 in_param
, u64
*out_param
, int port
)
2196 if (op
!= RES_OP_RESERVE
)
2199 err
= mlx4_grant_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2203 err
= __mlx4_counter_alloc(dev
, &index
);
2205 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2209 err
= add_res_range(dev
, slave
, index
, 1, RES_COUNTER
, port
);
2211 __mlx4_counter_free(dev
, index
);
2212 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2214 set_param_l(out_param
, index
);
2220 static int xrcdn_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2221 u64 in_param
, u64
*out_param
)
2226 if (op
!= RES_OP_RESERVE
)
2229 err
= __mlx4_xrcd_alloc(dev
, &xrcdn
);
2233 err
= add_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2235 __mlx4_xrcd_free(dev
, xrcdn
);
2237 set_param_l(out_param
, xrcdn
);
2242 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2243 struct mlx4_vhcr
*vhcr
,
2244 struct mlx4_cmd_mailbox
*inbox
,
2245 struct mlx4_cmd_mailbox
*outbox
,
2246 struct mlx4_cmd_info
*cmd
)
2249 int alop
= vhcr
->op_modifier
;
2251 switch (vhcr
->in_modifier
& 0xFF) {
2253 err
= qp_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2254 vhcr
->in_param
, &vhcr
->out_param
);
2258 err
= mtt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2259 vhcr
->in_param
, &vhcr
->out_param
);
2263 err
= mpt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2264 vhcr
->in_param
, &vhcr
->out_param
);
2268 err
= cq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2269 vhcr
->in_param
, &vhcr
->out_param
);
2273 err
= srq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2274 vhcr
->in_param
, &vhcr
->out_param
);
2278 err
= mac_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2279 vhcr
->in_param
, &vhcr
->out_param
,
2280 (vhcr
->in_modifier
>> 8) & 0xFF);
2284 err
= vlan_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2285 vhcr
->in_param
, &vhcr
->out_param
,
2286 (vhcr
->in_modifier
>> 8) & 0xFF);
2290 err
= counter_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2291 vhcr
->in_param
, &vhcr
->out_param
, 0);
2295 err
= xrcdn_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2296 vhcr
->in_param
, &vhcr
->out_param
);
2307 static int qp_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2316 case RES_OP_RESERVE
:
2317 base
= get_param_l(&in_param
) & 0x7fffff;
2318 count
= get_param_h(&in_param
);
2319 err
= rem_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
2322 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
2323 __mlx4_qp_release_range(dev
, base
, count
);
2325 case RES_OP_MAP_ICM
:
2326 qpn
= get_param_l(&in_param
) & 0x7fffff;
2327 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_RESERVED
,
2332 if (!fw_reserved(dev
, qpn
))
2333 __mlx4_qp_free_icm(dev
, qpn
);
2335 res_end_move(dev
, slave
, RES_QP
, qpn
);
2337 if (valid_reserved(dev
, slave
, qpn
))
2338 err
= rem_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
2347 static int mtt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2348 u64 in_param
, u64
*out_param
)
2354 if (op
!= RES_OP_RESERVE_AND_MAP
)
2357 base
= get_param_l(&in_param
);
2358 order
= get_param_h(&in_param
);
2359 err
= rem_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
2361 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
2362 __mlx4_free_mtt_range(dev
, base
, order
);
2367 static int mpt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2373 struct res_mpt
*mpt
;
2376 case RES_OP_RESERVE
:
2377 index
= get_param_l(&in_param
);
2378 id
= index
& mpt_mask(dev
);
2379 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2383 put_res(dev
, slave
, id
, RES_MPT
);
2385 err
= rem_res_range(dev
, slave
, id
, 1, RES_MPT
, 0);
2388 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
2389 __mlx4_mpt_release(dev
, index
);
2391 case RES_OP_MAP_ICM
:
2392 index
= get_param_l(&in_param
);
2393 id
= index
& mpt_mask(dev
);
2394 err
= mr_res_start_move_to(dev
, slave
, id
,
2395 RES_MPT_RESERVED
, &mpt
);
2399 __mlx4_mpt_free_icm(dev
, mpt
->key
);
2400 res_end_move(dev
, slave
, RES_MPT
, id
);
2409 static int cq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2410 u64 in_param
, u64
*out_param
)
2416 case RES_OP_RESERVE_AND_MAP
:
2417 cqn
= get_param_l(&in_param
);
2418 err
= rem_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
2422 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
2423 __mlx4_cq_free_icm(dev
, cqn
);
2434 static int srq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2435 u64 in_param
, u64
*out_param
)
2441 case RES_OP_RESERVE_AND_MAP
:
2442 srqn
= get_param_l(&in_param
);
2443 err
= rem_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
2447 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
2448 __mlx4_srq_free_icm(dev
, srqn
);
2459 static int mac_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2460 u64 in_param
, u64
*out_param
, int in_port
)
2466 case RES_OP_RESERVE_AND_MAP
:
2467 port
= !in_port
? get_param_l(out_param
) : in_port
;
2468 port
= mlx4_slave_convert_port(
2473 mac_del_from_slave(dev
, slave
, in_param
, port
);
2474 __mlx4_unregister_mac(dev
, port
, in_param
);
2485 static int vlan_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2486 u64 in_param
, u64
*out_param
, int port
)
2488 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2489 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2492 port
= mlx4_slave_convert_port(
2498 case RES_OP_RESERVE_AND_MAP
:
2499 if (slave_state
[slave
].old_vlan_api
)
2503 vlan_del_from_slave(dev
, slave
, in_param
, port
);
2504 __mlx4_unregister_vlan(dev
, port
, in_param
);
2514 static int counter_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2515 u64 in_param
, u64
*out_param
)
2520 if (op
!= RES_OP_RESERVE
)
2523 index
= get_param_l(&in_param
);
2524 if (index
== MLX4_SINK_COUNTER_INDEX(dev
))
2527 err
= rem_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
2531 __mlx4_counter_free(dev
, index
);
2532 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2537 static int xrcdn_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2538 u64 in_param
, u64
*out_param
)
2543 if (op
!= RES_OP_RESERVE
)
2546 xrcdn
= get_param_l(&in_param
);
2547 err
= rem_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2551 __mlx4_xrcd_free(dev
, xrcdn
);
2556 int mlx4_FREE_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2557 struct mlx4_vhcr
*vhcr
,
2558 struct mlx4_cmd_mailbox
*inbox
,
2559 struct mlx4_cmd_mailbox
*outbox
,
2560 struct mlx4_cmd_info
*cmd
)
2563 int alop
= vhcr
->op_modifier
;
2565 switch (vhcr
->in_modifier
& 0xFF) {
2567 err
= qp_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2572 err
= mtt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2573 vhcr
->in_param
, &vhcr
->out_param
);
2577 err
= mpt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2582 err
= cq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2583 vhcr
->in_param
, &vhcr
->out_param
);
2587 err
= srq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2588 vhcr
->in_param
, &vhcr
->out_param
);
2592 err
= mac_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2593 vhcr
->in_param
, &vhcr
->out_param
,
2594 (vhcr
->in_modifier
>> 8) & 0xFF);
2598 err
= vlan_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2599 vhcr
->in_param
, &vhcr
->out_param
,
2600 (vhcr
->in_modifier
>> 8) & 0xFF);
2604 err
= counter_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2605 vhcr
->in_param
, &vhcr
->out_param
);
2609 err
= xrcdn_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2610 vhcr
->in_param
, &vhcr
->out_param
);
2618 /* ugly but other choices are uglier */
2619 static int mr_phys_mpt(struct mlx4_mpt_entry
*mpt
)
2621 return (be32_to_cpu(mpt
->flags
) >> 9) & 1;
2624 static int mr_get_mtt_addr(struct mlx4_mpt_entry
*mpt
)
2626 return (int)be64_to_cpu(mpt
->mtt_addr
) & 0xfffffff8;
2629 static int mr_get_mtt_size(struct mlx4_mpt_entry
*mpt
)
2631 return be32_to_cpu(mpt
->mtt_sz
);
2634 static u32
mr_get_pd(struct mlx4_mpt_entry
*mpt
)
2636 return be32_to_cpu(mpt
->pd_flags
) & 0x00ffffff;
2639 static int mr_is_fmr(struct mlx4_mpt_entry
*mpt
)
2641 return be32_to_cpu(mpt
->pd_flags
) & MLX4_MPT_PD_FLAG_FAST_REG
;
2644 static int mr_is_bind_enabled(struct mlx4_mpt_entry
*mpt
)
2646 return be32_to_cpu(mpt
->flags
) & MLX4_MPT_FLAG_BIND_ENABLE
;
2649 static int mr_is_region(struct mlx4_mpt_entry
*mpt
)
2651 return be32_to_cpu(mpt
->flags
) & MLX4_MPT_FLAG_REGION
;
2654 static int qp_get_mtt_addr(struct mlx4_qp_context
*qpc
)
2656 return be32_to_cpu(qpc
->mtt_base_addr_l
) & 0xfffffff8;
2659 static int srq_get_mtt_addr(struct mlx4_srq_context
*srqc
)
2661 return be32_to_cpu(srqc
->mtt_base_addr_l
) & 0xfffffff8;
2664 static int qp_get_mtt_size(struct mlx4_qp_context
*qpc
)
2666 int page_shift
= (qpc
->log_page_size
& 0x3f) + 12;
2667 int log_sq_size
= (qpc
->sq_size_stride
>> 3) & 0xf;
2668 int log_sq_sride
= qpc
->sq_size_stride
& 7;
2669 int log_rq_size
= (qpc
->rq_size_stride
>> 3) & 0xf;
2670 int log_rq_stride
= qpc
->rq_size_stride
& 7;
2671 int srq
= (be32_to_cpu(qpc
->srqn
) >> 24) & 1;
2672 int rss
= (be32_to_cpu(qpc
->flags
) >> 13) & 1;
2673 u32 ts
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
2674 int xrc
= (ts
== MLX4_QP_ST_XRC
) ? 1 : 0;
2679 int page_offset
= (be32_to_cpu(qpc
->params2
) >> 6) & 0x3f;
2681 sq_size
= 1 << (log_sq_size
+ log_sq_sride
+ 4);
2682 rq_size
= (srq
|rss
|xrc
) ? 0 : (1 << (log_rq_size
+ log_rq_stride
+ 4));
2683 total_mem
= sq_size
+ rq_size
;
2685 roundup_pow_of_two((total_mem
+ (page_offset
<< 6)) >>
2691 static int check_mtt_range(struct mlx4_dev
*dev
, int slave
, int start
,
2692 int size
, struct res_mtt
*mtt
)
2694 int res_start
= mtt
->com
.res_id
;
2695 int res_size
= (1 << mtt
->order
);
2697 if (start
< res_start
|| start
+ size
> res_start
+ res_size
)
2702 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2703 struct mlx4_vhcr
*vhcr
,
2704 struct mlx4_cmd_mailbox
*inbox
,
2705 struct mlx4_cmd_mailbox
*outbox
,
2706 struct mlx4_cmd_info
*cmd
)
2709 int index
= vhcr
->in_modifier
;
2710 struct res_mtt
*mtt
;
2711 struct res_mpt
*mpt
;
2712 int mtt_base
= mr_get_mtt_addr(inbox
->buf
) / dev
->caps
.mtt_entry_sz
;
2718 id
= index
& mpt_mask(dev
);
2719 err
= mr_res_start_move_to(dev
, slave
, id
, RES_MPT_HW
, &mpt
);
2723 /* Disable memory windows for VFs. */
2724 if (!mr_is_region(inbox
->buf
)) {
2729 /* Make sure that the PD bits related to the slave id are zeros. */
2730 pd
= mr_get_pd(inbox
->buf
);
2731 pd_slave
= (pd
>> 17) & 0x7f;
2732 if (pd_slave
!= 0 && --pd_slave
!= slave
) {
2737 if (mr_is_fmr(inbox
->buf
)) {
2738 /* FMR and Bind Enable are forbidden in slave devices. */
2739 if (mr_is_bind_enabled(inbox
->buf
)) {
2743 /* FMR and Memory Windows are also forbidden. */
2744 if (!mr_is_region(inbox
->buf
)) {
2750 phys
= mr_phys_mpt(inbox
->buf
);
2752 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2756 err
= check_mtt_range(dev
, slave
, mtt_base
,
2757 mr_get_mtt_size(inbox
->buf
), mtt
);
2764 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2769 atomic_inc(&mtt
->ref_count
);
2770 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2773 res_end_move(dev
, slave
, RES_MPT
, id
);
2778 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2780 res_abort_move(dev
, slave
, RES_MPT
, id
);
2785 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2786 struct mlx4_vhcr
*vhcr
,
2787 struct mlx4_cmd_mailbox
*inbox
,
2788 struct mlx4_cmd_mailbox
*outbox
,
2789 struct mlx4_cmd_info
*cmd
)
2792 int index
= vhcr
->in_modifier
;
2793 struct res_mpt
*mpt
;
2796 id
= index
& mpt_mask(dev
);
2797 err
= mr_res_start_move_to(dev
, slave
, id
, RES_MPT_MAPPED
, &mpt
);
2801 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2806 atomic_dec(&mpt
->mtt
->ref_count
);
2808 res_end_move(dev
, slave
, RES_MPT
, id
);
2812 res_abort_move(dev
, slave
, RES_MPT
, id
);
2817 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2818 struct mlx4_vhcr
*vhcr
,
2819 struct mlx4_cmd_mailbox
*inbox
,
2820 struct mlx4_cmd_mailbox
*outbox
,
2821 struct mlx4_cmd_info
*cmd
)
2824 int index
= vhcr
->in_modifier
;
2825 struct res_mpt
*mpt
;
2828 id
= index
& mpt_mask(dev
);
2829 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2833 if (mpt
->com
.from_state
== RES_MPT_MAPPED
) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
2842 struct mlx4_mpt_entry
*mpt_entry
= mlx4_table_find(
2843 &mlx4_priv(dev
)->mr_table
.dmpt_table
,
2846 if (NULL
== mpt_entry
|| NULL
== outbox
->buf
) {
2851 memcpy(outbox
->buf
, mpt_entry
, sizeof(*mpt_entry
));
2854 } else if (mpt
->com
.from_state
== RES_MPT_HW
) {
2855 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2863 put_res(dev
, slave
, id
, RES_MPT
);
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox);
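/* RST2INIT on behalf of a slave: move the QP to the HW state, look up and
 * reference the MTT range, the receive/send CQs and (when used) the SRQ
 * named in the QP context, fix up the proxy/tunnel qkey and the pkey index,
 * and forward the command to firmware.  The original param3 is also saved
 * so a VST configuration can later be reverted to VGT.
 */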
2899 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
2900 struct mlx4_vhcr
*vhcr
,
2901 struct mlx4_cmd_mailbox
*inbox
,
2902 struct mlx4_cmd_mailbox
*outbox
,
2903 struct mlx4_cmd_info
*cmd
)
2906 int qpn
= vhcr
->in_modifier
& 0x7fffff;
2907 struct res_mtt
*mtt
;
2909 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
2910 int mtt_base
= qp_get_mtt_addr(qpc
) / dev
->caps
.mtt_entry_sz
;
2911 int mtt_size
= qp_get_mtt_size(qpc
);
2914 int rcqn
= qp_get_rcqn(qpc
);
2915 int scqn
= qp_get_scqn(qpc
);
2916 u32 srqn
= qp_get_srqn(qpc
) & 0xffffff;
2917 int use_srq
= (qp_get_srqn(qpc
) >> 24) & 1;
2918 struct res_srq
*srq
;
2919 int local_qpn
= be32_to_cpu(qpc
->local_qpn
) & 0xffffff;
2921 err
= adjust_qp_sched_queue(dev
, slave
, qpc
, inbox
);
2925 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_HW
, &qp
, 0);
2928 qp
->local_qpn
= local_qpn
;
2929 qp
->sched_queue
= 0;
2931 qp
->vlan_control
= 0;
2933 qp
->pri_path_fl
= 0;
2936 qp
->qpc_flags
= be32_to_cpu(qpc
->flags
);
2938 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2942 err
= check_mtt_range(dev
, slave
, mtt_base
, mtt_size
, mtt
);
2946 err
= get_res(dev
, slave
, rcqn
, RES_CQ
, &rcq
);
2951 err
= get_res(dev
, slave
, scqn
, RES_CQ
, &scq
);
2958 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
2963 adjust_proxy_tun_qkey(dev
, vhcr
, qpc
);
2964 update_pkey_index(dev
, slave
, inbox
);
2965 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2968 atomic_inc(&mtt
->ref_count
);
2970 atomic_inc(&rcq
->ref_count
);
2972 atomic_inc(&scq
->ref_count
);
2976 put_res(dev
, slave
, scqn
, RES_CQ
);
2979 atomic_inc(&srq
->ref_count
);
2980 put_res(dev
, slave
, srqn
, RES_SRQ
);
2984 /* Save param3 for dynamic changes from VST back to VGT */
2985 qp
->param3
= qpc
->param3
;
2986 put_res(dev
, slave
, rcqn
, RES_CQ
);
2987 put_res(dev
, slave
, mtt_base
, RES_MTT
);
2988 res_end_move(dev
, slave
, RES_QP
, qpn
);
2994 put_res(dev
, slave
, srqn
, RES_SRQ
);
2997 put_res(dev
, slave
, scqn
, RES_CQ
);
2999 put_res(dev
, slave
, rcqn
, RES_CQ
);
3001 put_res(dev
, slave
, mtt_base
, RES_MTT
);
3003 res_abort_move(dev
, slave
, RES_QP
, qpn
);
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
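/* EQs are tracked per slave: the resource id is (slave << 10) | eqn, so one
 * function cannot touch another function's EQs.  SW2HW_EQ registers the
 * range, moves the EQ to the HW state, validates the MTT range taken from
 * the EQ context and forwards the command; the MTT keeps an extra reference
 * while the EQ is in HW ownership.
 */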
3040 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3041 struct mlx4_vhcr
*vhcr
,
3042 struct mlx4_cmd_mailbox
*inbox
,
3043 struct mlx4_cmd_mailbox
*outbox
,
3044 struct mlx4_cmd_info
*cmd
)
3047 int eqn
= vhcr
->in_modifier
;
3048 int res_id
= (slave
<< 10) | eqn
;
3049 struct mlx4_eq_context
*eqc
= inbox
->buf
;
3050 int mtt_base
= eq_get_mtt_addr(eqc
) / dev
->caps
.mtt_entry_sz
;
3051 int mtt_size
= eq_get_mtt_size(eqc
);
3053 struct res_mtt
*mtt
;
3055 err
= add_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
3058 err
= eq_res_start_move_to(dev
, slave
, res_id
, RES_EQ_HW
, &eq
);
3062 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
3066 err
= check_mtt_range(dev
, slave
, mtt_base
, mtt_size
, mtt
);
3070 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3074 atomic_inc(&mtt
->ref_count
);
3076 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3077 res_end_move(dev
, slave
, RES_EQ
, res_id
);
3081 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3083 res_abort_move(dev
, slave
, RES_EQ
, res_id
);
3085 rem_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
3089 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev
*dev
, int slave
,
3090 struct mlx4_vhcr
*vhcr
,
3091 struct mlx4_cmd_mailbox
*inbox
,
3092 struct mlx4_cmd_mailbox
*outbox
,
3093 struct mlx4_cmd_info
*cmd
)
3096 u8 get
= vhcr
->op_modifier
;
3101 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3106 static int get_containing_mtt(struct mlx4_dev
*dev
, int slave
, int start
,
3107 int len
, struct res_mtt
**res
)
3109 struct mlx4_priv
*priv
= mlx4_priv(dev
);
3110 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
3111 struct res_mtt
*mtt
;
3114 spin_lock_irq(mlx4_tlock(dev
));
3115 list_for_each_entry(mtt
, &tracker
->slave_list
[slave
].res_list
[RES_MTT
],
3117 if (!check_mtt_range(dev
, slave
, start
, len
, mtt
)) {
3119 mtt
->com
.from_state
= mtt
->com
.state
;
3120 mtt
->com
.state
= RES_MTT_BUSY
;
3125 spin_unlock_irq(mlx4_tlock(dev
));
3130 static int verify_qp_parameters(struct mlx4_dev
*dev
,
3131 struct mlx4_vhcr
*vhcr
,
3132 struct mlx4_cmd_mailbox
*inbox
,
3133 enum qp_transition transition
, u8 slave
)
3137 struct mlx4_qp_context
*qp_ctx
;
3138 enum mlx4_qp_optpar optpar
;
3142 qp_ctx
= inbox
->buf
+ 8;
3143 qp_type
= (be32_to_cpu(qp_ctx
->flags
) >> 16) & 0xff;
3144 optpar
= be32_to_cpu(*(__be32
*) inbox
->buf
);
3146 if (slave
!= mlx4_master_func_num(dev
)) {
3147 qp_ctx
->params2
&= ~MLX4_QP_BIT_FPP
;
3148 /* setting QP rate-limit is disallowed for VFs */
3149 if (qp_ctx
->rate_limit_params
)
3155 case MLX4_QP_ST_XRC
:
3157 switch (transition
) {
3158 case QP_TRANS_INIT2RTR
:
3159 case QP_TRANS_RTR2RTS
:
3160 case QP_TRANS_RTS2RTS
:
3161 case QP_TRANS_SQD2SQD
:
3162 case QP_TRANS_SQD2RTS
:
3163 if (slave
!= mlx4_master_func_num(dev
)) {
3164 if (optpar
& MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
) {
3165 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
3166 if (dev
->caps
.port_mask
[port
] != MLX4_PORT_TYPE_IB
)
3167 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
3170 if (qp_ctx
->pri_path
.mgid_index
>= num_gids
)
3173 if (optpar
& MLX4_QP_OPTPAR_ALT_ADDR_PATH
) {
3174 port
= (qp_ctx
->alt_path
.sched_queue
>> 6 & 1) + 1;
3175 if (dev
->caps
.port_mask
[port
] != MLX4_PORT_TYPE_IB
)
3176 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
3179 if (qp_ctx
->alt_path
.mgid_index
>= num_gids
)
3189 case MLX4_QP_ST_MLX
:
3190 qpn
= vhcr
->in_modifier
& 0x7fffff;
3191 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
3192 if (transition
== QP_TRANS_INIT2RTR
&&
3193 slave
!= mlx4_master_func_num(dev
) &&
3194 mlx4_is_qp_reserved(dev
, qpn
) &&
3195 !mlx4_vf_smi_enabled(dev
, slave
, port
)) {
3196 /* only enabled VFs may create MLX proxy QPs */
3197 mlx4_err(dev
, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3198 __func__
, slave
, port
);
3210 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev
*dev
, int slave
,
3211 struct mlx4_vhcr
*vhcr
,
3212 struct mlx4_cmd_mailbox
*inbox
,
3213 struct mlx4_cmd_mailbox
*outbox
,
3214 struct mlx4_cmd_info
*cmd
)
3216 struct mlx4_mtt mtt
;
3217 __be64
*page_list
= inbox
->buf
;
3218 u64
*pg_list
= (u64
*)page_list
;
3220 struct res_mtt
*rmtt
= NULL
;
3221 int start
= be64_to_cpu(page_list
[0]);
3222 int npages
= vhcr
->in_modifier
;
3225 err
= get_containing_mtt(dev
, slave
, start
, npages
, &rmtt
);
	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness
	 */
	mtt.offset = 0; /* TBD this is broken but I don't handle it since
			   we don't really use it */
3236 for (i
= 0; i
< npages
; ++i
)
3237 pg_list
[i
+ 2] = (be64_to_cpu(page_list
[i
+ 2]) & ~1ULL);
3239 err
= __mlx4_write_mtt(dev
, &mtt
, be64_to_cpu(page_list
[0]), npages
,
3240 ((u64
*)page_list
+ 2));
3243 put_res(dev
, slave
, rmtt
->com
.res_id
, RES_MTT
);
3248 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3249 struct mlx4_vhcr
*vhcr
,
3250 struct mlx4_cmd_mailbox
*inbox
,
3251 struct mlx4_cmd_mailbox
*outbox
,
3252 struct mlx4_cmd_info
*cmd
)
3254 int eqn
= vhcr
->in_modifier
;
3255 int res_id
= eqn
| (slave
<< 10);
3259 err
= eq_res_start_move_to(dev
, slave
, res_id
, RES_EQ_RESERVED
, &eq
);
3263 err
= get_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
, NULL
);
3267 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3271 atomic_dec(&eq
->mtt
->ref_count
);
3272 put_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
);
3273 res_end_move(dev
, slave
, RES_EQ
, res_id
);
3274 rem_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
3279 put_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
);
3281 res_abort_move(dev
, slave
, RES_EQ
, res_id
);
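/* mlx4_GEN_EQE() injects an event into a slave's event queue: it looks up
 * the EQ the slave registered for this event type, verifies it is in HW
 * ownership, copies the EQE (with the command token fixed up for
 * MLX4_EVENT_TYPE_CMD events) into a mailbox and issues the GEN_EQE command
 * with the slave and EQN encoded in the in_modifier.
 */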
3286 int mlx4_GEN_EQE(struct mlx4_dev
*dev
, int slave
, struct mlx4_eqe
*eqe
)
3288 struct mlx4_priv
*priv
= mlx4_priv(dev
);
3289 struct mlx4_slave_event_eq_info
*event_eq
;
3290 struct mlx4_cmd_mailbox
*mailbox
;
3291 u32 in_modifier
= 0;
3296 if (!priv
->mfunc
.master
.slave_state
)
3299 /* check for slave valid, slave not PF, and slave active */
3300 if (slave
< 0 || slave
> dev
->persist
->num_vfs
||
3301 slave
== dev
->caps
.function
||
3302 !priv
->mfunc
.master
.slave_state
[slave
].active
)
3305 event_eq
= &priv
->mfunc
.master
.slave_state
[slave
].event_eq
[eqe
->type
];
3307 /* Create the event only if the slave is registered */
3308 if (event_eq
->eqn
< 0)
3311 mutex_lock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
3312 res_id
= (slave
<< 10) | event_eq
->eqn
;
3313 err
= get_res(dev
, slave
, res_id
, RES_EQ
, &req
);
3317 if (req
->com
.from_state
!= RES_EQ_HW
) {
3322 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
3323 if (IS_ERR(mailbox
)) {
3324 err
= PTR_ERR(mailbox
);
3328 if (eqe
->type
== MLX4_EVENT_TYPE_CMD
) {
3330 eqe
->event
.cmd
.token
= cpu_to_be16(event_eq
->token
);
3333 memcpy(mailbox
->buf
, (u8
*) eqe
, 28);
3335 in_modifier
= (slave
& 0xff) | ((event_eq
->eqn
& 0x3ff) << 16);
3337 err
= mlx4_cmd(dev
, mailbox
->dma
, in_modifier
, 0,
3338 MLX4_CMD_GEN_EQE
, MLX4_CMD_TIME_CLASS_B
,
3341 put_res(dev
, slave
, res_id
, RES_EQ
);
3342 mutex_unlock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
3343 mlx4_free_cmd_mailbox(dev
, mailbox
);
3347 put_res(dev
, slave
, res_id
, RES_EQ
);
3350 mutex_unlock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
3354 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3355 struct mlx4_vhcr
*vhcr
,
3356 struct mlx4_cmd_mailbox
*inbox
,
3357 struct mlx4_cmd_mailbox
*outbox
,
3358 struct mlx4_cmd_info
*cmd
)
3360 int eqn
= vhcr
->in_modifier
;
3361 int res_id
= eqn
| (slave
<< 10);
3365 err
= get_res(dev
, slave
, res_id
, RES_EQ
, &eq
);
3369 if (eq
->com
.from_state
!= RES_EQ_HW
) {
3374 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3377 put_res(dev
, slave
, res_id
, RES_EQ
);
3381 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3382 struct mlx4_vhcr
*vhcr
,
3383 struct mlx4_cmd_mailbox
*inbox
,
3384 struct mlx4_cmd_mailbox
*outbox
,
3385 struct mlx4_cmd_info
*cmd
)
3388 int cqn
= vhcr
->in_modifier
;
3389 struct mlx4_cq_context
*cqc
= inbox
->buf
;
3390 int mtt_base
= cq_get_mtt_addr(cqc
) / dev
->caps
.mtt_entry_sz
;
3391 struct res_cq
*cq
= NULL
;
3392 struct res_mtt
*mtt
;
3394 err
= cq_res_start_move_to(dev
, slave
, cqn
, RES_CQ_HW
, &cq
);
3397 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
3400 err
= check_mtt_range(dev
, slave
, mtt_base
, cq_get_mtt_size(cqc
), mtt
);
3403 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3406 atomic_inc(&mtt
->ref_count
);
3408 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3409 res_end_move(dev
, slave
, RES_CQ
, cqn
);
3413 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3415 res_abort_move(dev
, slave
, RES_CQ
, cqn
);
3419 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3420 struct mlx4_vhcr
*vhcr
,
3421 struct mlx4_cmd_mailbox
*inbox
,
3422 struct mlx4_cmd_mailbox
*outbox
,
3423 struct mlx4_cmd_info
*cmd
)
3426 int cqn
= vhcr
->in_modifier
;
3427 struct res_cq
*cq
= NULL
;
3429 err
= cq_res_start_move_to(dev
, slave
, cqn
, RES_CQ_ALLOCATED
, &cq
);
3432 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3435 atomic_dec(&cq
->mtt
->ref_count
);
3436 res_end_move(dev
, slave
, RES_CQ
, cqn
);
3440 res_abort_move(dev
, slave
, RES_CQ
, cqn
);
3444 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3445 struct mlx4_vhcr
*vhcr
,
3446 struct mlx4_cmd_mailbox
*inbox
,
3447 struct mlx4_cmd_mailbox
*outbox
,
3448 struct mlx4_cmd_info
*cmd
)
3450 int cqn
= vhcr
->in_modifier
;
3454 err
= get_res(dev
, slave
, cqn
, RES_CQ
, &cq
);
3458 if (cq
->com
.from_state
!= RES_CQ_HW
)
3461 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3463 put_res(dev
, slave
, cqn
, RES_CQ
);
3468 static int handle_resize(struct mlx4_dev
*dev
, int slave
,
3469 struct mlx4_vhcr
*vhcr
,
3470 struct mlx4_cmd_mailbox
*inbox
,
3471 struct mlx4_cmd_mailbox
*outbox
,
3472 struct mlx4_cmd_info
*cmd
,
3476 struct res_mtt
*orig_mtt
;
3477 struct res_mtt
*mtt
;
3478 struct mlx4_cq_context
*cqc
= inbox
->buf
;
3479 int mtt_base
= cq_get_mtt_addr(cqc
) / dev
->caps
.mtt_entry_sz
;
3481 err
= get_res(dev
, slave
, cq
->mtt
->com
.res_id
, RES_MTT
, &orig_mtt
);
3485 if (orig_mtt
!= cq
->mtt
) {
3490 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
3494 err
= check_mtt_range(dev
, slave
, mtt_base
, cq_get_mtt_size(cqc
), mtt
);
3497 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3500 atomic_dec(&orig_mtt
->ref_count
);
3501 put_res(dev
, slave
, orig_mtt
->com
.res_id
, RES_MTT
);
3502 atomic_inc(&mtt
->ref_count
);
3504 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3508 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3510 put_res(dev
, slave
, orig_mtt
->com
.res_id
, RES_MTT
);
3516 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3517 struct mlx4_vhcr
*vhcr
,
3518 struct mlx4_cmd_mailbox
*inbox
,
3519 struct mlx4_cmd_mailbox
*outbox
,
3520 struct mlx4_cmd_info
*cmd
)
3522 int cqn
= vhcr
->in_modifier
;
3526 err
= get_res(dev
, slave
, cqn
, RES_CQ
, &cq
);
3530 if (cq
->com
.from_state
!= RES_CQ_HW
)
3533 if (vhcr
->op_modifier
== 0) {
3534 err
= handle_resize(dev
, slave
, vhcr
, inbox
, outbox
, cmd
, cq
);
3538 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3540 put_res(dev
, slave
, cqn
, RES_CQ
);
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
3557 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3558 struct mlx4_vhcr
*vhcr
,
3559 struct mlx4_cmd_mailbox
*inbox
,
3560 struct mlx4_cmd_mailbox
*outbox
,
3561 struct mlx4_cmd_info
*cmd
)
3564 int srqn
= vhcr
->in_modifier
;
3565 struct res_mtt
*mtt
;
3566 struct res_srq
*srq
= NULL
;
3567 struct mlx4_srq_context
*srqc
= inbox
->buf
;
3568 int mtt_base
= srq_get_mtt_addr(srqc
) / dev
->caps
.mtt_entry_sz
;
3570 if (srqn
!= (be32_to_cpu(srqc
->state_logsize_srqn
) & 0xffffff))
3573 err
= srq_res_start_move_to(dev
, slave
, srqn
, RES_SRQ_HW
, &srq
);
3576 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
3579 err
= check_mtt_range(dev
, slave
, mtt_base
, srq_get_mtt_size(srqc
),
3584 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3588 atomic_inc(&mtt
->ref_count
);
3590 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3591 res_end_move(dev
, slave
, RES_SRQ
, srqn
);
3595 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3597 res_abort_move(dev
, slave
, RES_SRQ
, srqn
);
3602 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3603 struct mlx4_vhcr
*vhcr
,
3604 struct mlx4_cmd_mailbox
*inbox
,
3605 struct mlx4_cmd_mailbox
*outbox
,
3606 struct mlx4_cmd_info
*cmd
)
3609 int srqn
= vhcr
->in_modifier
;
3610 struct res_srq
*srq
= NULL
;
3612 err
= srq_res_start_move_to(dev
, slave
, srqn
, RES_SRQ_ALLOCATED
, &srq
);
3615 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3618 atomic_dec(&srq
->mtt
->ref_count
);
3620 atomic_dec(&srq
->cq
->ref_count
);
3621 res_end_move(dev
, slave
, RES_SRQ
, srqn
);
3626 res_abort_move(dev
, slave
, RES_SRQ
, srqn
);
3631 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3632 struct mlx4_vhcr
*vhcr
,
3633 struct mlx4_cmd_mailbox
*inbox
,
3634 struct mlx4_cmd_mailbox
*outbox
,
3635 struct mlx4_cmd_info
*cmd
)
3638 int srqn
= vhcr
->in_modifier
;
3639 struct res_srq
*srq
;
3641 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
3644 if (srq
->com
.from_state
!= RES_SRQ_HW
) {
3648 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3650 put_res(dev
, slave
, srqn
, RES_SRQ
);
3654 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3655 struct mlx4_vhcr
*vhcr
,
3656 struct mlx4_cmd_mailbox
*inbox
,
3657 struct mlx4_cmd_mailbox
*outbox
,
3658 struct mlx4_cmd_info
*cmd
)
3661 int srqn
= vhcr
->in_modifier
;
3662 struct res_srq
*srq
;
3664 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
3668 if (srq
->com
.from_state
!= RES_SRQ_HW
) {
3673 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3675 put_res(dev
, slave
, srqn
, RES_SRQ
);
3679 int mlx4_GEN_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3680 struct mlx4_vhcr
*vhcr
,
3681 struct mlx4_cmd_mailbox
*inbox
,
3682 struct mlx4_cmd_mailbox
*outbox
,
3683 struct mlx4_cmd_info
*cmd
)
3686 int qpn
= vhcr
->in_modifier
& 0x7fffff;
3689 err
= get_res(dev
, slave
, qpn
, RES_QP
, &qp
);
3692 if (qp
->com
.from_state
!= RES_QP_HW
) {
3697 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3699 put_res(dev
, slave
, qpn
, RES_QP
);
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
3715 static int adjust_qp_sched_queue(struct mlx4_dev
*dev
, int slave
,
3716 struct mlx4_qp_context
*qpc
,
3717 struct mlx4_cmd_mailbox
*inbox
)
3719 enum mlx4_qp_optpar optpar
= be32_to_cpu(*(__be32
*)inbox
->buf
);
3721 int port
= mlx4_slave_convert_port(
3722 dev
, slave
, (qpc
->pri_path
.sched_queue
>> 6 & 1) + 1) - 1;
3727 pri_sched_queue
= (qpc
->pri_path
.sched_queue
& ~(1 << 6)) |
3730 if (optpar
& (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
| MLX4_QP_OPTPAR_SCHED_QUEUE
) ||
3731 qpc
->pri_path
.sched_queue
|| mlx4_is_eth(dev
, port
+ 1)) {
3732 qpc
->pri_path
.sched_queue
= pri_sched_queue
;
3735 if (optpar
& MLX4_QP_OPTPAR_ALT_ADDR_PATH
) {
3736 port
= mlx4_slave_convert_port(
3737 dev
, slave
, (qpc
->alt_path
.sched_queue
>> 6 & 1)
3741 qpc
->alt_path
.sched_queue
=
3742 (qpc
->alt_path
.sched_queue
& ~(1 << 6)) |
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
			   struct mlx4_qp_context *qpc,
			   struct mlx4_cmd_mailbox *inbox)
{
	u64 mac;
	int port;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 smac_ix;

	port = (sched >> 6 & 1) + 1;
	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
			return -ENOENT;
	}

	return 0;
}
3767 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3768 struct mlx4_vhcr
*vhcr
,
3769 struct mlx4_cmd_mailbox
*inbox
,
3770 struct mlx4_cmd_mailbox
*outbox
,
3771 struct mlx4_cmd_info
*cmd
)
3774 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
3775 int qpn
= vhcr
->in_modifier
& 0x7fffff;
3777 u8 orig_sched_queue
;
3778 u8 orig_vlan_control
= qpc
->pri_path
.vlan_control
;
3779 u8 orig_fvl_rx
= qpc
->pri_path
.fvl_rx
;
3780 u8 orig_pri_path_fl
= qpc
->pri_path
.fl
;
3781 u8 orig_vlan_index
= qpc
->pri_path
.vlan_index
;
3782 u8 orig_feup
= qpc
->pri_path
.feup
;
3784 err
= adjust_qp_sched_queue(dev
, slave
, qpc
, inbox
);
3787 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_INIT2RTR
, slave
);
3791 if (roce_verify_mac(dev
, slave
, qpc
, inbox
))
3794 update_pkey_index(dev
, slave
, inbox
);
3795 update_gid(dev
, inbox
, (u8
)slave
);
3796 adjust_proxy_tun_qkey(dev
, vhcr
, qpc
);
3797 orig_sched_queue
= qpc
->pri_path
.sched_queue
;
3799 err
= get_res(dev
, slave
, qpn
, RES_QP
, &qp
);
3802 if (qp
->com
.from_state
!= RES_QP_HW
) {
3807 err
= update_vport_qp_param(dev
, inbox
, slave
, qpn
);
3811 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3813 /* if no error, save sched queue value passed in by VF. This is
3814 * essentially the QOS value provided by the VF. This will be useful
3815 * if we allow dynamic changes from VST back to VGT
3818 qp
->sched_queue
= orig_sched_queue
;
3819 qp
->vlan_control
= orig_vlan_control
;
3820 qp
->fvl_rx
= orig_fvl_rx
;
3821 qp
->pri_path_fl
= orig_pri_path_fl
;
3822 qp
->vlan_index
= orig_vlan_index
;
3823 qp
->feup
= orig_feup
;
3825 put_res(dev
, slave
, qpn
, RES_QP
);
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
3851 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3852 struct mlx4_vhcr
*vhcr
,
3853 struct mlx4_cmd_mailbox
*inbox
,
3854 struct mlx4_cmd_mailbox
*outbox
,
3855 struct mlx4_cmd_info
*cmd
)
3858 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3860 err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3863 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_RTS2RTS
, slave
);
3867 update_pkey_index(dev
, slave
, inbox
);
3868 update_gid(dev
, inbox
, (u8
)slave
);
3869 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3870 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3874 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3875 struct mlx4_vhcr
*vhcr
,
3876 struct mlx4_cmd_mailbox
*inbox
,
3877 struct mlx4_cmd_mailbox
*outbox
,
3878 struct mlx4_cmd_info
*cmd
)
3880 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3881 int err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3884 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3885 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3888 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3889 struct mlx4_vhcr
*vhcr
,
3890 struct mlx4_cmd_mailbox
*inbox
,
3891 struct mlx4_cmd_mailbox
*outbox
,
3892 struct mlx4_cmd_info
*cmd
)
3895 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3897 err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3900 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_SQD2SQD
, slave
);
3904 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3905 update_gid(dev
, inbox
, (u8
)slave
);
3906 update_pkey_index(dev
, slave
, inbox
);
3907 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3910 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3911 struct mlx4_vhcr
*vhcr
,
3912 struct mlx4_cmd_mailbox
*inbox
,
3913 struct mlx4_cmd_mailbox
*outbox
,
3914 struct mlx4_cmd_info
*cmd
)
3917 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3919 err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3922 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_SQD2RTS
, slave
);
3926 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3927 update_gid(dev
, inbox
, (u8
)slave
);
3928 update_pkey_index(dev
, slave
, inbox
);
3929 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3932 int mlx4_2RST_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3933 struct mlx4_vhcr
*vhcr
,
3934 struct mlx4_cmd_mailbox
*inbox
,
3935 struct mlx4_cmd_mailbox
*outbox
,
3936 struct mlx4_cmd_info
*cmd
)
3939 int qpn
= vhcr
->in_modifier
& 0x7fffff;
3942 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
, &qp
, 0);
3945 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3949 atomic_dec(&qp
->mtt
->ref_count
);
3950 atomic_dec(&qp
->rcq
->ref_count
);
3951 atomic_dec(&qp
->scq
->ref_count
);
3953 atomic_dec(&qp
->srq
->ref_count
);
3954 res_end_move(dev
, slave
, RES_QP
, qpn
);
3958 res_abort_move(dev
, slave
, RES_QP
, qpn
);
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
3975 static int add_mcg_res(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
,
3976 u8
*gid
, enum mlx4_protocol prot
,
3977 enum mlx4_steer_type steer
, u64 reg_id
)
3979 struct res_gid
*res
;
3982 res
= kzalloc(sizeof *res
, GFP_KERNEL
);
3986 spin_lock_irq(&rqp
->mcg_spl
);
3987 if (find_gid(dev
, slave
, rqp
, gid
)) {
3991 memcpy(res
->gid
, gid
, 16);
3994 res
->reg_id
= reg_id
;
3995 list_add_tail(&res
->list
, &rqp
->mcg_list
);
3998 spin_unlock_irq(&rqp
->mcg_spl
);
4003 static int rem_mcg_res(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
,
4004 u8
*gid
, enum mlx4_protocol prot
,
4005 enum mlx4_steer_type steer
, u64
*reg_id
)
4007 struct res_gid
*res
;
4010 spin_lock_irq(&rqp
->mcg_spl
);
4011 res
= find_gid(dev
, slave
, rqp
, gid
);
4012 if (!res
|| res
->prot
!= prot
|| res
->steer
!= steer
)
4015 *reg_id
= res
->reg_id
;
4016 list_del(&res
->list
);
4020 spin_unlock_irq(&rqp
->mcg_spl
);
4025 static int qp_attach(struct mlx4_dev
*dev
, int slave
, struct mlx4_qp
*qp
,
4026 u8 gid
[16], int block_loopback
, enum mlx4_protocol prot
,
4027 enum mlx4_steer_type type
, u64
*reg_id
)
4029 switch (dev
->caps
.steering_mode
) {
4030 case MLX4_STEERING_MODE_DEVICE_MANAGED
: {
4031 int port
= mlx4_slave_convert_port(dev
, slave
, gid
[5]);
4034 return mlx4_trans_to_dmfs_attach(dev
, qp
, gid
, port
,
4035 block_loopback
, prot
,
4038 case MLX4_STEERING_MODE_B0
:
4039 if (prot
== MLX4_PROT_ETH
) {
4040 int port
= mlx4_slave_convert_port(dev
, slave
, gid
[5]);
4045 return mlx4_qp_attach_common(dev
, qp
, gid
,
4046 block_loopback
, prot
, type
);
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
		     u8 gid[16], enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}
4066 static int mlx4_adjust_port(struct mlx4_dev
*dev
, int slave
,
4067 u8
*gid
, enum mlx4_protocol prot
)
4071 if (prot
!= MLX4_PROT_ETH
)
4074 if (dev
->caps
.steering_mode
== MLX4_STEERING_MODE_B0
||
4075 dev
->caps
.steering_mode
== MLX4_STEERING_MODE_DEVICE_MANAGED
) {
4076 real_port
= mlx4_slave_convert_port(dev
, slave
, gid
[5]);
4085 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev
*dev
, int slave
,
4086 struct mlx4_vhcr
*vhcr
,
4087 struct mlx4_cmd_mailbox
*inbox
,
4088 struct mlx4_cmd_mailbox
*outbox
,
4089 struct mlx4_cmd_info
*cmd
)
4091 struct mlx4_qp qp
; /* dummy for calling attach/detach */
4092 u8
*gid
= inbox
->buf
;
4093 enum mlx4_protocol prot
= (vhcr
->in_modifier
>> 28) & 0x7;
4098 int attach
= vhcr
->op_modifier
;
4099 int block_loopback
= vhcr
->in_modifier
>> 31;
4100 u8 steer_type_mask
= 2;
4101 enum mlx4_steer_type type
= (gid
[7] & steer_type_mask
) >> 1;
4103 qpn
= vhcr
->in_modifier
& 0xffffff;
4104 err
= get_res(dev
, slave
, qpn
, RES_QP
, &rqp
);
4110 err
= qp_attach(dev
, slave
, &qp
, gid
, block_loopback
, prot
,
4113 pr_err("Fail to attach rule to qp 0x%x\n", qpn
);
4116 err
= add_mcg_res(dev
, slave
, rqp
, gid
, prot
, type
, reg_id
);
4120 err
= mlx4_adjust_port(dev
, slave
, gid
, prot
);
4124 err
= rem_mcg_res(dev
, slave
, rqp
, gid
, prot
, type
, ®_id
);
4128 err
= qp_detach(dev
, &qp
, gid
, prot
, type
, reg_id
);
4130 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4133 put_res(dev
, slave
, qpn
, RES_QP
);
4137 qp_detach(dev
, &qp
, gid
, prot
, type
, reg_id
);
4139 put_res(dev
, slave
, qpn
, RES_QP
);
/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a MAC address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast MAC */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}
static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
					 struct _rule_hw *eth_header)
{
	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		struct mlx4_net_trans_rule_hw_eth *eth =
			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
			next_rule->rsvd == 0;

		if (last_rule)
			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
	}
}
4185 * In case of missing eth header, append eth header with a MAC address
4186 * assigned to the VF.
4188 static int add_eth_header(struct mlx4_dev
*dev
, int slave
,
4189 struct mlx4_cmd_mailbox
*inbox
,
4190 struct list_head
*rlist
, int header_id
)
4192 struct mac_res
*res
, *tmp
;
4194 struct mlx4_net_trans_rule_hw_ctrl
*ctrl
;
4195 struct mlx4_net_trans_rule_hw_eth
*eth_header
;
4196 struct mlx4_net_trans_rule_hw_ipv4
*ip_header
;
4197 struct mlx4_net_trans_rule_hw_tcp_udp
*l4_header
;
4199 __be64 mac_msk
= cpu_to_be64(MLX4_MAC_MASK
<< 16);
4201 ctrl
= (struct mlx4_net_trans_rule_hw_ctrl
*)inbox
->buf
;
4203 eth_header
= (struct mlx4_net_trans_rule_hw_eth
*)(ctrl
+ 1);
4205 /* Clear a space in the inbox for eth header */
4206 switch (header_id
) {
4207 case MLX4_NET_TRANS_RULE_ID_IPV4
:
4209 (struct mlx4_net_trans_rule_hw_ipv4
*)(eth_header
+ 1);
4210 memmove(ip_header
, eth_header
,
4211 sizeof(*ip_header
) + sizeof(*l4_header
));
4213 case MLX4_NET_TRANS_RULE_ID_TCP
:
4214 case MLX4_NET_TRANS_RULE_ID_UDP
:
4215 l4_header
= (struct mlx4_net_trans_rule_hw_tcp_udp
*)
4217 memmove(l4_header
, eth_header
, sizeof(*l4_header
));
4222 list_for_each_entry_safe(res
, tmp
, rlist
, list
) {
4223 if (port
== res
->port
) {
4224 be_mac
= cpu_to_be64(res
->mac
<< 16);
4229 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4234 memset(eth_header
, 0, sizeof(*eth_header
));
4235 eth_header
->size
= sizeof(*eth_header
) >> 2;
4236 eth_header
->id
= cpu_to_be16(__sw_id_hw
[MLX4_NET_TRANS_RULE_ID_ETH
]);
4237 memcpy(eth_header
->dst_mac
, &be_mac
, ETH_ALEN
);
4238 memcpy(eth_header
->dst_mac_msk
, &mac_msk
, ETH_ALEN
);
4244 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4245 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4246 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4247 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
4248 struct mlx4_vhcr
*vhcr
,
4249 struct mlx4_cmd_mailbox
*inbox
,
4250 struct mlx4_cmd_mailbox
*outbox
,
4251 struct mlx4_cmd_info
*cmd_info
)
4254 u32 qpn
= vhcr
->in_modifier
& 0xffffff;
4258 u64 pri_addr_path_mask
;
4259 struct mlx4_update_qp_context
*cmd
;
4262 cmd
= (struct mlx4_update_qp_context
*)inbox
->buf
;
4264 pri_addr_path_mask
= be64_to_cpu(cmd
->primary_addr_path_mask
);
4265 if (cmd
->qp_mask
|| cmd
->secondary_addr_path_mask
||
4266 (pri_addr_path_mask
& ~MLX4_UPD_QP_PATH_MASK_SUPPORTED
))
4269 if ((pri_addr_path_mask
&
4270 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB
)) &&
4271 !(dev
->caps
.flags2
&
4272 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB
)) {
4273 mlx4_warn(dev
, "Src check LB for slave %d isn't supported\n",
4278 /* Just change the smac for the QP */
4279 err
= get_res(dev
, slave
, qpn
, RES_QP
, &rqp
);
4281 mlx4_err(dev
, "Updating qpn 0x%x for slave %d rejected\n", qpn
, slave
);
4285 port
= (rqp
->sched_queue
>> 6 & 1) + 1;
4287 if (pri_addr_path_mask
& (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX
)) {
4288 smac_index
= cmd
->qp_context
.pri_path
.grh_mylmc
;
4289 err
= mac_find_smac_ix_in_slave(dev
, slave
, port
,
4293 mlx4_err(dev
, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4299 err
= mlx4_cmd(dev
, inbox
->dma
,
4300 vhcr
->in_modifier
, 0,
4301 MLX4_CMD_UPDATE_QP
, MLX4_CMD_TIME_CLASS_A
,
4304 mlx4_err(dev
, "Failed to update qpn on qpn 0x%x, command failed\n", qpn
);
4309 put_res(dev
, slave
, qpn
, RES_QP
);
static u32 qp_attach_mbox_size(void *mbox)
{
	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	struct _rule_hw *rule_header;

	rule_header = (struct _rule_hw *)(mbox + size);

	while (rule_header->size) {
		size += rule_header->size * sizeof(u32);
		rule_header += 1;
	}

	return size;
}
4327 static int mlx4_do_mirror_rule(struct mlx4_dev
*dev
, struct res_fs_rule
*fs_rule
);
4329 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev
*dev
, int slave
,
4330 struct mlx4_vhcr
*vhcr
,
4331 struct mlx4_cmd_mailbox
*inbox
,
4332 struct mlx4_cmd_mailbox
*outbox
,
4333 struct mlx4_cmd_info
*cmd
)
4336 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4337 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4338 struct list_head
*rlist
= &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
4342 struct mlx4_net_trans_rule_hw_ctrl
*ctrl
;
4343 struct _rule_hw
*rule_header
;
4345 struct res_fs_rule
*rrule
;
4348 if (dev
->caps
.steering_mode
!=
4349 MLX4_STEERING_MODE_DEVICE_MANAGED
)
4352 ctrl
= (struct mlx4_net_trans_rule_hw_ctrl
*)inbox
->buf
;
4353 err
= mlx4_slave_convert_port(dev
, slave
, ctrl
->port
);
4357 qpn
= be32_to_cpu(ctrl
->qpn
) & 0xffffff;
4358 err
= get_res(dev
, slave
, qpn
, RES_QP
, &rqp
);
4360 pr_err("Steering rule with qpn 0x%x rejected\n", qpn
);
4363 rule_header
= (struct _rule_hw
*)(ctrl
+ 1);
4364 header_id
= map_hw_to_sw_id(be16_to_cpu(rule_header
->id
));
4366 if (header_id
== MLX4_NET_TRANS_RULE_ID_ETH
)
4367 handle_eth_header_mcast_prio(ctrl
, rule_header
);
4369 if (slave
== dev
->caps
.function
)
4372 switch (header_id
) {
4373 case MLX4_NET_TRANS_RULE_ID_ETH
:
4374 if (validate_eth_header_mac(slave
, rule_header
, rlist
)) {
4379 case MLX4_NET_TRANS_RULE_ID_IB
:
4381 case MLX4_NET_TRANS_RULE_ID_IPV4
:
4382 case MLX4_NET_TRANS_RULE_ID_TCP
:
4383 case MLX4_NET_TRANS_RULE_ID_UDP
:
4384 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4385 if (add_eth_header(dev
, slave
, inbox
, rlist
, header_id
)) {
4389 vhcr
->in_modifier
+=
4390 sizeof(struct mlx4_net_trans_rule_hw_eth
) >> 2;
4393 pr_err("Corrupted mailbox\n");
4399 err
= mlx4_cmd_imm(dev
, inbox
->dma
, &vhcr
->out_param
,
4400 vhcr
->in_modifier
, 0,
4401 MLX4_QP_FLOW_STEERING_ATTACH
, MLX4_CMD_TIME_CLASS_A
,
4407 err
= add_res_range(dev
, slave
, vhcr
->out_param
, 1, RES_FS_RULE
, qpn
);
4409 mlx4_err(dev
, "Fail to add flow steering resources\n");
4413 err
= get_res(dev
, slave
, vhcr
->out_param
, RES_FS_RULE
, &rrule
);
4417 mbox_size
= qp_attach_mbox_size(inbox
->buf
);
4418 rrule
->mirr_mbox
= kmalloc(mbox_size
, GFP_KERNEL
);
4419 if (!rrule
->mirr_mbox
) {
4423 rrule
->mirr_mbox_size
= mbox_size
;
4424 rrule
->mirr_rule_id
= 0;
4425 memcpy(rrule
->mirr_mbox
, inbox
->buf
, mbox_size
);
4427 /* set different port */
4428 ctrl
= (struct mlx4_net_trans_rule_hw_ctrl
*)rrule
->mirr_mbox
;
4429 if (ctrl
->port
== 1)
4434 if (mlx4_is_bonded(dev
))
4435 mlx4_do_mirror_rule(dev
, rrule
);
4437 atomic_inc(&rqp
->ref_count
);
4440 put_res(dev
, slave
, vhcr
->out_param
, RES_FS_RULE
);
4442 /* detach rule on error */
4444 mlx4_cmd(dev
, vhcr
->out_param
, 0, 0,
4445 MLX4_QP_FLOW_STEERING_DETACH
, MLX4_CMD_TIME_CLASS_A
,
4448 put_res(dev
, slave
, qpn
, RES_QP
);
4452 static int mlx4_undo_mirror_rule(struct mlx4_dev
*dev
, struct res_fs_rule
*fs_rule
)
4456 err
= rem_res_range(dev
, fs_rule
->com
.owner
, fs_rule
->com
.res_id
, 1, RES_FS_RULE
, 0);
4458 mlx4_err(dev
, "Fail to remove flow steering resources\n");
4462 mlx4_cmd(dev
, fs_rule
->com
.res_id
, 0, 0, MLX4_QP_FLOW_STEERING_DETACH
,
4463 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_NATIVE
);
4467 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev
*dev
, int slave
,
4468 struct mlx4_vhcr
*vhcr
,
4469 struct mlx4_cmd_mailbox
*inbox
,
4470 struct mlx4_cmd_mailbox
*outbox
,
4471 struct mlx4_cmd_info
*cmd
)
4475 struct res_fs_rule
*rrule
;
4478 if (dev
->caps
.steering_mode
!=
4479 MLX4_STEERING_MODE_DEVICE_MANAGED
)
4482 err
= get_res(dev
, slave
, vhcr
->in_param
, RES_FS_RULE
, &rrule
);
4486 if (!rrule
->mirr_mbox
) {
4487 mlx4_err(dev
, "Mirror rules cannot be removed explicitly\n");
4488 put_res(dev
, slave
, vhcr
->in_param
, RES_FS_RULE
);
4491 mirr_reg_id
= rrule
->mirr_rule_id
;
4492 kfree(rrule
->mirr_mbox
);
	/* Release the rule from busy state before removal */
4495 put_res(dev
, slave
, vhcr
->in_param
, RES_FS_RULE
);
4496 err
= get_res(dev
, slave
, rrule
->qpn
, RES_QP
, &rqp
);
4500 if (mirr_reg_id
&& mlx4_is_bonded(dev
)) {
4501 err
= get_res(dev
, slave
, mirr_reg_id
, RES_FS_RULE
, &rrule
);
4503 mlx4_err(dev
, "Fail to get resource of mirror rule\n");
4505 put_res(dev
, slave
, mirr_reg_id
, RES_FS_RULE
);
4506 mlx4_undo_mirror_rule(dev
, rrule
);
4509 err
= rem_res_range(dev
, slave
, vhcr
->in_param
, 1, RES_FS_RULE
, 0);
4511 mlx4_err(dev
, "Fail to remove flow steering resources\n");
4515 err
= mlx4_cmd(dev
, vhcr
->in_param
, 0, 0,
4516 MLX4_QP_FLOW_STEERING_DETACH
, MLX4_CMD_TIME_CLASS_A
,
4519 atomic_dec(&rqp
->ref_count
);
4521 put_res(dev
, slave
, rrule
->qpn
, RES_QP
);
enum {
	BUSY_MAX_RETRIES = 10
};
4529 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev
*dev
, int slave
,
4530 struct mlx4_vhcr
*vhcr
,
4531 struct mlx4_cmd_mailbox
*inbox
,
4532 struct mlx4_cmd_mailbox
*outbox
,
4533 struct mlx4_cmd_info
*cmd
)
4536 int index
= vhcr
->in_modifier
& 0xffff;
4538 err
= get_res(dev
, slave
, index
, RES_COUNTER
, NULL
);
4542 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
4543 put_res(dev
, slave
, index
, RES_COUNTER
);
4547 static void detach_qp(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
)
4549 struct res_gid
*rgid
;
4550 struct res_gid
*tmp
;
4551 struct mlx4_qp qp
; /* dummy for calling attach/detach */
4553 list_for_each_entry_safe(rgid
, tmp
, &rqp
->mcg_list
, list
) {
4554 switch (dev
->caps
.steering_mode
) {
4555 case MLX4_STEERING_MODE_DEVICE_MANAGED
:
4556 mlx4_flow_detach(dev
, rgid
->reg_id
);
4558 case MLX4_STEERING_MODE_B0
:
4559 qp
.qpn
= rqp
->local_qpn
;
4560 (void) mlx4_qp_detach_common(dev
, &qp
, rgid
->gid
,
4561 rgid
->prot
, rgid
->steer
);
4564 list_del(&rgid
->list
);
4569 static int _move_all_busy(struct mlx4_dev
*dev
, int slave
,
4570 enum mlx4_resource type
, int print
)
4572 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4573 struct mlx4_resource_tracker
*tracker
=
4574 &priv
->mfunc
.master
.res_tracker
;
4575 struct list_head
*rlist
= &tracker
->slave_list
[slave
].res_list
[type
];
4576 struct res_common
*r
;
4577 struct res_common
*tmp
;
4581 spin_lock_irq(mlx4_tlock(dev
));
4582 list_for_each_entry_safe(r
, tmp
, rlist
, list
) {
4583 if (r
->owner
== slave
) {
4585 if (r
->state
== RES_ANY_BUSY
) {
4588 "%s id 0x%llx is busy\n",
4593 r
->from_state
= r
->state
;
4594 r
->state
= RES_ANY_BUSY
;
4600 spin_unlock_irq(mlx4_tlock(dev
));
4605 static int move_all_busy(struct mlx4_dev
*dev
, int slave
,
4606 enum mlx4_resource type
)
4608 unsigned long begin
;
4613 busy
= _move_all_busy(dev
, slave
, type
, 0);
4614 if (time_after(jiffies
, begin
+ 5 * HZ
))
4621 busy
= _move_all_busy(dev
, slave
, type
, 1);
4625 static void rem_slave_qps(struct mlx4_dev
*dev
, int slave
)
4627 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4628 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4629 struct list_head
*qp_list
=
4630 &tracker
->slave_list
[slave
].res_list
[RES_QP
];
4638 err
= move_all_busy(dev
, slave
, RES_QP
);
4640 mlx4_warn(dev
, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4643 spin_lock_irq(mlx4_tlock(dev
));
4644 list_for_each_entry_safe(qp
, tmp
, qp_list
, com
.list
) {
4645 spin_unlock_irq(mlx4_tlock(dev
));
4646 if (qp
->com
.owner
== slave
) {
4647 qpn
= qp
->com
.res_id
;
4648 detach_qp(dev
, slave
, qp
);
4649 state
= qp
->com
.from_state
;
4650 while (state
!= 0) {
4652 case RES_QP_RESERVED
:
4653 spin_lock_irq(mlx4_tlock(dev
));
4654 rb_erase(&qp
->com
.node
,
4655 &tracker
->res_tree
[RES_QP
]);
4656 list_del(&qp
->com
.list
);
4657 spin_unlock_irq(mlx4_tlock(dev
));
4658 if (!valid_reserved(dev
, slave
, qpn
)) {
4659 __mlx4_qp_release_range(dev
, qpn
, 1);
4660 mlx4_release_resource(dev
, slave
,
4667 if (!valid_reserved(dev
, slave
, qpn
))
4668 __mlx4_qp_free_icm(dev
, qpn
);
4669 state
= RES_QP_RESERVED
;
4673 err
= mlx4_cmd(dev
, in_param
,
4676 MLX4_CMD_TIME_CLASS_A
,
4679 mlx4_dbg(dev
, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4680 slave
, qp
->local_qpn
);
4681 atomic_dec(&qp
->rcq
->ref_count
);
4682 atomic_dec(&qp
->scq
->ref_count
);
4683 atomic_dec(&qp
->mtt
->ref_count
);
4685 atomic_dec(&qp
->srq
->ref_count
);
4686 state
= RES_QP_MAPPED
;
4693 spin_lock_irq(mlx4_tlock(dev
));
4695 spin_unlock_irq(mlx4_tlock(dev
));
4698 static void rem_slave_srqs(struct mlx4_dev
*dev
, int slave
)
4700 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4701 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4702 struct list_head
*srq_list
=
4703 &tracker
->slave_list
[slave
].res_list
[RES_SRQ
];
4704 struct res_srq
*srq
;
4705 struct res_srq
*tmp
;
4712 err
= move_all_busy(dev
, slave
, RES_SRQ
);
4714 mlx4_warn(dev
, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4717 spin_lock_irq(mlx4_tlock(dev
));
4718 list_for_each_entry_safe(srq
, tmp
, srq_list
, com
.list
) {
4719 spin_unlock_irq(mlx4_tlock(dev
));
4720 if (srq
->com
.owner
== slave
) {
4721 srqn
= srq
->com
.res_id
;
4722 state
= srq
->com
.from_state
;
4723 while (state
!= 0) {
4725 case RES_SRQ_ALLOCATED
:
4726 __mlx4_srq_free_icm(dev
, srqn
);
4727 spin_lock_irq(mlx4_tlock(dev
));
4728 rb_erase(&srq
->com
.node
,
4729 &tracker
->res_tree
[RES_SRQ
]);
4730 list_del(&srq
->com
.list
);
4731 spin_unlock_irq(mlx4_tlock(dev
));
4732 mlx4_release_resource(dev
, slave
,
4740 err
= mlx4_cmd(dev
, in_param
, srqn
, 1,
4742 MLX4_CMD_TIME_CLASS_A
,
4745 mlx4_dbg(dev
, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4748 atomic_dec(&srq
->mtt
->ref_count
);
4750 atomic_dec(&srq
->cq
->ref_count
);
4751 state
= RES_SRQ_ALLOCATED
;
4759 spin_lock_irq(mlx4_tlock(dev
));
4761 spin_unlock_irq(mlx4_tlock(dev
));
4764 static void rem_slave_cqs(struct mlx4_dev
*dev
, int slave
)
4766 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4767 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4768 struct list_head
*cq_list
=
4769 &tracker
->slave_list
[slave
].res_list
[RES_CQ
];
4778 err
= move_all_busy(dev
, slave
, RES_CQ
);
4780 mlx4_warn(dev
, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4783 spin_lock_irq(mlx4_tlock(dev
));
4784 list_for_each_entry_safe(cq
, tmp
, cq_list
, com
.list
) {
4785 spin_unlock_irq(mlx4_tlock(dev
));
4786 if (cq
->com
.owner
== slave
&& !atomic_read(&cq
->ref_count
)) {
4787 cqn
= cq
->com
.res_id
;
4788 state
= cq
->com
.from_state
;
4789 while (state
!= 0) {
4791 case RES_CQ_ALLOCATED
:
4792 __mlx4_cq_free_icm(dev
, cqn
);
4793 spin_lock_irq(mlx4_tlock(dev
));
4794 rb_erase(&cq
->com
.node
,
4795 &tracker
->res_tree
[RES_CQ
]);
4796 list_del(&cq
->com
.list
);
4797 spin_unlock_irq(mlx4_tlock(dev
));
4798 mlx4_release_resource(dev
, slave
,
4806 err
= mlx4_cmd(dev
, in_param
, cqn
, 1,
4808 MLX4_CMD_TIME_CLASS_A
,
4811 mlx4_dbg(dev
, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4813 atomic_dec(&cq
->mtt
->ref_count
);
4814 state
= RES_CQ_ALLOCATED
;
4822 spin_lock_irq(mlx4_tlock(dev
));
4824 spin_unlock_irq(mlx4_tlock(dev
));
4827 static void rem_slave_mrs(struct mlx4_dev
*dev
, int slave
)
4829 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4830 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4831 struct list_head
*mpt_list
=
4832 &tracker
->slave_list
[slave
].res_list
[RES_MPT
];
4833 struct res_mpt
*mpt
;
4834 struct res_mpt
*tmp
;
4841 err
= move_all_busy(dev
, slave
, RES_MPT
);
4843 mlx4_warn(dev
, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4846 spin_lock_irq(mlx4_tlock(dev
));
4847 list_for_each_entry_safe(mpt
, tmp
, mpt_list
, com
.list
) {
4848 spin_unlock_irq(mlx4_tlock(dev
));
4849 if (mpt
->com
.owner
== slave
) {
4850 mptn
= mpt
->com
.res_id
;
4851 state
= mpt
->com
.from_state
;
4852 while (state
!= 0) {
4854 case RES_MPT_RESERVED
:
4855 __mlx4_mpt_release(dev
, mpt
->key
);
4856 spin_lock_irq(mlx4_tlock(dev
));
4857 rb_erase(&mpt
->com
.node
,
4858 &tracker
->res_tree
[RES_MPT
]);
4859 list_del(&mpt
->com
.list
);
4860 spin_unlock_irq(mlx4_tlock(dev
));
4861 mlx4_release_resource(dev
, slave
,
4867 case RES_MPT_MAPPED
:
4868 __mlx4_mpt_free_icm(dev
, mpt
->key
);
4869 state
= RES_MPT_RESERVED
;
4874 err
= mlx4_cmd(dev
, in_param
, mptn
, 0,
4876 MLX4_CMD_TIME_CLASS_A
,
4879 mlx4_dbg(dev
, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4882 atomic_dec(&mpt
->mtt
->ref_count
);
4883 state
= RES_MPT_MAPPED
;
4890 spin_lock_irq(mlx4_tlock(dev
));
4892 spin_unlock_irq(mlx4_tlock(dev
));
4895 static void rem_slave_mtts(struct mlx4_dev
*dev
, int slave
)
4897 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4898 struct mlx4_resource_tracker
*tracker
=
4899 &priv
->mfunc
.master
.res_tracker
;
4900 struct list_head
*mtt_list
=
4901 &tracker
->slave_list
[slave
].res_list
[RES_MTT
];
4902 struct res_mtt
*mtt
;
4903 struct res_mtt
*tmp
;
4909 err
= move_all_busy(dev
, slave
, RES_MTT
);
4911 mlx4_warn(dev
, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4914 spin_lock_irq(mlx4_tlock(dev
));
4915 list_for_each_entry_safe(mtt
, tmp
, mtt_list
, com
.list
) {
4916 spin_unlock_irq(mlx4_tlock(dev
));
4917 if (mtt
->com
.owner
== slave
) {
4918 base
= mtt
->com
.res_id
;
4919 state
= mtt
->com
.from_state
;
4920 while (state
!= 0) {
4922 case RES_MTT_ALLOCATED
:
4923 __mlx4_free_mtt_range(dev
, base
,
4925 spin_lock_irq(mlx4_tlock(dev
));
4926 rb_erase(&mtt
->com
.node
,
4927 &tracker
->res_tree
[RES_MTT
]);
4928 list_del(&mtt
->com
.list
);
4929 spin_unlock_irq(mlx4_tlock(dev
));
4930 mlx4_release_resource(dev
, slave
, RES_MTT
,
4931 1 << mtt
->order
, 0);
4941 spin_lock_irq(mlx4_tlock(dev
));
4943 spin_unlock_irq(mlx4_tlock(dev
));
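/* When the two ports are bonded, each device-managed steering rule that a
 * VF attached is duplicated ("mirrored") on the other port: the attach
 * wrapper keeps a copy of the attach mailbox with the port swapped
 * (mirr_mbox), and mlx4_do_mirror_rule() replays that copy through
 * MLX4_QP_FLOW_STEERING_ATTACH, recording the resulting reg_id in
 * mirr_rule_id so the mirror can be torn down when the bond is broken.
 */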
4946 static int mlx4_do_mirror_rule(struct mlx4_dev
*dev
, struct res_fs_rule
*fs_rule
)
4948 struct mlx4_cmd_mailbox
*mailbox
;
4950 struct res_fs_rule
*mirr_rule
;
4953 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
4954 if (IS_ERR(mailbox
))
4955 return PTR_ERR(mailbox
);
4957 if (!fs_rule
->mirr_mbox
) {
4958 mlx4_err(dev
, "rule mirroring mailbox is null\n");
4961 memcpy(mailbox
->buf
, fs_rule
->mirr_mbox
, fs_rule
->mirr_mbox_size
);
4962 err
= mlx4_cmd_imm(dev
, mailbox
->dma
, ®_id
, fs_rule
->mirr_mbox_size
>> 2, 0,
4963 MLX4_QP_FLOW_STEERING_ATTACH
, MLX4_CMD_TIME_CLASS_A
,
4965 mlx4_free_cmd_mailbox(dev
, mailbox
);
4970 err
= add_res_range(dev
, fs_rule
->com
.owner
, reg_id
, 1, RES_FS_RULE
, fs_rule
->qpn
);
4974 err
= get_res(dev
, fs_rule
->com
.owner
, reg_id
, RES_FS_RULE
, &mirr_rule
);
4978 fs_rule
->mirr_rule_id
= reg_id
;
4979 mirr_rule
->mirr_rule_id
= 0;
4980 mirr_rule
->mirr_mbox_size
= 0;
4981 mirr_rule
->mirr_mbox
= NULL
;
4982 put_res(dev
, fs_rule
->com
.owner
, reg_id
, RES_FS_RULE
);
4986 rem_res_range(dev
, fs_rule
->com
.owner
, reg_id
, 1, RES_FS_RULE
, 0);
4988 mlx4_cmd(dev
, reg_id
, 0, 0, MLX4_QP_FLOW_STEERING_DETACH
,
4989 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_NATIVE
);
static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
	struct rb_node *p;
	struct res_fs_rule *fs_rule;
	int err = 0;
	LIST_HEAD(mirr_list);

	for (p = rb_first(root); p; p = rb_next(p)) {
		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
		if ((bond && fs_rule->mirr_mbox_size) ||
		    (!bond && !fs_rule->mirr_mbox_size))
			list_add_tail(&fs_rule->mirr_list, &mirr_list);
	}

	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
		if (bond)
			err += mlx4_do_mirror_rule(dev, fs_rule);
		else
			err += mlx4_undo_mirror_rule(dev, fs_rule);
	}

	return err;
}

int mlx4_bond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, true);
}

int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, false);
}
*dev
, int slave
)
5033 struct mlx4_priv
*priv
= mlx4_priv(dev
);
5034 struct mlx4_resource_tracker
*tracker
=
5035 &priv
->mfunc
.master
.res_tracker
;
5036 struct list_head
*fs_rule_list
=
5037 &tracker
->slave_list
[slave
].res_list
[RES_FS_RULE
];
5038 struct res_fs_rule
*fs_rule
;
5039 struct res_fs_rule
*tmp
;
5044 err
= move_all_busy(dev
, slave
, RES_FS_RULE
);
5046 mlx4_warn(dev
, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
5049 spin_lock_irq(mlx4_tlock(dev
));
5050 list_for_each_entry_safe(fs_rule
, tmp
, fs_rule_list
, com
.list
) {
5051 spin_unlock_irq(mlx4_tlock(dev
));
5052 if (fs_rule
->com
.owner
== slave
) {
5053 base
= fs_rule
->com
.res_id
;
5054 state
= fs_rule
->com
.from_state
;
5055 while (state
!= 0) {
5057 case RES_FS_RULE_ALLOCATED
:
5059 err
= mlx4_cmd(dev
, base
, 0, 0,
5060 MLX4_QP_FLOW_STEERING_DETACH
,
5061 MLX4_CMD_TIME_CLASS_A
,
5064 spin_lock_irq(mlx4_tlock(dev
));
5065 rb_erase(&fs_rule
->com
.node
,
5066 &tracker
->res_tree
[RES_FS_RULE
]);
5067 list_del(&fs_rule
->com
.list
);
5068 spin_unlock_irq(mlx4_tlock(dev
));
5078 spin_lock_irq(mlx4_tlock(dev
));
5080 spin_unlock_irq(mlx4_tlock(dev
));
5083 static void rem_slave_eqs(struct mlx4_dev
*dev
, int slave
)
5085 struct mlx4_priv
*priv
= mlx4_priv(dev
);
5086 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
5087 struct list_head
*eq_list
=
5088 &tracker
->slave_list
[slave
].res_list
[RES_EQ
];
5096 err
= move_all_busy(dev
, slave
, RES_EQ
);
5098 mlx4_warn(dev
, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5101 spin_lock_irq(mlx4_tlock(dev
));
5102 list_for_each_entry_safe(eq
, tmp
, eq_list
, com
.list
) {
5103 spin_unlock_irq(mlx4_tlock(dev
));
5104 if (eq
->com
.owner
== slave
) {
5105 eqn
= eq
->com
.res_id
;
5106 state
= eq
->com
.from_state
;
5107 while (state
!= 0) {
5109 case RES_EQ_RESERVED
:
5110 spin_lock_irq(mlx4_tlock(dev
));
5111 rb_erase(&eq
->com
.node
,
5112 &tracker
->res_tree
[RES_EQ
]);
5113 list_del(&eq
->com
.list
);
5114 spin_unlock_irq(mlx4_tlock(dev
));
5120 err
= mlx4_cmd(dev
, slave
, eqn
& 0x3ff,
5121 1, MLX4_CMD_HW2SW_EQ
,
5122 MLX4_CMD_TIME_CLASS_A
,
5125 mlx4_dbg(dev
, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5126 slave
, eqn
& 0x3ff);
5127 atomic_dec(&eq
->mtt
->ref_count
);
5128 state
= RES_EQ_RESERVED
;
5136 spin_lock_irq(mlx4_tlock(dev
));
5138 spin_unlock_irq(mlx4_tlock(dev
));
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int *counters_arr = NULL;
	int i, j;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return;

	do {
		i = 0;
		j = 0;
		spin_lock_irq(mlx4_tlock(dev));
		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
			if (counter->com.owner == slave) {
				counters_arr[i++] = counter->com.res_id;
				rb_erase(&counter->com.node,
					 &tracker->res_tree[RES_COUNTER]);
				list_del(&counter->com.list);
				kfree(counter);
			}
		}
		spin_unlock_irq(mlx4_tlock(dev));

		while (j < i) {
			__mlx4_counter_free(dev, counters_arr[j++]);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	} while (i);

	kfree(counters_arr);
}
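/* Return every XRC domain owned by @slave to the free pool and drop its
 * tracker entry.
 */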
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
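/* Tear down everything the given slave still holds, under that slave's
 * resource-tracker mutex.  MTT users (QPs, SRQs, CQs, MRs, EQs) are released
 * before the MTTs themselves.
 */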
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
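/* Deferred work item that pushes a new VLAN/QoS configuration into every
 * eligible QP owned by a VF via the UPDATE_QP command, presumably after the
 * PF admin changed the VF's VST/VGT VLAN settings.
 */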
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (work->vlan_proto == htons(ETH_P_8021AD))
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
	else /* vst 802.1Q */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
				if (work->vlan_proto == htons(ETH_P_8021AD))
					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
				else
					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
				upd_context->qp_mask |=
					cpu_to_be64(1ULL <<
						    MLX4_UPD_QP_MASK_QOS_VPP);
				upd_context->qp_context.qos_vport =
					work->qos_vport;
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);