/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID		(1ull << 63)

#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1
	struct list_head	list;

	struct list_head	list;

	struct list_head	list;

	struct list_head	list;
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

	struct res_common	com;

	struct list_head	mcg_list;

	/* saved qp params before VST enforcement in order to restore on VGT */
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}
	struct res_common	com;

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,

	struct res_common	com;

	RES_EQ_BUSY = RES_ANY_BUSY,

	struct res_common	com;

	RES_CQ_BUSY = RES_ANY_BUSY,

	struct res_common	com;

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,

	struct res_common	com;

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,

	struct res_common	com;

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,

	struct res_common	com;

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,

	struct res_common	com;
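
/*
 * Every tracked resource lives in two structures at once: an rb-tree per
 * resource type, keyed by res_id, for fast lookup by the command wrappers,
 * and a per-slave list so that everything a given function owns can be torn
 * down when that function resets or goes away.
 */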
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
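
/*
 * Per-resource-type quota accounting: every function has a hard "quota" and
 * a smaller "guaranteed" share.  Requests within the guaranteed share are
 * taken from the reserved pool; anything beyond it must fit in the shared
 * free pool without eating into what is still reserved for other functions.
 * MAC and VLAN entries are accounted per port, all other types globally.
 */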
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EDQUOT;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
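
/*
 * Example (illustrative numbers): with num_instances = 100 and 4 VFs, every
 * function gets guaranteed = 100 / (2 * (4 + 1)) = 10 instances and a quota
 * of 100 / 2 + 10 = 60, so no single function can consume the whole pool
 * while each one is still promised a minimal share.
 */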
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_VF_COUNTERS_PER_PORT;
}
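
/*
 * Illustrative example: with dev->caps.max_counters = 128 this works out to
 * (128 - 1 - 2 * 2) / 1 = 123, i.e. the highest function index that can
 * still be promised MLX4_VF_COUNTERS_PER_PORT counters per port once the
 * sink counter and the PF's per-port counters are set aside.
 */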
474 int mlx4_init_resource_tracker(struct mlx4_dev
*dev
)
476 struct mlx4_priv
*priv
= mlx4_priv(dev
);
479 int max_vfs_guarantee_counter
= get_max_gauranteed_vfs_counter(dev
);
481 priv
->mfunc
.master
.res_tracker
.slave_list
=
482 kzalloc(dev
->num_slaves
* sizeof(struct slave_list
),
484 if (!priv
->mfunc
.master
.res_tracker
.slave_list
)
487 for (i
= 0 ; i
< dev
->num_slaves
; i
++) {
488 for (t
= 0; t
< MLX4_NUM_OF_RESOURCE_TYPE
; ++t
)
489 INIT_LIST_HEAD(&priv
->mfunc
.master
.res_tracker
.
490 slave_list
[i
].res_list
[t
]);
491 mutex_init(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
494 mlx4_dbg(dev
, "Started init_resource_tracker: %ld slaves\n",
496 for (i
= 0 ; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++)
497 priv
->mfunc
.master
.res_tracker
.res_tree
[i
] = RB_ROOT
;
499 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
500 struct resource_allocator
*res_alloc
=
501 &priv
->mfunc
.master
.res_tracker
.res_alloc
[i
];
502 res_alloc
->quota
= kmalloc((dev
->persist
->num_vfs
+ 1) *
503 sizeof(int), GFP_KERNEL
);
504 res_alloc
->guaranteed
= kmalloc((dev
->persist
->num_vfs
+ 1) *
505 sizeof(int), GFP_KERNEL
);
506 if (i
== RES_MAC
|| i
== RES_VLAN
)
507 res_alloc
->allocated
= kzalloc(MLX4_MAX_PORTS
*
508 (dev
->persist
->num_vfs
510 sizeof(int), GFP_KERNEL
);
512 res_alloc
->allocated
= kzalloc((dev
->persist
->
514 sizeof(int), GFP_KERNEL
);
515 /* Reduce the sink counter */
516 if (i
== RES_COUNTER
)
517 res_alloc
->res_free
= dev
->caps
.max_counters
- 1;
519 if (!res_alloc
->quota
|| !res_alloc
->guaranteed
||
520 !res_alloc
->allocated
)
523 spin_lock_init(&res_alloc
->alloc_lock
);
524 for (t
= 0; t
< dev
->persist
->num_vfs
+ 1; t
++) {
525 struct mlx4_active_ports actv_ports
=
526 mlx4_get_active_ports(dev
, t
);
529 initialize_res_quotas(dev
, res_alloc
, RES_QP
,
530 t
, dev
->caps
.num_qps
-
531 dev
->caps
.reserved_qps
-
532 mlx4_num_reserved_sqps(dev
));
535 initialize_res_quotas(dev
, res_alloc
, RES_CQ
,
536 t
, dev
->caps
.num_cqs
-
537 dev
->caps
.reserved_cqs
);
540 initialize_res_quotas(dev
, res_alloc
, RES_SRQ
,
541 t
, dev
->caps
.num_srqs
-
542 dev
->caps
.reserved_srqs
);
545 initialize_res_quotas(dev
, res_alloc
, RES_MPT
,
546 t
, dev
->caps
.num_mpts
-
547 dev
->caps
.reserved_mrws
);
550 initialize_res_quotas(dev
, res_alloc
, RES_MTT
,
551 t
, dev
->caps
.num_mtts
-
552 dev
->caps
.reserved_mtts
);
555 if (t
== mlx4_master_func_num(dev
)) {
556 int max_vfs_pport
= 0;
557 /* Calculate the max vfs per port for */
559 for (j
= 0; j
< dev
->caps
.num_ports
;
561 struct mlx4_slaves_pport slaves_pport
=
562 mlx4_phys_to_slaves_pport(dev
, j
+ 1);
563 unsigned current_slaves
=
564 bitmap_weight(slaves_pport
.slaves
,
565 dev
->caps
.num_ports
) - 1;
566 if (max_vfs_pport
< current_slaves
)
570 res_alloc
->quota
[t
] =
573 res_alloc
->guaranteed
[t
] = 2;
574 for (j
= 0; j
< MLX4_MAX_PORTS
; j
++)
575 res_alloc
->res_port_free
[j
] =
578 res_alloc
->quota
[t
] = MLX4_MAX_MAC_NUM
;
579 res_alloc
->guaranteed
[t
] = 2;
583 if (t
== mlx4_master_func_num(dev
)) {
584 res_alloc
->quota
[t
] = MLX4_MAX_VLAN_NUM
;
585 res_alloc
->guaranteed
[t
] = MLX4_MAX_VLAN_NUM
/ 2;
586 for (j
= 0; j
< MLX4_MAX_PORTS
; j
++)
587 res_alloc
->res_port_free
[j
] =
590 res_alloc
->quota
[t
] = MLX4_MAX_VLAN_NUM
/ 2;
591 res_alloc
->guaranteed
[t
] = 0;
595 res_alloc
->quota
[t
] = dev
->caps
.max_counters
;
596 if (t
== mlx4_master_func_num(dev
))
597 res_alloc
->guaranteed
[t
] =
598 MLX4_PF_COUNTERS_PER_PORT
*
600 else if (t
<= max_vfs_guarantee_counter
)
601 res_alloc
->guaranteed
[t
] =
602 MLX4_VF_COUNTERS_PER_PORT
*
605 res_alloc
->guaranteed
[t
] = 0;
606 res_alloc
->res_free
-= res_alloc
->guaranteed
[t
];
611 if (i
== RES_MAC
|| i
== RES_VLAN
) {
612 for (j
= 0; j
< dev
->caps
.num_ports
; j
++)
613 if (test_bit(j
, actv_ports
.ports
))
614 res_alloc
->res_port_rsvd
[j
] +=
615 res_alloc
->guaranteed
[t
];
617 res_alloc
->res_reserved
+= res_alloc
->guaranteed
[t
];
621 spin_lock_init(&priv
->mfunc
.master
.res_tracker
.lock
);
625 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
626 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
);
627 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
= NULL
;
628 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
);
629 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
= NULL
;
630 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
);
631 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
= NULL
;
636 void mlx4_free_resource_tracker(struct mlx4_dev
*dev
,
637 enum mlx4_res_tracker_free_type type
)
639 struct mlx4_priv
*priv
= mlx4_priv(dev
);
642 if (priv
->mfunc
.master
.res_tracker
.slave_list
) {
643 if (type
!= RES_TR_FREE_STRUCTS_ONLY
) {
644 for (i
= 0; i
< dev
->num_slaves
; i
++) {
645 if (type
== RES_TR_FREE_ALL
||
646 dev
->caps
.function
!= i
)
647 mlx4_delete_all_resources_for_slave(dev
, i
);
649 /* free master's vlans */
650 i
= dev
->caps
.function
;
651 mlx4_reset_roce_gids(dev
, i
);
652 mutex_lock(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
653 rem_slave_vlans(dev
, i
);
654 mutex_unlock(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
657 if (type
!= RES_TR_FREE_SLAVES_ONLY
) {
658 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
659 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
);
660 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
= NULL
;
661 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
);
662 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
= NULL
;
663 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
);
664 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
= NULL
;
666 kfree(priv
->mfunc
.master
.res_tracker
.slave_list
);
667 priv
->mfunc
.master
.res_tracker
.slave_list
= NULL
;
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
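
/*
 * In these QP modify mailboxes the QP context starts at offset 8; the byte
 * at offset 64 of the mailbox is the primary path sched_queue and the byte
 * at offset 35 is the pkey index.  Bit 6 of sched_queue selects the
 * physical port, which is why the port is derived as ((sched >> 6) & 1) + 1
 * before the slave's virtual pkey index is translated through
 * virt2phys_pkey.
 */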
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
727 static int handle_counter(struct mlx4_dev
*dev
, struct mlx4_qp_context
*qpc
,
730 static int update_vport_qp_param(struct mlx4_dev
*dev
,
731 struct mlx4_cmd_mailbox
*inbox
,
734 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
735 struct mlx4_vport_oper_state
*vp_oper
;
736 struct mlx4_priv
*priv
;
740 port
= (qpc
->pri_path
.sched_queue
& 0x40) ? 2 : 1;
741 priv
= mlx4_priv(dev
);
742 vp_oper
= &priv
->mfunc
.master
.vf_oper
[slave
].vport
[port
];
743 qp_type
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
745 err
= handle_counter(dev
, qpc
, slave
, port
);
749 if (MLX4_VGT
!= vp_oper
->state
.default_vlan
) {
750 /* the reserved QPs (special, proxy, tunnel)
751 * do not operate over vlans
753 if (mlx4_is_qp_reserved(dev
, qpn
))
756 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
757 if (qp_type
== MLX4_QP_ST_UD
||
758 (qp_type
== MLX4_QP_ST_MLX
&& mlx4_is_eth(dev
, port
))) {
759 if (dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_VSD_INIT2RTR
) {
760 *(__be32
*)inbox
->buf
=
761 cpu_to_be32(be32_to_cpu(*(__be32
*)inbox
->buf
) |
762 MLX4_QP_OPTPAR_VLAN_STRIPPING
);
763 qpc
->param3
&= ~cpu_to_be32(MLX4_STRIP_VLAN
);
765 struct mlx4_update_qp_params params
= {.flags
= 0};
767 err
= mlx4_update_qp(dev
, qpn
, MLX4_UPDATE_QP_VSD
, ¶ms
);
773 /* preserve IF_COUNTER flag */
774 qpc
->pri_path
.vlan_control
&=
775 MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER
;
776 if (vp_oper
->state
.link_state
== IFLA_VF_LINK_STATE_DISABLE
&&
777 dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_UPDATE_QP
) {
778 qpc
->pri_path
.vlan_control
|=
779 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
780 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED
|
781 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED
|
782 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
783 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
|
784 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
785 } else if (0 != vp_oper
->state
.default_vlan
) {
786 qpc
->pri_path
.vlan_control
|=
787 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
788 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
789 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
;
790 } else { /* priority tagged */
791 qpc
->pri_path
.vlan_control
|=
792 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
793 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
796 qpc
->pri_path
.fvl_rx
|= MLX4_FVL_RX_FORCE_ETH_VLAN
;
797 qpc
->pri_path
.vlan_index
= vp_oper
->vlan_idx
;
798 qpc
->pri_path
.fl
|= MLX4_FL_CV
| MLX4_FL_ETH_HIDE_CQE_VLAN
;
799 qpc
->pri_path
.feup
|= MLX4_FEUP_FORCE_ETH_UP
| MLX4_FVL_FORCE_ETH_VLAN
;
800 qpc
->pri_path
.sched_queue
&= 0xC7;
801 qpc
->pri_path
.sched_queue
|= (vp_oper
->state
.default_qos
) << 3;
802 qpc
->qos_vport
= vp_oper
->state
.qos_vport
;
804 if (vp_oper
->state
.spoofchk
) {
805 qpc
->pri_path
.feup
|= MLX4_FSM_FORCE_ETH_SRC_MAC
;
806 qpc
->pri_path
.grh_mylmc
= (0x80 & qpc
->pri_path
.grh_mylmc
) + vp_oper
->mac_idx
;
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
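
/*
 * get_res()/put_res() implement a simple busy protocol: acquiring a
 * resource saves its current state in from_state and marks it RES_ANY_BUSY
 * under the tracker lock, so a concurrent wrapper touching the same
 * resource fails fast with -EBUSY; put_res() restores the saved state once
 * the command has been handled.
 */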
896 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
897 u64 in_param
, u64
*out_param
, int port
);
899 static int handle_existing_counter(struct mlx4_dev
*dev
, u8 slave
, int port
,
902 struct res_common
*r
;
903 struct res_counter
*counter
;
906 if (counter_index
== MLX4_SINK_COUNTER_INDEX(dev
))
909 spin_lock_irq(mlx4_tlock(dev
));
910 r
= find_res(dev
, counter_index
, RES_COUNTER
);
911 if (!r
|| r
->owner
!= slave
)
913 counter
= container_of(r
, struct res_counter
, com
);
915 counter
->port
= port
;
917 spin_unlock_irq(mlx4_tlock(dev
));
921 static int handle_unexisting_counter(struct mlx4_dev
*dev
,
922 struct mlx4_qp_context
*qpc
, u8 slave
,
925 struct mlx4_priv
*priv
= mlx4_priv(dev
);
926 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
927 struct res_common
*tmp
;
928 struct res_counter
*counter
;
929 u64 counter_idx
= MLX4_SINK_COUNTER_INDEX(dev
);
932 spin_lock_irq(mlx4_tlock(dev
));
933 list_for_each_entry(tmp
,
934 &tracker
->slave_list
[slave
].res_list
[RES_COUNTER
],
936 counter
= container_of(tmp
, struct res_counter
, com
);
937 if (port
== counter
->port
) {
938 qpc
->pri_path
.counter_index
= counter
->com
.res_id
;
939 spin_unlock_irq(mlx4_tlock(dev
));
943 spin_unlock_irq(mlx4_tlock(dev
));
945 /* No existing counter, need to allocate a new counter */
946 err
= counter_alloc_res(dev
, slave
, RES_OP_RESERVE
, 0, 0, &counter_idx
,
948 if (err
== -ENOENT
) {
950 } else if (err
&& err
!= -ENOSPC
) {
951 mlx4_err(dev
, "%s: failed to create new counter for slave %d err %d\n",
952 __func__
, slave
, err
);
954 qpc
->pri_path
.counter_index
= counter_idx
;
955 mlx4_dbg(dev
, "%s: alloc new counter for slave %d index %d\n",
956 __func__
, slave
, qpc
->pri_path
.counter_index
);
963 static int handle_counter(struct mlx4_dev
*dev
, struct mlx4_qp_context
*qpc
,
966 if (qpc
->pri_path
.counter_index
!= MLX4_SINK_COUNTER_INDEX(dev
))
967 return handle_existing_counter(dev
, slave
, port
,
968 qpc
->pri_path
.counter_index
);
970 return handle_unexisting_counter(dev
, qpc
, slave
, port
);
973 static struct res_common
*alloc_qp_tr(int id
)
977 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
981 ret
->com
.res_id
= id
;
982 ret
->com
.state
= RES_QP_RESERVED
;
984 INIT_LIST_HEAD(&ret
->mcg_list
);
985 spin_lock_init(&ret
->mcg_spl
);
986 atomic_set(&ret
->ref_count
, 0);
991 static struct res_common
*alloc_mtt_tr(int id
, int order
)
995 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
999 ret
->com
.res_id
= id
;
1001 ret
->com
.state
= RES_MTT_ALLOCATED
;
1002 atomic_set(&ret
->ref_count
, 0);
1007 static struct res_common
*alloc_mpt_tr(int id
, int key
)
1009 struct res_mpt
*ret
;
1011 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1015 ret
->com
.res_id
= id
;
1016 ret
->com
.state
= RES_MPT_RESERVED
;
1022 static struct res_common
*alloc_eq_tr(int id
)
1026 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1030 ret
->com
.res_id
= id
;
1031 ret
->com
.state
= RES_EQ_RESERVED
;
1036 static struct res_common
*alloc_cq_tr(int id
)
1040 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1044 ret
->com
.res_id
= id
;
1045 ret
->com
.state
= RES_CQ_ALLOCATED
;
1046 atomic_set(&ret
->ref_count
, 0);
1051 static struct res_common
*alloc_srq_tr(int id
)
1053 struct res_srq
*ret
;
1055 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1059 ret
->com
.res_id
= id
;
1060 ret
->com
.state
= RES_SRQ_ALLOCATED
;
1061 atomic_set(&ret
->ref_count
, 0);
1066 static struct res_common
*alloc_counter_tr(int id
, int port
)
1068 struct res_counter
*ret
;
1070 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1074 ret
->com
.res_id
= id
;
1075 ret
->com
.state
= RES_COUNTER_ALLOCATED
;
1081 static struct res_common
*alloc_xrcdn_tr(int id
)
1083 struct res_xrcdn
*ret
;
1085 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1089 ret
->com
.res_id
= id
;
1090 ret
->com
.state
= RES_XRCD_ALLOCATED
;
1095 static struct res_common
*alloc_fs_rule_tr(u64 id
, int qpn
)
1097 struct res_fs_rule
*ret
;
1099 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1103 ret
->com
.res_id
= id
;
1104 ret
->com
.state
= RES_FS_RULE_ALLOCATED
;
1109 static struct res_common
*alloc_tr(u64 id
, enum mlx4_resource type
, int slave
,
1112 struct res_common
*ret
;
1116 ret
= alloc_qp_tr(id
);
1119 ret
= alloc_mpt_tr(id
, extra
);
1122 ret
= alloc_mtt_tr(id
, extra
);
1125 ret
= alloc_eq_tr(id
);
1128 ret
= alloc_cq_tr(id
);
1131 ret
= alloc_srq_tr(id
);
1134 pr_err("implementation missing\n");
1137 ret
= alloc_counter_tr(id
, extra
);
1140 ret
= alloc_xrcdn_tr(id
);
1143 ret
= alloc_fs_rule_tr(id
, extra
);
1154 int mlx4_calc_vf_counters(struct mlx4_dev
*dev
, int slave
, int port
,
1155 struct mlx4_counter
*data
)
1157 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1158 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1159 struct res_common
*tmp
;
1160 struct res_counter
*counter
;
1164 memset(data
, 0, sizeof(*data
));
1166 counters_arr
= kmalloc_array(dev
->caps
.max_counters
,
1167 sizeof(*counters_arr
), GFP_KERNEL
);
1171 spin_lock_irq(mlx4_tlock(dev
));
1172 list_for_each_entry(tmp
,
1173 &tracker
->slave_list
[slave
].res_list
[RES_COUNTER
],
1175 counter
= container_of(tmp
, struct res_counter
, com
);
1176 if (counter
->port
== port
) {
1177 counters_arr
[i
] = (int)tmp
->res_id
;
1181 spin_unlock_irq(mlx4_tlock(dev
));
1182 counters_arr
[i
] = -1;
1186 while (counters_arr
[i
] != -1) {
1187 err
= mlx4_get_counter_stats(dev
, counters_arr
[i
], data
,
1190 memset(data
, 0, sizeof(*data
));
1197 kfree(counters_arr
);
1201 static int add_res_range(struct mlx4_dev
*dev
, int slave
, u64 base
, int count
,
1202 enum mlx4_resource type
, int extra
)
1206 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1207 struct res_common
**res_arr
;
1208 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1209 struct rb_root
*root
= &tracker
->res_tree
[type
];
1211 res_arr
= kzalloc(count
* sizeof *res_arr
, GFP_KERNEL
);
1215 for (i
= 0; i
< count
; ++i
) {
1216 res_arr
[i
] = alloc_tr(base
+ i
, type
, slave
, extra
);
1218 for (--i
; i
>= 0; --i
)
1226 spin_lock_irq(mlx4_tlock(dev
));
1227 for (i
= 0; i
< count
; ++i
) {
1228 if (find_res(dev
, base
+ i
, type
)) {
1232 err
= res_tracker_insert(root
, res_arr
[i
]);
1235 list_add_tail(&res_arr
[i
]->list
,
1236 &tracker
->slave_list
[slave
].res_list
[type
]);
1238 spin_unlock_irq(mlx4_tlock(dev
));
1244 for (--i
; i
>= 0; --i
) {
1245 rb_erase(&res_arr
[i
]->node
, root
);
1246 list_del_init(&res_arr
[i
]->list
);
1249 spin_unlock_irq(mlx4_tlock(dev
));
1251 for (i
= 0; i
< count
; ++i
)
1259 static int remove_qp_ok(struct res_qp
*res
)
1261 if (res
->com
.state
== RES_QP_BUSY
|| atomic_read(&res
->ref_count
) ||
1262 !list_empty(&res
->mcg_list
)) {
1263 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1264 res
->com
.state
, atomic_read(&res
->ref_count
));
1266 } else if (res
->com
.state
!= RES_QP_RESERVED
) {
1273 static int remove_mtt_ok(struct res_mtt
*res
, int order
)
1275 if (res
->com
.state
== RES_MTT_BUSY
||
1276 atomic_read(&res
->ref_count
)) {
1277 pr_devel("%s-%d: state %s, ref_count %d\n",
1279 mtt_states_str(res
->com
.state
),
1280 atomic_read(&res
->ref_count
));
1282 } else if (res
->com
.state
!= RES_MTT_ALLOCATED
)
1284 else if (res
->order
!= order
)
1290 static int remove_mpt_ok(struct res_mpt
*res
)
1292 if (res
->com
.state
== RES_MPT_BUSY
)
1294 else if (res
->com
.state
!= RES_MPT_RESERVED
)
static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}
1310 static int remove_counter_ok(struct res_counter
*res
)
1312 if (res
->com
.state
== RES_COUNTER_BUSY
)
1314 else if (res
->com
.state
!= RES_COUNTER_ALLOCATED
)
1320 static int remove_xrcdn_ok(struct res_xrcdn
*res
)
1322 if (res
->com
.state
== RES_XRCD_BUSY
)
1324 else if (res
->com
.state
!= RES_XRCD_ALLOCATED
)
1330 static int remove_fs_rule_ok(struct res_fs_rule
*res
)
1332 if (res
->com
.state
== RES_FS_RULE_BUSY
)
1334 else if (res
->com
.state
!= RES_FS_RULE_ALLOCATED
)
1340 static int remove_cq_ok(struct res_cq
*res
)
1342 if (res
->com
.state
== RES_CQ_BUSY
)
1344 else if (res
->com
.state
!= RES_CQ_ALLOCATED
)
1350 static int remove_srq_ok(struct res_srq
*res
)
1352 if (res
->com
.state
== RES_SRQ_BUSY
)
1354 else if (res
->com
.state
!= RES_SRQ_ALLOCATED
)
1360 static int remove_ok(struct res_common
*res
, enum mlx4_resource type
, int extra
)
1364 return remove_qp_ok((struct res_qp
*)res
);
1366 return remove_cq_ok((struct res_cq
*)res
);
1368 return remove_srq_ok((struct res_srq
*)res
);
1370 return remove_mpt_ok((struct res_mpt
*)res
);
1372 return remove_mtt_ok((struct res_mtt
*)res
, extra
);
1376 return remove_eq_ok((struct res_eq
*)res
);
1378 return remove_counter_ok((struct res_counter
*)res
);
1380 return remove_xrcdn_ok((struct res_xrcdn
*)res
);
1382 return remove_fs_rule_ok((struct res_fs_rule
*)res
);
1388 static int rem_res_range(struct mlx4_dev
*dev
, int slave
, u64 base
, int count
,
1389 enum mlx4_resource type
, int extra
)
1393 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1394 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1395 struct res_common
*r
;
1397 spin_lock_irq(mlx4_tlock(dev
));
1398 for (i
= base
; i
< base
+ count
; ++i
) {
1399 r
= res_tracker_lookup(&tracker
->res_tree
[type
], i
);
1404 if (r
->owner
!= slave
) {
1408 err
= remove_ok(r
, type
, extra
);
1413 for (i
= base
; i
< base
+ count
; ++i
) {
1414 r
= res_tracker_lookup(&tracker
->res_tree
[type
], i
);
1415 rb_erase(&r
->node
, &tracker
->res_tree
[type
]);
1422 spin_unlock_irq(mlx4_tlock(dev
));
1427 static int qp_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int qpn
,
1428 enum res_qp_states state
, struct res_qp
**qp
,
1431 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1432 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1436 spin_lock_irq(mlx4_tlock(dev
));
1437 r
= res_tracker_lookup(&tracker
->res_tree
[RES_QP
], qpn
);
1440 else if (r
->com
.owner
!= slave
)
1445 mlx4_dbg(dev
, "%s: failed RES_QP, 0x%llx\n",
1446 __func__
, r
->com
.res_id
);
1450 case RES_QP_RESERVED
:
1451 if (r
->com
.state
== RES_QP_MAPPED
&& !alloc
)
1454 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n", r
->com
.res_id
);
1459 if ((r
->com
.state
== RES_QP_RESERVED
&& alloc
) ||
1460 r
->com
.state
== RES_QP_HW
)
1463 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n",
1471 if (r
->com
.state
!= RES_QP_MAPPED
)
1479 r
->com
.from_state
= r
->com
.state
;
1480 r
->com
.to_state
= state
;
1481 r
->com
.state
= RES_QP_BUSY
;
1487 spin_unlock_irq(mlx4_tlock(dev
));
1492 static int mr_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1493 enum res_mpt_states state
, struct res_mpt
**mpt
)
1495 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1496 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1500 spin_lock_irq(mlx4_tlock(dev
));
1501 r
= res_tracker_lookup(&tracker
->res_tree
[RES_MPT
], index
);
1504 else if (r
->com
.owner
!= slave
)
1512 case RES_MPT_RESERVED
:
1513 if (r
->com
.state
!= RES_MPT_MAPPED
)
1517 case RES_MPT_MAPPED
:
1518 if (r
->com
.state
!= RES_MPT_RESERVED
&&
1519 r
->com
.state
!= RES_MPT_HW
)
1524 if (r
->com
.state
!= RES_MPT_MAPPED
)
1532 r
->com
.from_state
= r
->com
.state
;
1533 r
->com
.to_state
= state
;
1534 r
->com
.state
= RES_MPT_BUSY
;
1540 spin_unlock_irq(mlx4_tlock(dev
));
1545 static int eq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1546 enum res_eq_states state
, struct res_eq
**eq
)
1548 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1549 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1553 spin_lock_irq(mlx4_tlock(dev
));
1554 r
= res_tracker_lookup(&tracker
->res_tree
[RES_EQ
], index
);
1557 else if (r
->com
.owner
!= slave
)
1565 case RES_EQ_RESERVED
:
1566 if (r
->com
.state
!= RES_EQ_HW
)
1571 if (r
->com
.state
!= RES_EQ_RESERVED
)
1580 r
->com
.from_state
= r
->com
.state
;
1581 r
->com
.to_state
= state
;
1582 r
->com
.state
= RES_EQ_BUSY
;
1588 spin_unlock_irq(mlx4_tlock(dev
));
1593 static int cq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int cqn
,
1594 enum res_cq_states state
, struct res_cq
**cq
)
1596 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1597 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1601 spin_lock_irq(mlx4_tlock(dev
));
1602 r
= res_tracker_lookup(&tracker
->res_tree
[RES_CQ
], cqn
);
1605 } else if (r
->com
.owner
!= slave
) {
1607 } else if (state
== RES_CQ_ALLOCATED
) {
1608 if (r
->com
.state
!= RES_CQ_HW
)
1610 else if (atomic_read(&r
->ref_count
))
1614 } else if (state
!= RES_CQ_HW
|| r
->com
.state
!= RES_CQ_ALLOCATED
) {
1621 r
->com
.from_state
= r
->com
.state
;
1622 r
->com
.to_state
= state
;
1623 r
->com
.state
= RES_CQ_BUSY
;
1628 spin_unlock_irq(mlx4_tlock(dev
));
1633 static int srq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1634 enum res_srq_states state
, struct res_srq
**srq
)
1636 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1637 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1641 spin_lock_irq(mlx4_tlock(dev
));
1642 r
= res_tracker_lookup(&tracker
->res_tree
[RES_SRQ
], index
);
1645 } else if (r
->com
.owner
!= slave
) {
1647 } else if (state
== RES_SRQ_ALLOCATED
) {
1648 if (r
->com
.state
!= RES_SRQ_HW
)
1650 else if (atomic_read(&r
->ref_count
))
1652 } else if (state
!= RES_SRQ_HW
|| r
->com
.state
!= RES_SRQ_ALLOCATED
) {
1657 r
->com
.from_state
= r
->com
.state
;
1658 r
->com
.to_state
= state
;
1659 r
->com
.state
= RES_SRQ_BUSY
;
1664 spin_unlock_irq(mlx4_tlock(dev
));
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
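
/*
 * The *_res_start_move_to()/res_end_move()/res_abort_move() helpers form a
 * small state machine: the start helper validates the transition and marks
 * the entry busy, res_end_move() commits to_state once the firmware command
 * succeeds, and res_abort_move() rolls back to from_state if it fails.
 */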
1708 static int qp_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1709 u64 in_param
, u64
*out_param
)
1719 case RES_OP_RESERVE
:
1720 count
= get_param_l(&in_param
) & 0xffffff;
1721 /* Turn off all unsupported QP allocation flags that the
1722 * slave tries to set.
1724 flags
= (get_param_l(&in_param
) >> 24) & dev
->caps
.alloc_res_qp_mask
;
1725 align
= get_param_h(&in_param
);
1726 err
= mlx4_grant_resource(dev
, slave
, RES_QP
, count
, 0);
1730 err
= __mlx4_qp_reserve_range(dev
, count
, align
, &base
, flags
);
1732 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1736 err
= add_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
1738 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1739 __mlx4_qp_release_range(dev
, base
, count
);
1742 set_param_l(out_param
, base
);
1744 case RES_OP_MAP_ICM
:
1745 qpn
= get_param_l(&in_param
) & 0x7fffff;
1746 if (valid_reserved(dev
, slave
, qpn
)) {
1747 err
= add_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
1752 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
,
1757 if (!fw_reserved(dev
, qpn
)) {
1758 err
= __mlx4_qp_alloc_icm(dev
, qpn
, GFP_KERNEL
);
1760 res_abort_move(dev
, slave
, RES_QP
, qpn
);
1765 res_end_move(dev
, slave
, RES_QP
, qpn
);
1775 static int mtt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1776 u64 in_param
, u64
*out_param
)
1782 if (op
!= RES_OP_RESERVE_AND_MAP
)
1785 order
= get_param_l(&in_param
);
1787 err
= mlx4_grant_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1791 base
= __mlx4_alloc_mtt_range(dev
, order
);
1793 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1797 err
= add_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
1799 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1800 __mlx4_free_mtt_range(dev
, base
, order
);
1802 set_param_l(out_param
, base
);
1808 static int mpt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1809 u64 in_param
, u64
*out_param
)
1814 struct res_mpt
*mpt
;
1817 case RES_OP_RESERVE
:
1818 err
= mlx4_grant_resource(dev
, slave
, RES_MPT
, 1, 0);
1822 index
= __mlx4_mpt_reserve(dev
);
1824 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1827 id
= index
& mpt_mask(dev
);
1829 err
= add_res_range(dev
, slave
, id
, 1, RES_MPT
, index
);
1831 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1832 __mlx4_mpt_release(dev
, index
);
1835 set_param_l(out_param
, index
);
1837 case RES_OP_MAP_ICM
:
1838 index
= get_param_l(&in_param
);
1839 id
= index
& mpt_mask(dev
);
1840 err
= mr_res_start_move_to(dev
, slave
, id
,
1841 RES_MPT_MAPPED
, &mpt
);
1845 err
= __mlx4_mpt_alloc_icm(dev
, mpt
->key
, GFP_KERNEL
);
1847 res_abort_move(dev
, slave
, RES_MPT
, id
);
1851 res_end_move(dev
, slave
, RES_MPT
, id
);
1857 static int cq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1858 u64 in_param
, u64
*out_param
)
1864 case RES_OP_RESERVE_AND_MAP
:
1865 err
= mlx4_grant_resource(dev
, slave
, RES_CQ
, 1, 0);
1869 err
= __mlx4_cq_alloc_icm(dev
, &cqn
);
1871 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1875 err
= add_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
1877 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1878 __mlx4_cq_free_icm(dev
, cqn
);
1882 set_param_l(out_param
, cqn
);
1892 static int srq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1893 u64 in_param
, u64
*out_param
)
1899 case RES_OP_RESERVE_AND_MAP
:
1900 err
= mlx4_grant_resource(dev
, slave
, RES_SRQ
, 1, 0);
1904 err
= __mlx4_srq_alloc_icm(dev
, &srqn
);
1906 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1910 err
= add_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
1912 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1913 __mlx4_srq_free_icm(dev
, srqn
);
1917 set_param_l(out_param
, srqn
);
1927 static int mac_find_smac_ix_in_slave(struct mlx4_dev
*dev
, int slave
, int port
,
1928 u8 smac_index
, u64
*mac
)
1930 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1931 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1932 struct list_head
*mac_list
=
1933 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1934 struct mac_res
*res
, *tmp
;
1936 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1937 if (res
->smac_index
== smac_index
&& res
->port
== (u8
) port
) {
1945 static int mac_add_to_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
, int port
, u8 smac_index
)
1947 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1948 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1949 struct list_head
*mac_list
=
1950 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1951 struct mac_res
*res
, *tmp
;
1953 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1954 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
1955 /* mac found. update ref count */
1961 if (mlx4_grant_resource(dev
, slave
, RES_MAC
, 1, port
))
1963 res
= kzalloc(sizeof *res
, GFP_KERNEL
);
1965 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
1969 res
->port
= (u8
) port
;
1970 res
->smac_index
= smac_index
;
1972 list_add_tail(&res
->list
,
1973 &tracker
->slave_list
[slave
].res_list
[RES_MAC
]);
1977 static void mac_del_from_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
,
1980 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1981 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1982 struct list_head
*mac_list
=
1983 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1984 struct mac_res
*res
, *tmp
;
1986 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1987 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
1988 if (!--res
->ref_count
) {
1989 list_del(&res
->list
);
1990 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
1998 static void rem_slave_macs(struct mlx4_dev
*dev
, int slave
)
2000 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2001 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2002 struct list_head
*mac_list
=
2003 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
2004 struct mac_res
*res
, *tmp
;
2007 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
2008 list_del(&res
->list
);
2009 /* dereference the mac the num times the slave referenced it */
2010 for (i
= 0; i
< res
->ref_count
; i
++)
2011 __mlx4_unregister_mac(dev
, res
->port
, res
->mac
);
2012 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, res
->port
);
2017 static int mac_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2018 u64 in_param
, u64
*out_param
, int in_port
)
2025 if (op
!= RES_OP_RESERVE_AND_MAP
)
2028 port
= !in_port
? get_param_l(out_param
) : in_port
;
2029 port
= mlx4_slave_convert_port(
2036 err
= __mlx4_register_mac(dev
, port
, mac
);
2039 set_param_l(out_param
, err
);
2044 err
= mac_add_to_slave(dev
, slave
, mac
, port
, smac_index
);
2046 __mlx4_unregister_mac(dev
, port
, mac
);
2051 static int vlan_add_to_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2052 int port
, int vlan_index
)
2054 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2055 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2056 struct list_head
*vlan_list
=
2057 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2058 struct vlan_res
*res
, *tmp
;
2060 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2061 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2062 /* vlan found. update ref count */
2068 if (mlx4_grant_resource(dev
, slave
, RES_VLAN
, 1, port
))
2070 res
= kzalloc(sizeof(*res
), GFP_KERNEL
);
2072 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, port
);
2076 res
->port
= (u8
) port
;
2077 res
->vlan_index
= vlan_index
;
2079 list_add_tail(&res
->list
,
2080 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
]);
2085 static void vlan_del_from_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2088 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2089 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2090 struct list_head
*vlan_list
=
2091 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2092 struct vlan_res
*res
, *tmp
;
2094 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2095 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2096 if (!--res
->ref_count
) {
2097 list_del(&res
->list
);
2098 mlx4_release_resource(dev
, slave
, RES_VLAN
,
2107 static void rem_slave_vlans(struct mlx4_dev
*dev
, int slave
)
2109 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2110 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2111 struct list_head
*vlan_list
=
2112 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2113 struct vlan_res
*res
, *tmp
;
2116 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2117 list_del(&res
->list
);
2118 /* dereference the vlan the num times the slave referenced it */
2119 for (i
= 0; i
< res
->ref_count
; i
++)
2120 __mlx4_unregister_vlan(dev
, res
->port
, res
->vlan
);
2121 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, res
->port
);
2126 static int vlan_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2127 u64 in_param
, u64
*out_param
, int in_port
)
2129 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2130 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2136 port
= !in_port
? get_param_l(out_param
) : in_port
;
2138 if (!port
|| op
!= RES_OP_RESERVE_AND_MAP
)
2141 port
= mlx4_slave_convert_port(
2146 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2147 if (!in_port
&& port
> 0 && port
<= dev
->caps
.num_ports
) {
2148 slave_state
[slave
].old_vlan_api
= true;
2152 vlan
= (u16
) in_param
;
2154 err
= __mlx4_register_vlan(dev
, port
, vlan
, &vlan_index
);
2156 set_param_l(out_param
, (u32
) vlan_index
);
2157 err
= vlan_add_to_slave(dev
, slave
, vlan
, port
, vlan_index
);
2159 __mlx4_unregister_vlan(dev
, port
, vlan
);
2164 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2165 u64 in_param
, u64
*out_param
, int port
)
2170 if (op
!= RES_OP_RESERVE
)
2173 err
= mlx4_grant_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2177 err
= __mlx4_counter_alloc(dev
, &index
);
2179 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2183 err
= add_res_range(dev
, slave
, index
, 1, RES_COUNTER
, port
);
2185 __mlx4_counter_free(dev
, index
);
2186 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2188 set_param_l(out_param
, index
);
2194 static int xrcdn_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2195 u64 in_param
, u64
*out_param
)
2200 if (op
!= RES_OP_RESERVE
)
2203 err
= __mlx4_xrcd_alloc(dev
, &xrcdn
);
2207 err
= add_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2209 __mlx4_xrcd_free(dev
, xrcdn
);
2211 set_param_l(out_param
, xrcdn
);
2216 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2217 struct mlx4_vhcr
*vhcr
,
2218 struct mlx4_cmd_mailbox
*inbox
,
2219 struct mlx4_cmd_mailbox
*outbox
,
2220 struct mlx4_cmd_info
*cmd
)
2223 int alop
= vhcr
->op_modifier
;
2225 switch (vhcr
->in_modifier
& 0xFF) {
2227 err
= qp_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2228 vhcr
->in_param
, &vhcr
->out_param
);
2232 err
= mtt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2233 vhcr
->in_param
, &vhcr
->out_param
);
2237 err
= mpt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2238 vhcr
->in_param
, &vhcr
->out_param
);
2242 err
= cq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2243 vhcr
->in_param
, &vhcr
->out_param
);
2247 err
= srq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2248 vhcr
->in_param
, &vhcr
->out_param
);
2252 err
= mac_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2253 vhcr
->in_param
, &vhcr
->out_param
,
2254 (vhcr
->in_modifier
>> 8) & 0xFF);
2258 err
= vlan_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2259 vhcr
->in_param
, &vhcr
->out_param
,
2260 (vhcr
->in_modifier
>> 8) & 0xFF);
2264 err
= counter_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2265 vhcr
->in_param
, &vhcr
->out_param
, 0);
2269 err
= xrcdn_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2270 vhcr
->in_param
, &vhcr
->out_param
);
2281 static int qp_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2290 case RES_OP_RESERVE
:
2291 base
= get_param_l(&in_param
) & 0x7fffff;
2292 count
= get_param_h(&in_param
);
2293 err
= rem_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
2296 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
2297 __mlx4_qp_release_range(dev
, base
, count
);
2299 case RES_OP_MAP_ICM
:
2300 qpn
= get_param_l(&in_param
) & 0x7fffff;
2301 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_RESERVED
,
2306 if (!fw_reserved(dev
, qpn
))
2307 __mlx4_qp_free_icm(dev
, qpn
);
2309 res_end_move(dev
, slave
, RES_QP
, qpn
);
2311 if (valid_reserved(dev
, slave
, qpn
))
2312 err
= rem_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
2321 static int mtt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2322 u64 in_param
, u64
*out_param
)
2328 if (op
!= RES_OP_RESERVE_AND_MAP
)
2331 base
= get_param_l(&in_param
);
2332 order
= get_param_h(&in_param
);
2333 err
= rem_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
2335 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
2336 __mlx4_free_mtt_range(dev
, base
, order
);
2341 static int mpt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2347 struct res_mpt
*mpt
;
2350 case RES_OP_RESERVE
:
2351 index
= get_param_l(&in_param
);
2352 id
= index
& mpt_mask(dev
);
2353 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2357 put_res(dev
, slave
, id
, RES_MPT
);
2359 err
= rem_res_range(dev
, slave
, id
, 1, RES_MPT
, 0);
2362 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
2363 __mlx4_mpt_release(dev
, index
);
2365 case RES_OP_MAP_ICM
:
2366 index
= get_param_l(&in_param
);
2367 id
= index
& mpt_mask(dev
);
2368 err
= mr_res_start_move_to(dev
, slave
, id
,
2369 RES_MPT_RESERVED
, &mpt
);
2373 __mlx4_mpt_free_icm(dev
, mpt
->key
);
2374 res_end_move(dev
, slave
, RES_MPT
, id
);
2384 static int cq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2385 u64 in_param
, u64
*out_param
)
2391 case RES_OP_RESERVE_AND_MAP
:
2392 cqn
= get_param_l(&in_param
);
2393 err
= rem_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
2397 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
2398 __mlx4_cq_free_icm(dev
, cqn
);
2409 static int srq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2410 u64 in_param
, u64
*out_param
)
2416 case RES_OP_RESERVE_AND_MAP
:
2417 srqn
= get_param_l(&in_param
);
2418 err
= rem_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
2422 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
2423 __mlx4_srq_free_icm(dev
, srqn
);
2434 static int mac_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2435 u64 in_param
, u64
*out_param
, int in_port
)
2441 case RES_OP_RESERVE_AND_MAP
:
2442 port
= !in_port
? get_param_l(out_param
) : in_port
;
2443 port
= mlx4_slave_convert_port(
2448 mac_del_from_slave(dev
, slave
, in_param
, port
);
2449 __mlx4_unregister_mac(dev
, port
, in_param
);
2460 static int vlan_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2461 u64 in_param
, u64
*out_param
, int port
)
2463 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2464 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2467 port
= mlx4_slave_convert_port(
2473 case RES_OP_RESERVE_AND_MAP
:
2474 if (slave_state
[slave
].old_vlan_api
)
2478 vlan_del_from_slave(dev
, slave
, in_param
, port
);
2479 __mlx4_unregister_vlan(dev
, port
, in_param
);
2489 static int counter_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2490 u64 in_param
, u64
*out_param
)
2495 if (op
!= RES_OP_RESERVE
)
2498 index
= get_param_l(&in_param
);
2499 if (index
== MLX4_SINK_COUNTER_INDEX(dev
))
2502 err
= rem_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
2506 __mlx4_counter_free(dev
, index
);
2507 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2512 static int xrcdn_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2513 u64 in_param
, u64
*out_param
)
2518 if (op
!= RES_OP_RESERVE
)
2521 xrcdn
= get_param_l(&in_param
);
2522 err
= rem_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2526 __mlx4_xrcd_free(dev
, xrcdn
);
2531 int mlx4_FREE_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2532 struct mlx4_vhcr
*vhcr
,
2533 struct mlx4_cmd_mailbox
*inbox
,
2534 struct mlx4_cmd_mailbox
*outbox
,
2535 struct mlx4_cmd_info
*cmd
)
2538 int alop
= vhcr
->op_modifier
;
2540 switch (vhcr
->in_modifier
& 0xFF) {
2542 err
= qp_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2547 err
= mtt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2548 vhcr
->in_param
, &vhcr
->out_param
);
2552 err
= mpt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2557 err
= cq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2558 vhcr
->in_param
, &vhcr
->out_param
);
2562 err
= srq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2563 vhcr
->in_param
, &vhcr
->out_param
);
2567 err
= mac_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2568 vhcr
->in_param
, &vhcr
->out_param
,
2569 (vhcr
->in_modifier
>> 8) & 0xFF);
2573 err
= vlan_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2574 vhcr
->in_param
, &vhcr
->out_param
,
2575 (vhcr
->in_modifier
>> 8) & 0xFF);
2579 err
= counter_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2580 vhcr
->in_param
, &vhcr
->out_param
);
2584 err
= xrcdn_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2585 vhcr
->in_param
, &vhcr
->out_param
);
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
2639 static int qp_get_mtt_size(struct mlx4_qp_context
*qpc
)
2641 int page_shift
= (qpc
->log_page_size
& 0x3f) + 12;
2642 int log_sq_size
= (qpc
->sq_size_stride
>> 3) & 0xf;
2643 int log_sq_sride
= qpc
->sq_size_stride
& 7;
2644 int log_rq_size
= (qpc
->rq_size_stride
>> 3) & 0xf;
2645 int log_rq_stride
= qpc
->rq_size_stride
& 7;
2646 int srq
= (be32_to_cpu(qpc
->srqn
) >> 24) & 1;
2647 int rss
= (be32_to_cpu(qpc
->flags
) >> 13) & 1;
2648 u32 ts
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
2649 int xrc
= (ts
== MLX4_QP_ST_XRC
) ? 1 : 0;
2654 int page_offset
= (be32_to_cpu(qpc
->params2
) >> 6) & 0x3f;
2656 sq_size
= 1 << (log_sq_size
+ log_sq_sride
+ 4);
2657 rq_size
= (srq
|rss
|xrc
) ? 0 : (1 << (log_rq_size
+ log_rq_stride
+ 4));
2658 total_mem
= sq_size
+ rq_size
;
2660 roundup_pow_of_two((total_mem
+ (page_offset
<< 6)) >>
2666 static int check_mtt_range(struct mlx4_dev
*dev
, int slave
, int start
,
2667 int size
, struct res_mtt
*mtt
)
2669 int res_start
= mtt
->com
.res_id
;
2670 int res_size
= (1 << mtt
->order
);
2672 if (start
< res_start
|| start
+ size
> res_start
+ res_size
)
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {

		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {

	phys = mr_phys_mpt(inbox->buf);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base,
			      mr_get_mtt_size(inbox->buf), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_end_move(dev, slave, RES_MPT, id);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_abort_move(dev, slave, RES_MPT, id);
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);

	res_abort_move(dev, slave, RES_MPT, id);
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);

	if (mpt->com.from_state == RES_MPT_MAPPED) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
			&mlx4_priv(dev)->mr_table.dmpt_table,

		if (NULL == mpt_entry || NULL == outbox->buf) {

		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

	} else if (mpt->com.from_state == RES_MPT_HW) {
		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, id, RES_MPT);
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox);
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);

	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->vlan_control = 0;
	qp->pri_path_fl = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);

	err = get_res(dev, slave, scqn, RES_CQ, &scq);

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	atomic_inc(&rcq->ref_count);
	atomic_inc(&scq->ref_count);
	put_res(dev, slave, scqn, RES_CQ);
	atomic_inc(&srq->ref_count);
	put_res(dev, slave, srqn, RES_SRQ);
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	put_res(dev, slave, srqn, RES_SRQ);
	put_res(dev, slave, scqn, RES_CQ);
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_abort_move(dev, slave, RES_QP, qpn);
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 10) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_abort_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
}

int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	u8 get = vhcr->op_modifier;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
}
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	if (slave != mlx4_master_func_num(dev)) {
		qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
		/* setting QP rate-limit is disallowed for VFs */
		if (qp_ctx->rate_limit_params)

	case MLX4_QP_ST_XRC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev))
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);

					if (qp_ctx->pri_path.mgid_index >= num_gids)

				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);

					if (qp_ctx->alt_path.mgid_index >= num_gids)

	case MLX4_QP_ST_MLX:
		qpn = vhcr->in_modifier & 0x7fffff;
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (transition == QP_TRANS_INIT2RTR &&
		    slave != mlx4_master_func_num(dev) &&
		    mlx4_is_qp_reserved(dev, qpn) &&
		    !mlx4_vf_smi_enabled(dev, slave, port)) {
			/* only enabled VFs may create MLX proxy QPs */
			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
				 __func__, slave, port);
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0; /* TBD this is broken but I don't handle it since
			   we don't really use it */

	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	put_res(dev, slave, rmtt->com.res_id, RES_MTT);
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_abort_move(dev, slave, RES_EQ, res_id);
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;

	if (!priv->mfunc.master.slave_state)

	/* check for slave valid, slave not PF, and slave active */
	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 10) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);

	if (req->com.from_state != RES_EQ_HW) {

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);

	put_res(dev, slave, res_id, RES_EQ);

	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);

	err = get_res(dev, slave, res_id, RES_EQ, &eq);

	if (eq->com.from_state != RES_EQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, res_id, RES_EQ);
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq = NULL;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_abort_move(dev, slave, RES_CQ, cqn);
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq = NULL;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);

	res_abort_move(dev, slave, RES_CQ, cqn);
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	if (cq->com.from_state != RES_CQ_HW)

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, cqn, RES_CQ);
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);

	if (orig_mtt != cq->mtt) {

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	if (cq->com.from_state != RES_CQ_HW)

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, cqn, RES_CQ);
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq = NULL;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_abort_move(dev, slave, RES_SRQ, srqn);
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int srqn = vhcr->in_modifier;
	struct res_srq *srq = NULL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&srq->mtt->ref_count);
	atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	res_abort_move(dev, slave, RES_SRQ, srqn);
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	if (srq->com.from_state != RES_SRQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, srqn, RES_SRQ);
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	if (srq->com.from_state != RES_SRQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, srqn, RES_SRQ);
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int qpn = vhcr->in_modifier & 0x7fffff;

	err = get_res(dev, slave, qpn, RES_QP, &qp);

	if (qp->com.from_state != RES_QP_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, qpn, RES_QP);
}

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox)
{
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
	int port = mlx4_slave_convert_port(
		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;

	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |

	if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
		qpc->pri_path.sched_queue = pri_sched_queue;

	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
		port = mlx4_slave_convert_port(
			dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)

		qpc->alt_path.sched_queue =
			(qpc->alt_path.sched_queue & ~(1 << 6)) |
}

static int roce_verify_mac(struct mlx4_dev *dev, int slave,
			   struct mlx4_qp_context *qpc,
			   struct mlx4_cmd_mailbox *inbox)
{
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	u8 sched = *(u8 *)(inbox->buf + 64);

	port = (sched >> 6 & 1) + 1;
	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);

	if (roce_verify_mac(dev, slave, qpc, inbox))

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;

	err = get_res(dev, slave, qpn, RES_QP, &qp);

	if (qp->com.from_state != RES_QP_HW) {

	err = update_vport_qp_param(dev, inbox, slave, qpn);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	qp->sched_queue = orig_sched_queue;
	qp->param3 = orig_param3;
	qp->vlan_control = orig_vlan_control;
	qp->fvl_rx = orig_fvl_rx;
	qp->pri_path_fl = orig_pri_path_fl;
	qp->vlan_index = orig_vlan_index;
	qp->feup = orig_feup;

	put_res(dev, slave, qpn, RES_QP);
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	int err = adjust_qp_sched_queue(dev, slave, context, inbox);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int qpn = vhcr->in_modifier & 0x7fffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);

	res_abort_move(dev, slave, RES_QP, qpn);
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {

	memcpy(res->gid, gid, 16);
	res->reg_id = reg_id;
	list_add_tail(&res->list, &rqp->mcg_list);

	spin_unlock_irq(&rqp->mcg_spl);
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)

	*reg_id = res->reg_id;
	list_del(&res->list);

	spin_unlock_irq(&rqp->mcg_spl);
}
static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		int port = mlx4_slave_convert_port(dev, slave, gid[5]);

		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_loopback, prot,

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH) {
			int port = mlx4_slave_convert_port(dev, slave, gid[5]);

		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
}

static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
		     u8 gid[16], enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
}

static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
			    u8 *gid, enum mlx4_protocol prot)
{
	if (prot != MLX4_PROT_ETH)
		return 0;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
}
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);

	err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
			type, &reg_id);
		pr_err("Fail to attach rule to qp 0x%x\n", qpn);

	err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);

	err = mlx4_adjust_port(dev, slave, gid, prot);

	err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);

	err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",

	put_res(dev, slave, qpn, RES_QP);

	qp_detach(dev, &qp, gid, prot, type, reg_id);

	put_res(dev, slave, qpn, RES_QP);
}
/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a mac address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;

	/* make sure it isn't multicast or broadcast mac */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))

		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
}

static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
					 struct _rule_hw *eth_header)
{
	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		struct mlx4_net_trans_rule_hw_eth *eth =
			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
				 next_rule->rsvd == 0;

		ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
	}
}
/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
		memmove(l4_header, eth_header, sizeof(*l4_header));

	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);

	pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
}
#define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
	1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
	1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)

int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd_info)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u64 pri_addr_path_mask;
	struct mlx4_update_qp_context *cmd;

	cmd = (struct mlx4_update_qp_context *)inbox->buf;

	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))

	if ((pri_addr_path_mask &
	     (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
	    !(dev->caps.flags2 &
	      MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			  "Src check LB for slave %d isn't supported\n",

	/* Just change the smac for the QP */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);

	port = (rqp->sched_queue >> 6 & 1) + 1;

	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
		smac_index = cmd->qp_context.pri_path.grh_mylmc;
		err = mac_find_smac_ix_in_slave(dev, slave, port,
			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",

	err = mlx4_cmd(dev, inbox->dma,
		       vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);

	put_res(dev, slave, qpn, RES_QP);
}
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	err = mlx4_slave_convert_port(dev, slave, ctrl->port);

	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);

	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
		handle_eth_header_mcast_prio(ctrl, rule_header);

	if (slave == dev->caps.function)

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {

	case MLX4_NET_TRANS_RULE_ID_IB:
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {

		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;

		pr_err("Corrupted mailbox\n");

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
		mlx4_err(dev, "Fail to add flow steering resources\n");

		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,

	atomic_inc(&rqp->ref_count);

	put_res(dev, slave, qpn, RES_QP);
}
*dev
, int slave
,
4378 struct mlx4_vhcr
*vhcr
,
4379 struct mlx4_cmd_mailbox
*inbox
,
4380 struct mlx4_cmd_mailbox
*outbox
,
4381 struct mlx4_cmd_info
*cmd
)
4385 struct res_fs_rule
*rrule
;
4387 if (dev
->caps
.steering_mode
!=
4388 MLX4_STEERING_MODE_DEVICE_MANAGED
)
4391 err
= get_res(dev
, slave
, vhcr
->in_param
, RES_FS_RULE
, &rrule
);
4394 /* Release the rule form busy state before removal */
4395 put_res(dev
, slave
, vhcr
->in_param
, RES_FS_RULE
);
4396 err
= get_res(dev
, slave
, rrule
->qpn
, RES_QP
, &rqp
);
4400 err
= rem_res_range(dev
, slave
, vhcr
->in_param
, 1, RES_FS_RULE
, 0);
4402 mlx4_err(dev
, "Fail to remove flow steering resources\n");
4406 err
= mlx4_cmd(dev
, vhcr
->in_param
, 0, 0,
4407 MLX4_QP_FLOW_STEERING_DETACH
, MLX4_CMD_TIME_CLASS_A
,
4410 atomic_dec(&rqp
->ref_count
);
4412 put_res(dev
, slave
, rrule
->qpn
, RES_QP
);
4417 BUSY_MAX_RETRIES
= 10
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);

		list_del(&rgid->list);
}
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (r->state == RES_ANY_BUSY) {
					 "%s id 0x%llx is busy\n",

			r->from_state = r->state;
			r->state = RES_ANY_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;

	busy = _move_all_busy(dev, slave, type, 0);
	if (time_after(jiffies, begin + 5 * HZ))

	busy = _move_all_busy(dev, slave, type, 1);
}
*dev
, int slave
)
4518 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4519 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4520 struct list_head
*qp_list
=
4521 &tracker
->slave_list
[slave
].res_list
[RES_QP
];
4529 err
= move_all_busy(dev
, slave
, RES_QP
);
4531 mlx4_warn(dev
, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4534 spin_lock_irq(mlx4_tlock(dev
));
4535 list_for_each_entry_safe(qp
, tmp
, qp_list
, com
.list
) {
4536 spin_unlock_irq(mlx4_tlock(dev
));
4537 if (qp
->com
.owner
== slave
) {
4538 qpn
= qp
->com
.res_id
;
4539 detach_qp(dev
, slave
, qp
);
4540 state
= qp
->com
.from_state
;
4541 while (state
!= 0) {
4543 case RES_QP_RESERVED
:
4544 spin_lock_irq(mlx4_tlock(dev
));
4545 rb_erase(&qp
->com
.node
,
4546 &tracker
->res_tree
[RES_QP
]);
4547 list_del(&qp
->com
.list
);
4548 spin_unlock_irq(mlx4_tlock(dev
));
4549 if (!valid_reserved(dev
, slave
, qpn
)) {
4550 __mlx4_qp_release_range(dev
, qpn
, 1);
4551 mlx4_release_resource(dev
, slave
,
4558 if (!valid_reserved(dev
, slave
, qpn
))
4559 __mlx4_qp_free_icm(dev
, qpn
);
4560 state
= RES_QP_RESERVED
;
4564 err
= mlx4_cmd(dev
, in_param
,
4567 MLX4_CMD_TIME_CLASS_A
,
4570 mlx4_dbg(dev
, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4571 slave
, qp
->local_qpn
);
4572 atomic_dec(&qp
->rcq
->ref_count
);
4573 atomic_dec(&qp
->scq
->ref_count
);
4574 atomic_dec(&qp
->mtt
->ref_count
);
4576 atomic_dec(&qp
->srq
->ref_count
);
4577 state
= RES_QP_MAPPED
;
4584 spin_lock_irq(mlx4_tlock(dev
));
4586 spin_unlock_irq(mlx4_tlock(dev
));
4589 static void rem_slave_srqs(struct mlx4_dev
*dev
, int slave
)
4591 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4592 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4593 struct list_head
*srq_list
=
4594 &tracker
->slave_list
[slave
].res_list
[RES_SRQ
];
4595 struct res_srq
*srq
;
4596 struct res_srq
*tmp
;
4603 err
= move_all_busy(dev
, slave
, RES_SRQ
);
4605 mlx4_warn(dev
, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4608 spin_lock_irq(mlx4_tlock(dev
));
4609 list_for_each_entry_safe(srq
, tmp
, srq_list
, com
.list
) {
4610 spin_unlock_irq(mlx4_tlock(dev
));
4611 if (srq
->com
.owner
== slave
) {
4612 srqn
= srq
->com
.res_id
;
4613 state
= srq
->com
.from_state
;
4614 while (state
!= 0) {
4616 case RES_SRQ_ALLOCATED
:
4617 __mlx4_srq_free_icm(dev
, srqn
);
4618 spin_lock_irq(mlx4_tlock(dev
));
4619 rb_erase(&srq
->com
.node
,
4620 &tracker
->res_tree
[RES_SRQ
]);
4621 list_del(&srq
->com
.list
);
4622 spin_unlock_irq(mlx4_tlock(dev
));
4623 mlx4_release_resource(dev
, slave
,
4631 err
= mlx4_cmd(dev
, in_param
, srqn
, 1,
4633 MLX4_CMD_TIME_CLASS_A
,
4636 mlx4_dbg(dev
, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4639 atomic_dec(&srq
->mtt
->ref_count
);
4641 atomic_dec(&srq
->cq
->ref_count
);
4642 state
= RES_SRQ_ALLOCATED
;
4650 spin_lock_irq(mlx4_tlock(dev
));
4652 spin_unlock_irq(mlx4_tlock(dev
));
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];

	err = move_all_busy(dev, slave, RES_CQ);
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,

					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_TIME_CLASS_A,
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;

		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;

	err = move_all_busy(dev, slave, RES_MPT);
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;

					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_TIME_CLASS_A,
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",

					atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;

		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;

	err = move_all_busy(dev, slave, RES_MTT);
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);

		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;

	err = move_all_busy(dev, slave, RES_FS_RULE);
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				case RES_FS_RULE_ALLOCATED:
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));

		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];

	err = move_all_busy(dev, slave, RES_EQ);
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));

					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn & 0x3ff);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;

		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int *counters_arr = NULL;

	err = move_all_busy(dev, slave, RES_COUNTER);
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			counters_arr[i++] = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);

	spin_unlock_irq(mlx4_tlock(dev));

		__mlx4_counter_free(dev, counters_arr[j++]);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	kfree(counters_arr);
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;

	err = move_all_busy(dev, slave, RES_XRCD);
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			__mlx4_xrcd_free(dev, xrcdn);
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
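/* A note on ordering, inferred from the call sequence below rather than from
 * any comment in the source: flow-steering rules and QPs are torn down before
 * the CQs, SRQs and MTTs they hold references on, and MTTs, counters and XRC
 * domains are released near the end, so reference counts can drop to zero
 * before the referenced resources are freed.
 */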
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	u64 qp_path_mask_vlan_ctrl =
		((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
			    (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
			    (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
			    (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
			    (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
			    (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
			    (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
	int port, errors = 0;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));

			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));

			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =

				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
				upd_context->qp_mask |=
					MLX4_UPD_QP_MASK_QOS_VPP);
				upd_context->qp_context.qos_vport =

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);

		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);