/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4_stats.h"

#define MLX4_MAC_VALID		(1ull << 63)

#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
	const char		*func_name;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
	/* VF DMFS mbox with port flipped */
	u64			mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32			mirr_mbox_size;
	struct list_head	mirr_list;
	u64			mirr_rule_id;
};
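/*
 * Added overview note: the tracker keeps every HW resource owned by a
 * function in two places - a per-slave list (slave_list[slave].res_list[type])
 * used for bulk cleanup when a VF goes away, and a per-type red-black tree
 * (res_tree[type]) keyed by res_id for fast lookup on the wrapped command
 * path.  A resource whose state is RES_ANY_BUSY is in the middle of a state
 * transition and may not be grabbed by another command until it is released.
 */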
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = rb_entry(node, struct res_common,
						  node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = rb_entry(*new, struct res_common,
						   node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
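/*
 * Added note on the accounting model used below: every function (PF and VFs)
 * has a "guaranteed" amount carved out of a reserved pool and a larger
 * "quota" it may not exceed.  Allocations beyond the guarantee are served
 * from the shared free pool only while that pool still covers everybody's
 * outstanding guarantees; otherwise the request is refused.  MAC and VLAN
 * resources are accounted per port, everything else per device.
 */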
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EDQUOT;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
				(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
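/*
 * Added worked example with illustrative numbers only (not taken from real
 * device capabilities): with num_vfs = 7 and num_instances = 64K QPs, each
 * function gets guaranteed = 64K / (2 * 8) = 4K and
 * quota = 64K / 2 + 4K = 36K, so the guarantees of all eight functions
 * together consume half of the QP space and the rest is shared on a
 * first-come basis.
 */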
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int
mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
				 struct resource_allocator *res_alloc,
				 int vf)
{
	struct mlx4_active_ports actv_ports;
	int ports, counters_guaranteed;

	/* For master, only allocate according to the number of phys ports */
	if (vf == mlx4_master_func_num(dev))
		return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;

	/* calculate real number of ports for the VF */
	actv_ports = mlx4_get_active_ports(dev, vf);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;

	/* If we do not have enough counters for this VF, do not
	 * allocate any for it. '-1' to reduce the sink counter.
	 */
	if ((res_alloc->res_reserved + counters_guaranteed) >
	    (dev->caps.max_counters - 1))
		return 0;

	return counters_guaranteed;
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kcalloc(dev->num_slaves, sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
						 sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
						      sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated =
				kcalloc(MLX4_MAX_PORTS *
					(dev->persist->num_vfs + 1),
					sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated =
				kcalloc(dev->persist->num_vfs + 1,
					sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for */
					/* the sriov PF */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] =
					mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	kfree(priv->mfunc.master.res_tracker.slave_list);
	priv->mfunc.master.res_tracker.slave_list = NULL;
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  int slave, int port);
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in payload and phv is set so
				 * hw see it as untagged. Block tagged instead.
				 */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* vst 802.1Q */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			}
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;
		else
			qpc->pri_path.fl |= MLX4_FL_CV;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
	switch (t) {
	default:
		return "INVALID RESOURCE";
	}
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
*dev
, int slave
, u64 res_id
,
892 enum mlx4_resource type
,
893 void *res
, const char *func_name
)
895 struct res_common
*r
;
898 spin_lock_irq(mlx4_tlock(dev
));
899 r
= find_res(dev
, res_id
, type
);
905 if (r
->state
== RES_ANY_BUSY
) {
907 "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
908 func_name
, slave
, res_id
, mlx4_resource_type_to_str(type
),
914 if (r
->owner
!= slave
) {
919 r
->from_state
= r
->state
;
920 r
->state
= RES_ANY_BUSY
;
921 r
->func_name
= func_name
;
924 *((struct res_common
**)res
) = r
;
927 spin_unlock_irq(mlx4_tlock(dev
));
931 #define get_res(dev, slave, res_id, type, res) \
932 _get_res((dev), (slave), (res_id), (type), (res), __func__)
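/*
 * Typical usage on a wrapper path (sketch): get_res() marks the entry busy
 * and returns it, the caller examines or updates it, and put_res() restores
 * the previous state, e.g.:
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	...
 *	put_res(dev, slave, id, RES_MPT);
 */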
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
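/*
 * Added note on the counter policy implemented by the helpers below: if the
 * QP context already names a counter the slave owns, it is simply bound to
 * the port; otherwise an existing per-port counter of that slave is reused,
 * and only as a last resort is a new counter reserved.  When no counters are
 * left the QP silently keeps the shared "sink" counter index.
 */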
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index  = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
*res
)
1338 if (res
->com
.state
== RES_QP_BUSY
|| atomic_read(&res
->ref_count
) ||
1339 !list_empty(&res
->mcg_list
)) {
1340 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1341 res
->com
.state
, atomic_read(&res
->ref_count
));
1343 } else if (res
->com
.state
!= RES_QP_RESERVED
) {
1350 static int remove_mtt_ok(struct res_mtt
*res
, int order
)
1352 if (res
->com
.state
== RES_MTT_BUSY
||
1353 atomic_read(&res
->ref_count
)) {
1354 pr_devel("%s-%d: state %s, ref_count %d\n",
1356 mtt_states_str(res
->com
.state
),
1357 atomic_read(&res
->ref_count
));
1359 } else if (res
->com
.state
!= RES_MTT_ALLOCATED
)
1361 else if (res
->order
!= order
)
1367 static int remove_mpt_ok(struct res_mpt
*res
)
1369 if (res
->com
.state
== RES_MPT_BUSY
)
1371 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1377 static int remove_eq_ok(struct res_eq
*res
)
1379 if (res
->com
.state
== RES_MPT_BUSY
)
1381 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1387 static int remove_counter_ok(struct res_counter
*res
)
1389 if (res
->com
.state
== RES_COUNTER_BUSY
)
1391 else if (res
->com
.state
!= RES_COUNTER_ALLOCATED
)
1397 static int remove_xrcdn_ok(struct res_xrcdn
*res
)
1399 if (res
->com
.state
== RES_XRCD_BUSY
)
1401 else if (res
->com
.state
!= RES_XRCD_ALLOCATED
)
1407 static int remove_fs_rule_ok(struct res_fs_rule
*res
)
1409 if (res
->com
.state
== RES_FS_RULE_BUSY
)
1411 else if (res
->com
.state
!= RES_FS_RULE_ALLOCATED
)
1417 static int remove_cq_ok(struct res_cq
*res
)
1419 if (res
->com
.state
== RES_CQ_BUSY
)
1421 else if (res
->com
.state
!= RES_CQ_ALLOCATED
)
1427 static int remove_srq_ok(struct res_srq
*res
)
1429 if (res
->com
.state
== RES_SRQ_BUSY
)
1431 else if (res
->com
.state
!= RES_SRQ_ALLOCATED
)
1437 static int remove_ok(struct res_common
*res
, enum mlx4_resource type
, int extra
)
1441 return remove_qp_ok((struct res_qp
*)res
);
1443 return remove_cq_ok((struct res_cq
*)res
);
1445 return remove_srq_ok((struct res_srq
*)res
);
1447 return remove_mpt_ok((struct res_mpt
*)res
);
1449 return remove_mtt_ok((struct res_mtt
*)res
, extra
);
1453 return remove_eq_ok((struct res_eq
*)res
);
1455 return remove_counter_ok((struct res_counter
*)res
);
1457 return remove_xrcdn_ok((struct res_xrcdn
*)res
);
1459 return remove_fs_rule_ok((struct res_fs_rule
*)res
);
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
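/*
 * Added note on the move protocol used by the *_res_start_move_to() helpers
 * below: each helper validates that the requested transition is legal from
 * the current state, parks the resource in the BUSY state and remembers the
 * target state.  res_end_move() then commits the transition while
 * res_abort_move() rolls it back, so a failed firmware command leaves the
 * tracker unchanged.
 */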
1504 static int qp_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int qpn
,
1505 enum res_qp_states state
, struct res_qp
**qp
,
1508 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1509 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1513 spin_lock_irq(mlx4_tlock(dev
));
1514 r
= res_tracker_lookup(&tracker
->res_tree
[RES_QP
], qpn
);
1517 else if (r
->com
.owner
!= slave
)
1522 mlx4_dbg(dev
, "%s: failed RES_QP, 0x%llx\n",
1523 __func__
, r
->com
.res_id
);
1527 case RES_QP_RESERVED
:
1528 if (r
->com
.state
== RES_QP_MAPPED
&& !alloc
)
1531 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n", r
->com
.res_id
);
1536 if ((r
->com
.state
== RES_QP_RESERVED
&& alloc
) ||
1537 r
->com
.state
== RES_QP_HW
)
1540 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n",
1548 if (r
->com
.state
!= RES_QP_MAPPED
)
1556 r
->com
.from_state
= r
->com
.state
;
1557 r
->com
.to_state
= state
;
1558 r
->com
.state
= RES_QP_BUSY
;
1564 spin_unlock_irq(mlx4_tlock(dev
));
1569 static int mr_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1570 enum res_mpt_states state
, struct res_mpt
**mpt
)
1572 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1573 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1577 spin_lock_irq(mlx4_tlock(dev
));
1578 r
= res_tracker_lookup(&tracker
->res_tree
[RES_MPT
], index
);
1581 else if (r
->com
.owner
!= slave
)
1589 case RES_MPT_RESERVED
:
1590 if (r
->com
.state
!= RES_MPT_MAPPED
)
1594 case RES_MPT_MAPPED
:
1595 if (r
->com
.state
!= RES_MPT_RESERVED
&&
1596 r
->com
.state
!= RES_MPT_HW
)
1601 if (r
->com
.state
!= RES_MPT_MAPPED
)
1609 r
->com
.from_state
= r
->com
.state
;
1610 r
->com
.to_state
= state
;
1611 r
->com
.state
= RES_MPT_BUSY
;
1617 spin_unlock_irq(mlx4_tlock(dev
));
1622 static int eq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1623 enum res_eq_states state
, struct res_eq
**eq
)
1625 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1626 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1630 spin_lock_irq(mlx4_tlock(dev
));
1631 r
= res_tracker_lookup(&tracker
->res_tree
[RES_EQ
], index
);
1634 else if (r
->com
.owner
!= slave
)
1642 case RES_EQ_RESERVED
:
1643 if (r
->com
.state
!= RES_EQ_HW
)
1648 if (r
->com
.state
!= RES_EQ_RESERVED
)
1657 r
->com
.from_state
= r
->com
.state
;
1658 r
->com
.to_state
= state
;
1659 r
->com
.state
= RES_EQ_BUSY
;
1663 spin_unlock_irq(mlx4_tlock(dev
));
1671 static int cq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int cqn
,
1672 enum res_cq_states state
, struct res_cq
**cq
)
1674 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1675 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1679 spin_lock_irq(mlx4_tlock(dev
));
1680 r
= res_tracker_lookup(&tracker
->res_tree
[RES_CQ
], cqn
);
1683 } else if (r
->com
.owner
!= slave
) {
1685 } else if (state
== RES_CQ_ALLOCATED
) {
1686 if (r
->com
.state
!= RES_CQ_HW
)
1688 else if (atomic_read(&r
->ref_count
))
1692 } else if (state
!= RES_CQ_HW
|| r
->com
.state
!= RES_CQ_ALLOCATED
) {
1699 r
->com
.from_state
= r
->com
.state
;
1700 r
->com
.to_state
= state
;
1701 r
->com
.state
= RES_CQ_BUSY
;
1706 spin_unlock_irq(mlx4_tlock(dev
));
1711 static int srq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1712 enum res_srq_states state
, struct res_srq
**srq
)
1714 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1715 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1719 spin_lock_irq(mlx4_tlock(dev
));
1720 r
= res_tracker_lookup(&tracker
->res_tree
[RES_SRQ
], index
);
1723 } else if (r
->com
.owner
!= slave
) {
1725 } else if (state
== RES_SRQ_ALLOCATED
) {
1726 if (r
->com
.state
!= RES_SRQ_HW
)
1728 else if (atomic_read(&r
->ref_count
))
1730 } else if (state
!= RES_SRQ_HW
|| r
->com
.state
!= RES_SRQ_ALLOCATED
) {
1735 r
->com
.from_state
= r
->com
.state
;
1736 r
->com
.to_state
= state
;
1737 r
->com
.state
= RES_SRQ_BUSY
;
1742 spin_unlock_irq(mlx4_tlock(dev
));
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
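/*
 * QPs below reserved_qps_cnt[MLX4_QP_REGION_FW] are owned by firmware and
 * already have ICM backing, so slaves must not allocate or free ICM for
 * them; valid_reserved() additionally lets a slave touch only its own
 * proxy/tunnel QPs from the reserved range.
 */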
1786 static int qp_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1787 u64 in_param
, u64
*out_param
)
1797 case RES_OP_RESERVE
:
1798 count
= get_param_l(&in_param
) & 0xffffff;
1799 /* Turn off all unsupported QP allocation flags that the
1800 * slave tries to set.
1802 flags
= (get_param_l(&in_param
) >> 24) & dev
->caps
.alloc_res_qp_mask
;
1803 align
= get_param_h(&in_param
);
1804 err
= mlx4_grant_resource(dev
, slave
, RES_QP
, count
, 0);
1808 err
= __mlx4_qp_reserve_range(dev
, count
, align
, &base
, flags
);
1810 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1814 err
= add_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
1816 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1817 __mlx4_qp_release_range(dev
, base
, count
);
1820 set_param_l(out_param
, base
);
1822 case RES_OP_MAP_ICM
:
1823 qpn
= get_param_l(&in_param
) & 0x7fffff;
1824 if (valid_reserved(dev
, slave
, qpn
)) {
1825 err
= add_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
1830 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
,
1835 if (!fw_reserved(dev
, qpn
)) {
1836 err
= __mlx4_qp_alloc_icm(dev
, qpn
);
1838 res_abort_move(dev
, slave
, RES_QP
, qpn
);
1843 res_end_move(dev
, slave
, RES_QP
, qpn
);
1853 static int mtt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1854 u64 in_param
, u64
*out_param
)
1860 if (op
!= RES_OP_RESERVE_AND_MAP
)
1863 order
= get_param_l(&in_param
);
1865 err
= mlx4_grant_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1869 base
= __mlx4_alloc_mtt_range(dev
, order
);
1871 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1875 err
= add_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
1877 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1878 __mlx4_free_mtt_range(dev
, base
, order
);
1880 set_param_l(out_param
, base
);
1886 static int mpt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1887 u64 in_param
, u64
*out_param
)
1892 struct res_mpt
*mpt
;
1895 case RES_OP_RESERVE
:
1896 err
= mlx4_grant_resource(dev
, slave
, RES_MPT
, 1, 0);
1900 index
= __mlx4_mpt_reserve(dev
);
1902 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1905 id
= index
& mpt_mask(dev
);
1907 err
= add_res_range(dev
, slave
, id
, 1, RES_MPT
, index
);
1909 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1910 __mlx4_mpt_release(dev
, index
);
1913 set_param_l(out_param
, index
);
1915 case RES_OP_MAP_ICM
:
1916 index
= get_param_l(&in_param
);
1917 id
= index
& mpt_mask(dev
);
1918 err
= mr_res_start_move_to(dev
, slave
, id
,
1919 RES_MPT_MAPPED
, &mpt
);
1923 err
= __mlx4_mpt_alloc_icm(dev
, mpt
->key
);
1925 res_abort_move(dev
, slave
, RES_MPT
, id
);
1929 res_end_move(dev
, slave
, RES_MPT
, id
);
1935 static int cq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1936 u64 in_param
, u64
*out_param
)
1942 case RES_OP_RESERVE_AND_MAP
:
1943 err
= mlx4_grant_resource(dev
, slave
, RES_CQ
, 1, 0);
1947 err
= __mlx4_cq_alloc_icm(dev
, &cqn
);
1949 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1953 err
= add_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
1955 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1956 __mlx4_cq_free_icm(dev
, cqn
);
1960 set_param_l(out_param
, cqn
);
1970 static int srq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1971 u64 in_param
, u64
*out_param
)
1977 case RES_OP_RESERVE_AND_MAP
:
1978 err
= mlx4_grant_resource(dev
, slave
, RES_SRQ
, 1, 0);
1982 err
= __mlx4_srq_alloc_icm(dev
, &srqn
);
1984 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1988 err
= add_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
1990 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1991 __mlx4_srq_free_icm(dev
, srqn
);
1995 set_param_l(out_param
, srqn
);
2005 static int mac_find_smac_ix_in_slave(struct mlx4_dev
*dev
, int slave
, int port
,
2006 u8 smac_index
, u64
*mac
)
2008 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2009 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2010 struct list_head
*mac_list
=
2011 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
2012 struct mac_res
*res
, *tmp
;
2014 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
2015 if (res
->smac_index
== smac_index
&& res
->port
== (u8
) port
) {
2023 static int mac_add_to_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
, int port
, u8 smac_index
)
2025 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2026 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2027 struct list_head
*mac_list
=
2028 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
2029 struct mac_res
*res
, *tmp
;
2031 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
2032 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
2033 /* mac found. update ref count */
2039 if (mlx4_grant_resource(dev
, slave
, RES_MAC
, 1, port
))
2041 res
= kzalloc(sizeof(*res
), GFP_KERNEL
);
2043 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
2047 res
->port
= (u8
) port
;
2048 res
->smac_index
= smac_index
;
2050 list_add_tail(&res
->list
,
2051 &tracker
->slave_list
[slave
].res_list
[RES_MAC
]);
2055 static void mac_del_from_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
,
2058 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2059 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2060 struct list_head
*mac_list
=
2061 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
2062 struct mac_res
*res
, *tmp
;
2064 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
2065 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
2066 if (!--res
->ref_count
) {
2067 list_del(&res
->list
);
2068 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
2076 static void rem_slave_macs(struct mlx4_dev
*dev
, int slave
)
2078 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2079 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2080 struct list_head
*mac_list
=
2081 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
2082 struct mac_res
*res
, *tmp
;
2085 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
2086 list_del(&res
->list
);
2087 /* dereference the mac the num times the slave referenced it */
2088 for (i
= 0; i
< res
->ref_count
; i
++)
2089 __mlx4_unregister_mac(dev
, res
->port
, res
->mac
);
2090 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, res
->port
);
2095 static int mac_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2096 u64 in_param
, u64
*out_param
, int in_port
)
2103 if (op
!= RES_OP_RESERVE_AND_MAP
)
2106 port
= !in_port
? get_param_l(out_param
) : in_port
;
2107 port
= mlx4_slave_convert_port(
2114 err
= __mlx4_register_mac(dev
, port
, mac
);
2117 set_param_l(out_param
, err
);
2122 err
= mac_add_to_slave(dev
, slave
, mac
, port
, smac_index
);
2124 __mlx4_unregister_mac(dev
, port
, mac
);
2129 static int vlan_add_to_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2130 int port
, int vlan_index
)
2132 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2133 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2134 struct list_head
*vlan_list
=
2135 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2136 struct vlan_res
*res
, *tmp
;
2138 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2139 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2140 /* vlan found. update ref count */
2146 if (mlx4_grant_resource(dev
, slave
, RES_VLAN
, 1, port
))
2148 res
= kzalloc(sizeof(*res
), GFP_KERNEL
);
2150 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, port
);
2154 res
->port
= (u8
) port
;
2155 res
->vlan_index
= vlan_index
;
2157 list_add_tail(&res
->list
,
2158 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
]);
2163 static void vlan_del_from_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2166 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2167 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2168 struct list_head
*vlan_list
=
2169 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2170 struct vlan_res
*res
, *tmp
;
2172 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2173 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2174 if (!--res
->ref_count
) {
2175 list_del(&res
->list
);
2176 mlx4_release_resource(dev
, slave
, RES_VLAN
,
2185 static void rem_slave_vlans(struct mlx4_dev
*dev
, int slave
)
2187 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2188 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2189 struct list_head
*vlan_list
=
2190 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2191 struct vlan_res
*res
, *tmp
;
2194 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2195 list_del(&res
->list
);
2196 /* dereference the vlan the num times the slave referenced it */
2197 for (i
= 0; i
< res
->ref_count
; i
++)
2198 __mlx4_unregister_vlan(dev
, res
->port
, res
->vlan
);
2199 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, res
->port
);
2204 static int vlan_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2205 u64 in_param
, u64
*out_param
, int in_port
)
2207 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2208 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2214 port
= !in_port
? get_param_l(out_param
) : in_port
;
2216 if (!port
|| op
!= RES_OP_RESERVE_AND_MAP
)
2219 port
= mlx4_slave_convert_port(
2224 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2225 if (!in_port
&& port
> 0 && port
<= dev
->caps
.num_ports
) {
2226 slave_state
[slave
].old_vlan_api
= true;
2230 vlan
= (u16
) in_param
;
2232 err
= __mlx4_register_vlan(dev
, port
, vlan
, &vlan_index
);
2234 set_param_l(out_param
, (u32
) vlan_index
);
2235 err
= vlan_add_to_slave(dev
, slave
, vlan
, port
, vlan_index
);
2237 __mlx4_unregister_vlan(dev
, port
, vlan
);
2242 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2243 u64 in_param
, u64
*out_param
, int port
)
2248 if (op
!= RES_OP_RESERVE
)
2251 err
= mlx4_grant_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2255 err
= __mlx4_counter_alloc(dev
, &index
);
2257 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2261 err
= add_res_range(dev
, slave
, index
, 1, RES_COUNTER
, port
);
2263 __mlx4_counter_free(dev
, index
);
2264 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2266 set_param_l(out_param
, index
);
2272 static int xrcdn_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2273 u64 in_param
, u64
*out_param
)
2278 if (op
!= RES_OP_RESERVE
)
2281 err
= __mlx4_xrcd_alloc(dev
, &xrcdn
);
2285 err
= add_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2287 __mlx4_xrcd_free(dev
, xrcdn
);
2289 set_param_l(out_param
, xrcdn
);
2294 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2295 struct mlx4_vhcr
*vhcr
,
2296 struct mlx4_cmd_mailbox
*inbox
,
2297 struct mlx4_cmd_mailbox
*outbox
,
2298 struct mlx4_cmd_info
*cmd
)
2301 int alop
= vhcr
->op_modifier
;
2303 switch (vhcr
->in_modifier
& 0xFF) {
2305 err
= qp_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2306 vhcr
->in_param
, &vhcr
->out_param
);
2310 err
= mtt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2311 vhcr
->in_param
, &vhcr
->out_param
);
2315 err
= mpt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2316 vhcr
->in_param
, &vhcr
->out_param
);
2320 err
= cq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2321 vhcr
->in_param
, &vhcr
->out_param
);
2325 err
= srq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2326 vhcr
->in_param
, &vhcr
->out_param
);
2330 err
= mac_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2331 vhcr
->in_param
, &vhcr
->out_param
,
2332 (vhcr
->in_modifier
>> 8) & 0xFF);
2336 err
= vlan_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2337 vhcr
->in_param
, &vhcr
->out_param
,
2338 (vhcr
->in_modifier
>> 8) & 0xFF);
2342 err
= counter_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2343 vhcr
->in_param
, &vhcr
->out_param
, 0);
2347 err
= xrcdn_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2348 vhcr
->in_param
, &vhcr
->out_param
);
2359 static int qp_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2368 case RES_OP_RESERVE
:
2369 base
= get_param_l(&in_param
) & 0x7fffff;
2370 count
= get_param_h(&in_param
);
2371 err
= rem_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
2374 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
2375 __mlx4_qp_release_range(dev
, base
, count
);
2377 case RES_OP_MAP_ICM
:
2378 qpn
= get_param_l(&in_param
) & 0x7fffff;
2379 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_RESERVED
,
2384 if (!fw_reserved(dev
, qpn
))
2385 __mlx4_qp_free_icm(dev
, qpn
);
2387 res_end_move(dev
, slave
, RES_QP
, qpn
);
2389 if (valid_reserved(dev
, slave
, qpn
))
2390 err
= rem_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
2399 static int mtt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2400 u64 in_param
, u64
*out_param
)
2406 if (op
!= RES_OP_RESERVE_AND_MAP
)
2409 base
= get_param_l(&in_param
);
2410 order
= get_param_h(&in_param
);
2411 err
= rem_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
2413 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
2414 __mlx4_free_mtt_range(dev
, base
, order
);
2419 static int mpt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2425 struct res_mpt
*mpt
;
2428 case RES_OP_RESERVE
:
2429 index
= get_param_l(&in_param
);
2430 id
= index
& mpt_mask(dev
);
2431 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2435 put_res(dev
, slave
, id
, RES_MPT
);
2437 err
= rem_res_range(dev
, slave
, id
, 1, RES_MPT
, 0);
2440 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
2441 __mlx4_mpt_release(dev
, index
);
2443 case RES_OP_MAP_ICM
:
2444 index
= get_param_l(&in_param
);
2445 id
= index
& mpt_mask(dev
);
2446 err
= mr_res_start_move_to(dev
, slave
, id
,
2447 RES_MPT_RESERVED
, &mpt
);
2451 __mlx4_mpt_free_icm(dev
, mpt
->key
);
2452 res_end_move(dev
, slave
, RES_MPT
, id
);
2461 static int cq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2462 u64 in_param
, u64
*out_param
)
2468 case RES_OP_RESERVE_AND_MAP
:
2469 cqn
= get_param_l(&in_param
);
2470 err
= rem_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
2474 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
2475 __mlx4_cq_free_icm(dev
, cqn
);
2486 static int srq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2487 u64 in_param
, u64
*out_param
)
2493 case RES_OP_RESERVE_AND_MAP
:
2494 srqn
= get_param_l(&in_param
);
2495 err
= rem_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
2499 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
2500 __mlx4_srq_free_icm(dev
, srqn
);
2511 static int mac_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2512 u64 in_param
, u64
*out_param
, int in_port
)
2518 case RES_OP_RESERVE_AND_MAP
:
2519 port
= !in_port
? get_param_l(out_param
) : in_port
;
2520 port
= mlx4_slave_convert_port(
2525 mac_del_from_slave(dev
, slave
, in_param
, port
);
2526 __mlx4_unregister_mac(dev
, port
, in_param
);
2537 static int vlan_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2538 u64 in_param
, u64
*out_param
, int port
)
2540 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2541 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2544 port
= mlx4_slave_convert_port(
2550 case RES_OP_RESERVE_AND_MAP
:
2551 if (slave_state
[slave
].old_vlan_api
)
2555 vlan_del_from_slave(dev
, slave
, in_param
, port
);
2556 __mlx4_unregister_vlan(dev
, port
, in_param
);
2566 static int counter_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2567 u64 in_param
, u64
*out_param
)
2572 if (op
!= RES_OP_RESERVE
)
2575 index
= get_param_l(&in_param
);
2576 if (index
== MLX4_SINK_COUNTER_INDEX(dev
))
2579 err
= rem_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
2583 __mlx4_counter_free(dev
, index
);
2584 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2589 static int xrcdn_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2590 u64 in_param
, u64
*out_param
)
2595 if (op
!= RES_OP_RESERVE
)
2598 xrcdn
= get_param_l(&in_param
);
2599 err
= rem_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2603 __mlx4_xrcd_free(dev
, xrcdn
);
2608 int mlx4_FREE_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2609 struct mlx4_vhcr
*vhcr
,
2610 struct mlx4_cmd_mailbox
*inbox
,
2611 struct mlx4_cmd_mailbox
*outbox
,
2612 struct mlx4_cmd_info
*cmd
)
2615 int alop
= vhcr
->op_modifier
;
2617 switch (vhcr
->in_modifier
& 0xFF) {
2619 err
= qp_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2624 err
= mtt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2625 vhcr
->in_param
, &vhcr
->out_param
);
2629 err
= mpt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2634 err
= cq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2635 vhcr
->in_param
, &vhcr
->out_param
);
2639 err
= srq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2640 vhcr
->in_param
, &vhcr
->out_param
);
2644 err
= mac_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2645 vhcr
->in_param
, &vhcr
->out_param
,
2646 (vhcr
->in_modifier
>> 8) & 0xFF);
2650 err
= vlan_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2651 vhcr
->in_param
, &vhcr
->out_param
,
2652 (vhcr
->in_modifier
>> 8) & 0xFF);
2656 err
= counter_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2657 vhcr
->in_param
, &vhcr
->out_param
);
2661 err
= xrcdn_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2662 vhcr
->in_param
, &vhcr
->out_param
);
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_sride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
	int tot;

	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	tot = (total_mem + (page_offset << 6)) >> page_shift;
	total_pages = !tot ? 1 : roundup_pow_of_two(tot);

	return total_pages;
}
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
                           int size, struct res_mtt *mtt)
{
    int res_start = mtt->com.res_id;
    int res_size = (1 << mtt->order);

    if (start < res_start || start + size > res_start + res_size)
        return -EPERM;
    return 0;
}
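/* SW2HW_MPT on behalf of a VF: the MPT is moved to HW ownership only after
 * the PD owner bits, the FMR/bind restrictions and the referenced MTT range
 * have been validated against the slave's own resources. */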
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    int err;
    int index = vhcr->in_modifier;
    struct res_mtt *mtt;
    struct res_mpt *mpt = NULL;
    int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
    int phys;
    int id;
    u32 pd;
    int pd_slave;

    id = index & mpt_mask(dev);
    err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
    if (err)
        return err;

    /* Disable memory windows for VFs. */
    if (!mr_is_region(inbox->buf)) {
        err = -EPERM;
        goto ex_abort;
    }

    /* Make sure that the PD bits related to the slave id are zeros. */
    pd = mr_get_pd(inbox->buf);
    pd_slave = (pd >> 17) & 0x7f;
    if (pd_slave != 0 && --pd_slave != slave) {
        err = -EPERM;
        goto ex_abort;
    }

    if (mr_is_fmr(inbox->buf)) {
        /* FMR and Bind Enable are forbidden in slave devices. */
        if (mr_is_bind_enabled(inbox->buf)) {
            err = -EPERM;
            goto ex_abort;
        }
        /* FMR and Memory Windows are also forbidden. */
        if (!mr_is_region(inbox->buf)) {
            err = -EPERM;
            goto ex_abort;
        }
    }

    phys = mr_phys_mpt(inbox->buf);
    if (!phys) {
        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
            goto ex_abort;

        err = check_mtt_range(dev, slave, mtt_base,
                              mr_get_mtt_size(inbox->buf), mtt);
        if (err)
            goto ex_put;

        mpt->mtt = mtt;
    }

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto ex_put;

    if (!phys) {
        atomic_inc(&mtt->ref_count);
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
    }

    res_end_move(dev, slave, RES_MPT, id);
    return 0;

ex_put:
    if (!phys)
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
    res_abort_move(dev, slave, RES_MPT, id);

    return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    int err;
    int index = vhcr->in_modifier;
    struct res_mpt *mpt;
    int id;

    id = index & mpt_mask(dev);
    err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
    if (err)
        return err;

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto ex_abort;

    if (mpt->mtt)
        atomic_dec(&mpt->mtt->ref_count);

    res_end_move(dev, slave, RES_MPT, id);
    return 0;

ex_abort:
    res_abort_move(dev, slave, RES_MPT, id);

    return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    int err;
    int index = vhcr->in_modifier;
    struct res_mpt *mpt;
    int id;

    id = index & mpt_mask(dev);
    err = get_res(dev, slave, id, RES_MPT, &mpt);
    if (err)
        return err;

    if (mpt->com.from_state == RES_MPT_MAPPED) {
        /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
         * that, the VF must read the MPT. But since the MPT entry memory is not
         * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
         * entry contents. To guarantee that the MPT cannot be changed, the driver
         * must perform HW2SW_MPT before this query and return the MPT entry to HW
         * ownership following the change. The change here allows the VF to
         * perform QUERY_MPT also when the entry is in SW ownership.
         */
        struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
                        &mlx4_priv(dev)->mr_table.dmpt_table,
                        mpt->key, NULL);
        if (NULL == mpt_entry || NULL == outbox->buf) {
            err = -EINVAL;
            goto out;
        }

        memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

        err = 0;
    } else if (mpt->com.from_state == RES_MPT_HW) {
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    } else {
        err = -EBUSY;
        goto out;
    }

out:
    put_res(dev, slave, id, RES_MPT);
    return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
    return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
    return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
    return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
                                  struct mlx4_qp_context *context)
{
    u32 qpn = vhcr->in_modifier & 0xffffff;
    u32 qkey = 0;

    if (mlx4_get_parav_qkey(dev, qpn, &qkey))
        return;

    /* adjust qkey in qp context */
    context->qkey = cpu_to_be32(qkey);
}

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
                                 struct mlx4_qp_context *qpc,
                                 struct mlx4_cmd_mailbox *inbox);
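/* RST2INIT on behalf of a VF: take references on the MTT, the receive/send
 * CQs and (when used) the SRQ named in the QP context before passing the
 * command to firmware, so the tracker can block their removal while the QP
 * is in HW ownership. */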
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd)
{
    int err;
    int qpn = vhcr->in_modifier & 0x7fffff;
    struct res_mtt *mtt;
    struct res_qp *qp;
    struct mlx4_qp_context *qpc = inbox->buf + 8;
    int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
    int mtt_size = qp_get_mtt_size(qpc);
    struct res_cq *rcq;
    struct res_cq *scq;
    int rcqn = qp_get_rcqn(qpc);
    int scqn = qp_get_scqn(qpc);
    u32 srqn = qp_get_srqn(qpc) & 0xffffff;
    int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
    struct res_srq *srq;
    int local_qpn = vhcr->in_modifier & 0xffffff;

    err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
    if (err)
        return err;
    err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
    if (err)
        return err;
    qp->local_qpn = local_qpn;
    qp->sched_queue = 0;
    qp->vlan_control = 0;
    qp->pri_path_fl = 0;
    qp->qpc_flags = be32_to_cpu(qpc->flags);

    err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
    if (err)
        goto ex_abort;

    err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
    if (err)
        goto ex_put_mtt;

    err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
    if (err)
        goto ex_put_mtt;

    if (scqn != rcqn) {
        err = get_res(dev, slave, scqn, RES_CQ, &scq);
        if (err)
            goto ex_put_rcq;
    } else
        scq = rcq;

    if (use_srq) {
        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
        if (err)
            goto ex_put_scq;
    }

    adjust_proxy_tun_qkey(dev, vhcr, qpc);
    update_pkey_index(dev, slave, inbox);
    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto ex_put_srq;
    atomic_inc(&mtt->ref_count);
    qp->mtt = mtt;
    atomic_inc(&rcq->ref_count);
    qp->rcq = rcq;
    atomic_inc(&scq->ref_count);
    qp->scq = scq;

    if (scqn != rcqn)
        put_res(dev, slave, scqn, RES_CQ);

    if (use_srq) {
        atomic_inc(&srq->ref_count);
        put_res(dev, slave, srqn, RES_SRQ);
        qp->srq = srq;
    }

    /* Save param3 for dynamic changes from VST back to VGT */
    qp->param3 = qpc->param3;
    put_res(dev, slave, rcqn, RES_CQ);
    put_res(dev, slave, mtt_base, RES_MTT);
    res_end_move(dev, slave, RES_QP, qpn);

    return 0;

ex_put_srq:
    if (use_srq)
        put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
    if (scqn != rcqn)
        put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
    put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
    put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
    res_abort_move(dev, slave, RES_QP, qpn);

    return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
    return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
    int log_eq_size = eqc->log_eq_size & 0x1f;
    int page_shift = (eqc->log_page_size & 0x3f) + 12;

    if (log_eq_size + 5 < page_shift)
        return 1;

    return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
    return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
    int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
    int page_shift = (cqc->log_page_size & 0x3f) + 12;

    if (log_cq_size + 5 < page_shift)
        return 1;

    return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
    int err;
    int eqn = vhcr->in_modifier;
    int res_id = (slave << 10) | eqn;
    struct mlx4_eq_context *eqc = inbox->buf;
    int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
    int mtt_size = eq_get_mtt_size(eqc);
    struct res_eq *eq;
    struct res_mtt *mtt;

    err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
    if (err)
        return err;
    err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
    if (err)
        goto out_add;

    err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
    if (err)
        goto out_move;

    err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
    if (err)
        goto out_put;

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto out_put;

    atomic_inc(&mtt->ref_count);
    eq->mtt = mtt;
    put_res(dev, slave, mtt->com.res_id, RES_MTT);
    res_end_move(dev, slave, RES_EQ, res_id);
    return 0;

out_put:
    put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
    res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
    rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
    return err;
}
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
{
    int err;
    u8 get = vhcr->op_modifier;

    if (get != 1)
        return -EPERM;

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

    return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
                              int len, struct res_mtt **res)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
    struct res_mtt *mtt;
    int err = -EINVAL;

    spin_lock_irq(mlx4_tlock(dev));
    list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
                        com.list) {
        if (!check_mtt_range(dev, slave, start, len, mtt)) {
            *res = mtt;
            mtt->com.from_state = mtt->com.state;
            mtt->com.state = RES_MTT_BUSY;
            err = 0;
            break;
        }
    }
    spin_unlock_irq(mlx4_tlock(dev));

    return err;
}
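/* Sanity checks applied to QP state transitions requested by slaves: clear
 * FPP, reject VF rate-limit settings, validate mgid_index against the
 * slave's GID quota, and restrict MLX proxy special QP creation to
 * SMI-enabled VFs. */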
static int verify_qp_parameters(struct mlx4_dev *dev,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
                                enum qp_transition transition, u8 slave)
{
    u32 qp_type;
    u32 qpn;
    struct mlx4_qp_context *qp_ctx;
    enum mlx4_qp_optpar optpar;
    int port;
    int num_gids;

    qp_ctx = inbox->buf + 8;
    qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
    optpar = be32_to_cpu(*(__be32 *) inbox->buf);

    if (slave != mlx4_master_func_num(dev)) {
        qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
        /* setting QP rate-limit is disallowed for VFs */
        if (qp_ctx->rate_limit_params)
            return -EPERM;
    }

    switch (qp_type) {
    case MLX4_QP_ST_XRC:
        switch (transition) {
        case QP_TRANS_INIT2RTR:
        case QP_TRANS_RTR2RTS:
        case QP_TRANS_RTS2RTS:
        case QP_TRANS_SQD2SQD:
        case QP_TRANS_SQD2RTS:
            if (slave != mlx4_master_func_num(dev)) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                    port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                    if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
                        num_gids = mlx4_get_slave_num_gids(dev, slave, port);
                    else
                        num_gids = 1;
                    if (qp_ctx->pri_path.mgid_index >= num_gids)
                        return -EINVAL;
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                    port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                    if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
                        num_gids = mlx4_get_slave_num_gids(dev, slave, port);
                    else
                        num_gids = 1;
                    if (qp_ctx->alt_path.mgid_index >= num_gids)
                        return -EINVAL;
                }
            }
            break;
        default:
            break;
        }
        break;

    case MLX4_QP_ST_MLX:
        qpn = vhcr->in_modifier & 0x7fffff;
        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
        if (transition == QP_TRANS_INIT2RTR &&
            slave != mlx4_master_func_num(dev) &&
            mlx4_is_qp_reserved(dev, qpn) &&
            !mlx4_vf_smi_enabled(dev, slave, port)) {
            /* only enabled VFs may create MLX proxy QPs */
            mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
                     __func__, slave, port);
            return -EPERM;
        }
        break;

    default:
        break;
    }

    return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    struct mlx4_mtt mtt;
    __be64 *page_list = inbox->buf;
    u64 *pg_list = (u64 *)page_list;
    int i;
    struct res_mtt *rmtt = NULL;
    int start = be64_to_cpu(page_list[0]);
    int npages = vhcr->in_modifier;
    int err;

    err = get_containing_mtt(dev, slave, start, npages, &rmtt);
    if (err)
        return err;

    /* Call the SW implementation of write_mtt:
     * - Prepare a dummy mtt struct
     * - Translate inbox contents to simple addresses in host endianness */
    mtt.offset = 0;  /* TBD this is broken but I don't handle it since
                        we don't really use it */
    for (i = 0; i < npages; ++i)
        pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

    err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
                           ((u64 *)page_list + 2));

    if (rmtt)
        put_res(dev, slave, rmtt->com.res_id, RES_MTT);

    return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
    int eqn = vhcr->in_modifier;
    int res_id = eqn | (slave << 10);
    struct res_eq *eq;
    int err;

    err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
    if (err)
        return err;

    err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
    if (err)
        goto ex_abort;

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto ex_put;

    atomic_dec(&eq->mtt->ref_count);
    put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
    res_end_move(dev, slave, RES_EQ, res_id);
    rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

    return 0;

ex_put:
    put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
    res_abort_move(dev, slave, RES_EQ, res_id);

    return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_slave_event_eq_info *event_eq;
    struct mlx4_cmd_mailbox *mailbox;
    u32 in_modifier = 0;
    int err;
    int res_id;
    struct res_eq *req;

    if (!priv->mfunc.master.slave_state)
        return -EINVAL;

    /* check for slave valid, slave not PF, and slave active */
    if (slave < 0 || slave > dev->persist->num_vfs ||
        slave == dev->caps.function ||
        !priv->mfunc.master.slave_state[slave].active)
        return 0;

    event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

    /* Create the event only if the slave is registered */
    if (event_eq->eqn < 0)
        return 0;

    mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
    res_id = (slave << 10) | event_eq->eqn;
    err = get_res(dev, slave, res_id, RES_EQ, &req);
    if (err)
        goto unlock;

    if (req->com.from_state != RES_EQ_HW) {
        err = -EINVAL;
        goto put;
    }

    mailbox = mlx4_alloc_cmd_mailbox(dev);
    if (IS_ERR(mailbox)) {
        err = PTR_ERR(mailbox);
        goto put;
    }

    if (eqe->type == MLX4_EVENT_TYPE_CMD) {
        ++event_eq->token;
        eqe->event.cmd.token = cpu_to_be16(event_eq->token);
    }

    memcpy(mailbox->buf, (u8 *) eqe, 28);

    in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);

    err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
                   MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
                   MLX4_CMD_NATIVE);

    put_res(dev, slave, res_id, RES_EQ);
    mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
    mlx4_free_cmd_mailbox(dev, mailbox);
    return err;

put:
    put_res(dev, slave, res_id, RES_EQ);

unlock:
    mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
    return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
    int eqn = vhcr->in_modifier;
    int res_id = eqn | (slave << 10);
    struct res_eq *eq;
    int err;

    err = get_res(dev, slave, res_id, RES_EQ, &eq);
    if (err)
        return err;

    if (eq->com.from_state != RES_EQ_HW) {
        err = -EINVAL;
        goto ex_put;
    }

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
    put_res(dev, slave, res_id, RES_EQ);
    return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
    int err;
    int cqn = vhcr->in_modifier;
    struct mlx4_cq_context *cqc = inbox->buf;
    int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
    struct res_cq *cq = NULL;
    struct res_mtt *mtt;

    err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
    if (err)
        return err;
    err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
    if (err)
        goto out_move;
    err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
    if (err)
        goto out_put;
    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto out_put;
    atomic_inc(&mtt->ref_count);
    cq->mtt = mtt;
    put_res(dev, slave, mtt->com.res_id, RES_MTT);
    res_end_move(dev, slave, RES_CQ, cqn);
    return 0;

out_put:
    put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
    res_abort_move(dev, slave, RES_CQ, cqn);
    return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
    int err;
    int cqn = vhcr->in_modifier;
    struct res_cq *cq = NULL;

    err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
    if (err)
        return err;
    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto out_move;
    atomic_dec(&cq->mtt->ref_count);
    res_end_move(dev, slave, RES_CQ, cqn);
    return 0;

out_move:
    res_abort_move(dev, slave, RES_CQ, cqn);
    return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
    int cqn = vhcr->in_modifier;
    struct res_cq *cq;
    int err;

    err = get_res(dev, slave, cqn, RES_CQ, &cq);
    if (err)
        return err;

    if (cq->com.from_state != RES_CQ_HW)
        goto ex_put;

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
    put_res(dev, slave, cqn, RES_CQ);

    return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd,
                         struct res_cq *cq)
{
    int err;
    struct res_mtt *orig_mtt;
    struct res_mtt *mtt;
    struct mlx4_cq_context *cqc = inbox->buf;
    int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

    err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
    if (err)
        return err;

    if (orig_mtt != cq->mtt) {
        err = -EINVAL;
        goto ex_put;
    }

    err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
    if (err)
        goto ex_put;

    err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
    if (err)
        goto ex_put1;
    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto ex_put1;
    atomic_dec(&orig_mtt->ref_count);
    put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
    atomic_inc(&mtt->ref_count);
    cq->mtt = mtt;
    put_res(dev, slave, mtt->com.res_id, RES_MTT);
    return 0;

ex_put1:
    put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
    put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

    return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    int cqn = vhcr->in_modifier;
    struct res_cq *cq;
    int err;

    err = get_res(dev, slave, cqn, RES_CQ, &cq);
    if (err)
        return err;

    if (cq->com.from_state != RES_CQ_HW)
        goto ex_put;

    if (vhcr->op_modifier == 0) {
        err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
        goto ex_put;
    }

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
    put_res(dev, slave, cqn, RES_CQ);

    return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
    int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
    int log_rq_stride = srqc->logstride & 7;
    int page_shift = (srqc->log_page_size & 0x3f) + 12;

    if (log_srq_size + log_rq_stride + 4 < page_shift)
        return 1;

    return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    int err;
    int srqn = vhcr->in_modifier;
    struct res_mtt *mtt;
    struct res_srq *srq = NULL;
    struct mlx4_srq_context *srqc = inbox->buf;
    int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

    if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
        return -EINVAL;

    err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
    if (err)
        return err;
    err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
    if (err)
        goto ex_abort;
    err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
                          mtt);
    if (err)
        goto ex_put_mtt;

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto ex_put_mtt;

    atomic_inc(&mtt->ref_count);
    srq->mtt = mtt;
    put_res(dev, slave, mtt->com.res_id, RES_MTT);
    res_end_move(dev, slave, RES_SRQ, srqn);
    return 0;

ex_put_mtt:
    put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
    res_abort_move(dev, slave, RES_SRQ, srqn);

    return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    int err;
    int srqn = vhcr->in_modifier;
    struct res_srq *srq = NULL;

    err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
    if (err)
        return err;
    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto ex_abort;
    atomic_dec(&srq->mtt->ref_count);
    if (srq->cq)
        atomic_dec(&srq->cq->ref_count);
    res_end_move(dev, slave, RES_SRQ, srqn);

    return 0;

ex_abort:
    res_abort_move(dev, slave, RES_SRQ, srqn);

    return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    int err;
    int srqn = vhcr->in_modifier;
    struct res_srq *srq;

    err = get_res(dev, slave, srqn, RES_SRQ, &srq);
    if (err)
        return err;
    if (srq->com.from_state != RES_SRQ_HW) {
        err = -EBUSY;
        goto out;
    }
    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
    put_res(dev, slave, srqn, RES_SRQ);
    return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd)
{
    int err;
    int srqn = vhcr->in_modifier;
    struct res_srq *srq;

    err = get_res(dev, slave, srqn, RES_SRQ, &srq);
    if (err)
        return err;

    if (srq->com.from_state != RES_SRQ_HW) {
        err = -EBUSY;
        goto out;
    }

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
    put_res(dev, slave, srqn, RES_SRQ);
    return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
                        struct mlx4_vhcr *vhcr,
                        struct mlx4_cmd_mailbox *inbox,
                        struct mlx4_cmd_mailbox *outbox,
                        struct mlx4_cmd_info *cmd)
{
    int err;
    int qpn = vhcr->in_modifier & 0x7fffff;
    struct res_qp *qp;

    err = get_res(dev, slave, qpn, RES_QP, &qp);
    if (err)
        return err;
    if (qp->com.from_state != RES_QP_HW) {
        err = -EBUSY;
        goto out;
    }

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
    put_res(dev, slave, qpn, RES_QP);
    return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                              struct mlx4_vhcr *vhcr,
                              struct mlx4_cmd_mailbox *inbox,
                              struct mlx4_cmd_mailbox *outbox,
                              struct mlx4_cmd_info *cmd)
{
    struct mlx4_qp_context *context = inbox->buf + 8;

    adjust_proxy_tun_qkey(dev, vhcr, context);
    update_pkey_index(dev, slave, inbox);
    return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
                                 struct mlx4_qp_context *qpc,
                                 struct mlx4_cmd_mailbox *inbox)
{
    enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
    u8 pri_sched_queue;
    int port = mlx4_slave_convert_port(
               dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;

    if (port < 0)
        return -EINVAL;

    pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
                      ((port & 1) << 6);

    if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
        qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
        qpc->pri_path.sched_queue = pri_sched_queue;
    }

    if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
        port = mlx4_slave_convert_port(
               dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
               + 1) - 1;
        if (port < 0)
            return -EINVAL;
        qpc->alt_path.sched_queue =
            (qpc->alt_path.sched_queue & ~(1 << 6)) |
            (port & 1) << 6;
    }
    return 0;
}
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
                           struct mlx4_qp_context *qpc,
                           struct mlx4_cmd_mailbox *inbox)
{
    u64 mac;
    int port;
    u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
    u8 sched = *(u8 *)(inbox->buf + 64);
    u8 smac_ix;

    port = (sched >> 6 & 1) + 1;
    if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
        smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
        if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
            return -ENOENT;
    }
    return 0;
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd)
{
    int err;
    struct mlx4_qp_context *qpc = inbox->buf + 8;
    int qpn = vhcr->in_modifier & 0x7fffff;
    struct res_qp *qp;
    u8 orig_sched_queue;
    u8 orig_vlan_control = qpc->pri_path.vlan_control;
    u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
    u8 orig_pri_path_fl = qpc->pri_path.fl;
    u8 orig_vlan_index = qpc->pri_path.vlan_index;
    u8 orig_feup = qpc->pri_path.feup;

    err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
    if (err)
        return err;
    err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
    if (err)
        return err;

    if (roce_verify_mac(dev, slave, qpc, inbox))
        return -EINVAL;

    update_pkey_index(dev, slave, inbox);
    update_gid(dev, inbox, (u8)slave);
    adjust_proxy_tun_qkey(dev, vhcr, qpc);
    orig_sched_queue = qpc->pri_path.sched_queue;

    err = get_res(dev, slave, qpn, RES_QP, &qp);
    if (err)
        return err;
    if (qp->com.from_state != RES_QP_HW) {
        err = -EBUSY;
        goto out;
    }

    err = update_vport_qp_param(dev, inbox, slave, qpn);
    if (err)
        goto out;

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
    /* if no error, save sched queue value passed in by VF. This is
     * essentially the QOS value provided by the VF. This will be useful
     * if we allow dynamic changes from VST back to VGT
     */
    if (!err) {
        qp->sched_queue = orig_sched_queue;
        qp->vlan_control = orig_vlan_control;
        qp->fvl_rx = orig_fvl_rx;
        qp->pri_path_fl = orig_pri_path_fl;
        qp->vlan_index = orig_vlan_index;
        qp->feup = orig_feup;
    }
    put_res(dev, slave, qpn, RES_QP);
    return err;
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
{
    int err;
    struct mlx4_qp_context *context = inbox->buf + 8;

    err = adjust_qp_sched_queue(dev, slave, context, inbox);
    if (err)
        return err;
    err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
    if (err)
        return err;

    update_pkey_index(dev, slave, inbox);
    update_gid(dev, inbox, (u8)slave);
    adjust_proxy_tun_qkey(dev, vhcr, context);
    return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
{
    int err;
    struct mlx4_qp_context *context = inbox->buf + 8;

    err = adjust_qp_sched_queue(dev, slave, context, inbox);
    if (err)
        return err;
    err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
    if (err)
        return err;

    update_pkey_index(dev, slave, inbox);
    update_gid(dev, inbox, (u8)slave);
    adjust_proxy_tun_qkey(dev, vhcr, context);
    return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                              struct mlx4_vhcr *vhcr,
                              struct mlx4_cmd_mailbox *inbox,
                              struct mlx4_cmd_mailbox *outbox,
                              struct mlx4_cmd_info *cmd)
{
    struct mlx4_qp_context *context = inbox->buf + 8;
    int err = adjust_qp_sched_queue(dev, slave, context, inbox);

    if (err)
        return err;
    adjust_proxy_tun_qkey(dev, vhcr, context);
    return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
{
    int err;
    struct mlx4_qp_context *context = inbox->buf + 8;

    err = adjust_qp_sched_queue(dev, slave, context, inbox);
    if (err)
        return err;
    err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
    if (err)
        return err;

    adjust_proxy_tun_qkey(dev, vhcr, context);
    update_gid(dev, inbox, (u8)slave);
    update_pkey_index(dev, slave, inbox);
    return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
{
    int err;
    struct mlx4_qp_context *context = inbox->buf + 8;

    err = adjust_qp_sched_queue(dev, slave, context, inbox);
    if (err)
        return err;
    err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
    if (err)
        return err;

    adjust_proxy_tun_qkey(dev, vhcr, context);
    update_gid(dev, inbox, (u8)slave);
    update_pkey_index(dev, slave, inbox);
    return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd)
{
    int err;
    int qpn = vhcr->in_modifier & 0x7fffff;
    struct res_qp *qp;

    err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
    if (err)
        return err;
    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    if (err)
        goto ex_abort;

    atomic_dec(&qp->mtt->ref_count);
    atomic_dec(&qp->rcq->ref_count);
    atomic_dec(&qp->scq->ref_count);
    if (qp->srq)
        atomic_dec(&qp->srq->ref_count);
    res_end_move(dev, slave, RES_QP, qpn);
    return 0;

ex_abort:
    res_abort_move(dev, slave, RES_QP, qpn);

    return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
                                struct res_qp *rqp, u8 *gid)
{
    struct res_gid *res;

    list_for_each_entry(res, &rqp->mcg_list, list) {
        if (!memcmp(res->gid, gid, 16))
            return res;
    }
    return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
                       u8 *gid, enum mlx4_protocol prot,
                       enum mlx4_steer_type steer, u64 reg_id)
{
    struct res_gid *res;
    int err;

    res = kzalloc(sizeof(*res), GFP_KERNEL);
    if (!res)
        return -ENOMEM;

    spin_lock_irq(&rqp->mcg_spl);
    if (find_gid(dev, slave, rqp, gid)) {
        kfree(res);
        err = -EEXIST;
    } else {
        memcpy(res->gid, gid, 16);
        res->prot = prot;
        res->steer = steer;
        res->reg_id = reg_id;
        list_add_tail(&res->list, &rqp->mcg_list);
        err = 0;
    }
    spin_unlock_irq(&rqp->mcg_spl);

    return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
                       u8 *gid, enum mlx4_protocol prot,
                       enum mlx4_steer_type steer, u64 *reg_id)
{
    struct res_gid *res;
    int err;

    spin_lock_irq(&rqp->mcg_spl);
    res = find_gid(dev, slave, rqp, gid);
    if (!res || res->prot != prot || res->steer != steer)
        err = -EINVAL;
    else {
        *reg_id = res->reg_id;
        list_del(&res->list);
        kfree(res);
        err = 0;
    }
    spin_unlock_irq(&rqp->mcg_spl);

    return err;
}
static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
                     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
                     enum mlx4_steer_type type, u64 *reg_id)
{
    switch (dev->caps.steering_mode) {
    case MLX4_STEERING_MODE_DEVICE_MANAGED: {
        int port = mlx4_slave_convert_port(dev, slave, gid[5]);
        if (port < 0)
            return port;
        return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
                                         block_loopback, prot,
                                         reg_id);
    }
    case MLX4_STEERING_MODE_B0:
        if (prot == MLX4_PROT_ETH) {
            int port = mlx4_slave_convert_port(dev, slave, gid[5]);
            if (port < 0)
                return port;
            gid[5] = port;
        }
        return mlx4_qp_attach_common(dev, qp, gid,
                                     block_loopback, prot, type);
    default:
        return -EINVAL;
    }
}
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
                     u8 gid[16], enum mlx4_protocol prot,
                     enum mlx4_steer_type type, u64 reg_id)
{
    switch (dev->caps.steering_mode) {
    case MLX4_STEERING_MODE_DEVICE_MANAGED:
        return mlx4_flow_detach(dev, reg_id);
    case MLX4_STEERING_MODE_B0:
        return mlx4_qp_detach_common(dev, qp, gid, prot, type);
    default:
        return -EINVAL;
    }
}
static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
                            u8 *gid, enum mlx4_protocol prot)
{
    int real_port;

    if (prot != MLX4_PROT_ETH)
        return 0;

    if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
        dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
        real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
        if (real_port < 0)
            return -EINVAL;
        gid[5] = real_port;
    }

    return 0;
}
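/* ATTACH/DETACH of a multicast group for a slave QP: the GID is
 * (de)registered through the steering mode in use and mirrored in the
 * per-QP mcg_list so it can be cleaned up when the slave goes away. */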
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
    struct mlx4_qp qp; /* dummy for calling attach/detach */
    u8 *gid = inbox->buf;
    enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
    int err;
    int qpn;
    struct res_qp *rqp;
    u64 reg_id = 0;
    int attach = vhcr->op_modifier;
    int block_loopback = vhcr->in_modifier >> 31;
    u8 steer_type_mask = 2;
    enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

    qpn = vhcr->in_modifier & 0xffffff;
    err = get_res(dev, slave, qpn, RES_QP, &rqp);
    if (err)
        return err;

    qp.qpn = qpn;
    if (attach) {
        err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
                        type, &reg_id);
        if (err) {
            pr_err("Fail to attach rule to qp 0x%x\n", qpn);
            goto ex_put;
        }
        err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
        if (err)
            goto ex_detach;
    } else {
        err = mlx4_adjust_port(dev, slave, gid, prot);
        if (err)
            goto ex_put;

        err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
        if (err)
            goto ex_put;

        err = qp_detach(dev, &qp, gid, prot, type, reg_id);
        if (err)
            pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
                   qpn, reg_id);
    }
    put_res(dev, slave, qpn, RES_QP);
    return err;

ex_detach:
    qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
    put_res(dev, slave, qpn, RES_QP);
    return err;
}
/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a mac address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
                                   struct list_head *rlist)
{
    struct mac_res *res, *tmp;
    __be64 be_mac;

    /* make sure it isn't multicast or broadcast mac*/
    if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
        !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
        list_for_each_entry_safe(res, tmp, rlist, list) {
            be_mac = cpu_to_be64(res->mac << 16);
            if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
                return 0;
        }
        pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
               eth_header->eth.dst_mac, slave);
        return -EINVAL;
    }
    return 0;
}
/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
                          struct mlx4_cmd_mailbox *inbox,
                          struct list_head *rlist, int header_id)
{
    struct mac_res *res, *tmp;
    u8 port;
    struct mlx4_net_trans_rule_hw_ctrl *ctrl;
    struct mlx4_net_trans_rule_hw_eth *eth_header;
    struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
    struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
    __be64 be_mac = 0;
    __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

    ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
    port = ctrl->port;
    eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

    /* Clear a space in the inbox for eth header */
    switch (header_id) {
    case MLX4_NET_TRANS_RULE_ID_IPV4:
        ip_header =
            (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
        memmove(ip_header, eth_header,
                sizeof(*ip_header) + sizeof(*l4_header));
        break;
    case MLX4_NET_TRANS_RULE_ID_TCP:
    case MLX4_NET_TRANS_RULE_ID_UDP:
        l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
                    (eth_header + 1);
        memmove(l4_header, eth_header, sizeof(*l4_header));
        break;
    default:
        return -EINVAL;
    }
    list_for_each_entry_safe(res, tmp, rlist, list) {
        if (port == res->port) {
            be_mac = cpu_to_be64(res->mac << 16);
            break;
        }
    }
    if (!be_mac) {
        pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
               port);
        return -EINVAL;
    }

    memset(eth_header, 0, sizeof(*eth_header));
    eth_header->size = sizeof(*eth_header) >> 2;
    eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
    memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
    memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

    return 0;
}
#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (      \
    1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
    1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)

int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd_info)
{
    int err;
    u32 qpn = vhcr->in_modifier & 0xffffff;
    struct res_qp *rqp;
    u64 mac;
    unsigned port;
    u64 pri_addr_path_mask;
    struct mlx4_update_qp_context *cmd;
    int smac_index;

    cmd = (struct mlx4_update_qp_context *)inbox->buf;

    pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
    if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
        (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
        return -EPERM;

    if ((pri_addr_path_mask &
         (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
        !(dev->caps.flags2 &
          MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
        mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
                  slave);
        return -EOPNOTSUPP;
    }

    /* Just change the smac for the QP */
    err = get_res(dev, slave, qpn, RES_QP, &rqp);
    if (err) {
        mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
        return err;
    }

    port = (rqp->sched_queue >> 6 & 1) + 1;

    if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
        smac_index = cmd->qp_context.pri_path.grh_mylmc;
        err = mac_find_smac_ix_in_slave(dev, slave, port,
                                        smac_index, &mac);
        if (err) {
            mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
                     qpn, smac_index);
            goto err_mac;
        }
    }

    err = mlx4_cmd(dev, inbox->dma,
                   vhcr->in_modifier, 0,
                   MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
                   MLX4_CMD_NATIVE);
    if (err) {
        mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
        goto err_mac;
    }

err_mac:
    put_res(dev, slave, qpn, RES_QP);
    return err;
}
static u32 qp_attach_mbox_size(void *mbox)
{
    u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
    struct _rule_hw *rule_header;

    rule_header = (struct _rule_hw *)(mbox + size);

    while (rule_header->size) {
        size += rule_header->size * sizeof(u32);
        rule_header += 1;
    }

    return size;
}

static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
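/* DMFS rule attach on behalf of a VF: the port and MAC in the rule are
 * validated (and rewritten when needed), the rule is tracked as a
 * RES_FS_RULE owned by the slave, and a port-flipped copy of the mailbox is
 * kept so the rule can be mirrored when the ports are bonded. */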
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
                                         struct mlx4_cmd_mailbox *outbox,
                                         struct mlx4_cmd_info *cmd)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
    struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
    int err;
    int qpn;
    struct res_qp *rqp;
    struct mlx4_net_trans_rule_hw_ctrl *ctrl;
    struct _rule_hw *rule_header;
    int header_id;
    struct res_fs_rule *rrule;
    u32 mbox_size;

    if (dev->caps.steering_mode !=
        MLX4_STEERING_MODE_DEVICE_MANAGED)
        return -EOPNOTSUPP;

    ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
    err = mlx4_slave_convert_port(dev, slave, ctrl->port);
    if (err <= 0)
        return -EINVAL;
    ctrl->port = err;
    qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
    err = get_res(dev, slave, qpn, RES_QP, &rqp);
    if (err) {
        pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
        return err;
    }
    rule_header = (struct _rule_hw *)(ctrl + 1);
    header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

    if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
        mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);

    switch (header_id) {
    case MLX4_NET_TRANS_RULE_ID_ETH:
        if (validate_eth_header_mac(slave, rule_header, rlist)) {
            err = -EINVAL;
            goto err_put_qp;
        }
        break;
    case MLX4_NET_TRANS_RULE_ID_IB:
        break;
    case MLX4_NET_TRANS_RULE_ID_IPV4:
    case MLX4_NET_TRANS_RULE_ID_TCP:
    case MLX4_NET_TRANS_RULE_ID_UDP:
        pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
        if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
            err = -EINVAL;
            goto err_put_qp;
        }
        vhcr->in_modifier +=
            sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
        break;
    default:
        pr_err("Corrupted mailbox\n");
        err = -EINVAL;
        goto err_put_qp;
    }

    err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
                       vhcr->in_modifier, 0,
                       MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
    if (err)
        goto err_put_qp;

    err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
    if (err) {
        mlx4_err(dev, "Fail to add flow steering resources\n");
        goto err_detach;
    }

    err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
    if (err)
        goto err_detach;

    mbox_size = qp_attach_mbox_size(inbox->buf);
    rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
    if (!rrule->mirr_mbox) {
        err = -ENOMEM;
        goto err_put_rule;
    }
    rrule->mirr_mbox_size = mbox_size;
    rrule->mirr_rule_id = 0;
    memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);

    /* set different port */
    ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
    if (ctrl->port == 1)
        ctrl->port = 2;
    else
        ctrl->port = 1;

    if (mlx4_is_bonded(dev))
        mlx4_do_mirror_rule(dev, rrule);

    atomic_inc(&rqp->ref_count);

err_put_rule:
    put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
err_detach:
    /* detach rule on error */
    if (err)
        mlx4_cmd(dev, vhcr->out_param, 0, 0,
                 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                 MLX4_CMD_NATIVE);
err_put_qp:
    put_res(dev, slave, qpn, RES_QP);
    return err;
}
static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
    int err;

    err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
    if (err) {
        mlx4_err(dev, "Fail to remove flow steering resources\n");
        return err;
    }

    mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
             MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
    return 0;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
                                         struct mlx4_cmd_mailbox *outbox,
                                         struct mlx4_cmd_info *cmd)
{
    int err;
    struct res_qp *rqp;
    struct res_fs_rule *rrule;
    u64 mirr_reg_id;
    int qpn;

    if (dev->caps.steering_mode !=
        MLX4_STEERING_MODE_DEVICE_MANAGED)
        return -EOPNOTSUPP;

    err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
    if (err)
        return err;

    if (!rrule->mirr_mbox) {
        mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
        return -EINVAL;
    }
    mirr_reg_id = rrule->mirr_rule_id;
    kfree(rrule->mirr_mbox);
    qpn = rrule->qpn;

    /* Release the rule from busy state before removal */
    put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
    err = get_res(dev, slave, qpn, RES_QP, &rqp);
    if (err)
        return err;

    if (mirr_reg_id && mlx4_is_bonded(dev)) {
        err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
        if (err) {
            mlx4_err(dev, "Fail to get resource of mirror rule\n");
        } else {
            put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
            mlx4_undo_mirror_rule(dev, rrule);
        }
    }
    err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
    if (err) {
        mlx4_err(dev, "Fail to remove flow steering resources\n");
        goto out;
    }

    err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
                   MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                   MLX4_CMD_NATIVE);
    if (!err)
        atomic_dec(&rqp->ref_count);
out:
    put_res(dev, slave, qpn, RES_QP);
    return err;
}
enum {
    BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
                               struct mlx4_vhcr *vhcr,
                               struct mlx4_cmd_mailbox *inbox,
                               struct mlx4_cmd_mailbox *outbox,
                               struct mlx4_cmd_info *cmd)
{
    int err;
    int index = vhcr->in_modifier & 0xffff;

    err = get_res(dev, slave, index, RES_COUNTER, NULL);
    if (err)
        return err;

    err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
    put_res(dev, slave, index, RES_COUNTER);
    return err;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
    struct res_gid *rgid;
    struct res_gid *tmp;
    struct mlx4_qp qp; /* dummy for calling attach/detach */

    list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
            mlx4_flow_detach(dev, rgid->reg_id);
            break;
        case MLX4_STEERING_MODE_B0:
            qp.qpn = rqp->local_qpn;
            (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
                                         rgid->prot, rgid->steer);
            break;
        }
        list_del(&rgid->list);
        kfree(rgid);
    }
}
static int _move_all_busy(struct mlx4_dev *dev, int slave,
                          enum mlx4_resource type, int print)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker =
        &priv->mfunc.master.res_tracker;
    struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
    struct res_common *r;
    struct res_common *tmp;
    int busy = 0;

    spin_lock_irq(mlx4_tlock(dev));
    list_for_each_entry_safe(r, tmp, rlist, list) {
        if (r->owner == slave) {
            if (r->state == RES_ANY_BUSY) {
                if (print)
                    mlx4_dbg(dev,
                             "%s id 0x%llx is busy\n",
                             resource_str(type),
                             r->res_id);
                ++busy;
            } else {
                r->from_state = r->state;
                r->state = RES_ANY_BUSY;
            }
        }
    }
    spin_unlock_irq(mlx4_tlock(dev));

    return busy;
}
static int move_all_busy(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type)
{
    unsigned long begin;
    int busy;

    begin = jiffies;
    do {
        busy = _move_all_busy(dev, slave, type, 0);
        if (time_after(jiffies, begin + 5 * HZ))
            break;
        if (busy)
            cond_resched();
    } while (busy);

    if (busy)
        busy = _move_all_busy(dev, slave, type, 1);

    return busy;
}
*dev
, int slave
)
4661 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4662 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4663 struct list_head
*qp_list
=
4664 &tracker
->slave_list
[slave
].res_list
[RES_QP
];
4672 err
= move_all_busy(dev
, slave
, RES_QP
);
4674 mlx4_warn(dev
, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4677 spin_lock_irq(mlx4_tlock(dev
));
4678 list_for_each_entry_safe(qp
, tmp
, qp_list
, com
.list
) {
4679 spin_unlock_irq(mlx4_tlock(dev
));
4680 if (qp
->com
.owner
== slave
) {
4681 qpn
= qp
->com
.res_id
;
4682 detach_qp(dev
, slave
, qp
);
4683 state
= qp
->com
.from_state
;
4684 while (state
!= 0) {
4686 case RES_QP_RESERVED
:
4687 spin_lock_irq(mlx4_tlock(dev
));
4688 rb_erase(&qp
->com
.node
,
4689 &tracker
->res_tree
[RES_QP
]);
4690 list_del(&qp
->com
.list
);
4691 spin_unlock_irq(mlx4_tlock(dev
));
4692 if (!valid_reserved(dev
, slave
, qpn
)) {
4693 __mlx4_qp_release_range(dev
, qpn
, 1);
4694 mlx4_release_resource(dev
, slave
,
4701 if (!valid_reserved(dev
, slave
, qpn
))
4702 __mlx4_qp_free_icm(dev
, qpn
);
4703 state
= RES_QP_RESERVED
;
4707 err
= mlx4_cmd(dev
, in_param
,
4710 MLX4_CMD_TIME_CLASS_A
,
4713 mlx4_dbg(dev
, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4714 slave
, qp
->local_qpn
);
4715 atomic_dec(&qp
->rcq
->ref_count
);
4716 atomic_dec(&qp
->scq
->ref_count
);
4717 atomic_dec(&qp
->mtt
->ref_count
);
4719 atomic_dec(&qp
->srq
->ref_count
);
4720 state
= RES_QP_MAPPED
;
4727 spin_lock_irq(mlx4_tlock(dev
));
4729 spin_unlock_irq(mlx4_tlock(dev
));
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
    struct list_head *srq_list =
        &tracker->slave_list[slave].res_list[RES_SRQ];
    struct res_srq *srq;
    struct res_srq *tmp;
    int state;
    u64 in_param;
    int srqn;
    int err;

    err = move_all_busy(dev, slave, RES_SRQ);
    if (err)
        mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
                  slave);

    spin_lock_irq(mlx4_tlock(dev));
    list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
        spin_unlock_irq(mlx4_tlock(dev));
        if (srq->com.owner == slave) {
            srqn = srq->com.res_id;
            state = srq->com.from_state;
            while (state != 0) {
                switch (state) {
                case RES_SRQ_ALLOCATED:
                    __mlx4_srq_free_icm(dev, srqn);
                    spin_lock_irq(mlx4_tlock(dev));
                    rb_erase(&srq->com.node,
                             &tracker->res_tree[RES_SRQ]);
                    list_del(&srq->com.list);
                    spin_unlock_irq(mlx4_tlock(dev));
                    mlx4_release_resource(dev, slave,
                                          RES_SRQ, 1, 0);
                    kfree(srq);
                    state = 0;
                    break;

                case RES_SRQ_HW:
                    in_param = slave;
                    err = mlx4_cmd(dev, in_param, srqn, 1,
                                   MLX4_CMD_HW2SW_SRQ,
                                   MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_NATIVE);
                    if (err)
                        mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
                                 slave, srqn);

                    atomic_dec(&srq->mtt->ref_count);
                    if (srq->cq)
                        atomic_dec(&srq->cq->ref_count);
                    state = RES_SRQ_ALLOCATED;
                    break;

                default:
                    state = 0;
                }
            }
        }
        spin_lock_irq(mlx4_tlock(dev));
    }
    spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
    struct list_head *cq_list =
        &tracker->slave_list[slave].res_list[RES_CQ];
    struct res_cq *cq;
    struct res_cq *tmp;
    int state;
    u64 in_param;
    int cqn;
    int err;

    err = move_all_busy(dev, slave, RES_CQ);
    if (err)
        mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
                  slave);

    spin_lock_irq(mlx4_tlock(dev));
    list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
        spin_unlock_irq(mlx4_tlock(dev));
        if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
            cqn = cq->com.res_id;
            state = cq->com.from_state;
            while (state != 0) {
                switch (state) {
                case RES_CQ_ALLOCATED:
                    __mlx4_cq_free_icm(dev, cqn);
                    spin_lock_irq(mlx4_tlock(dev));
                    rb_erase(&cq->com.node,
                             &tracker->res_tree[RES_CQ]);
                    list_del(&cq->com.list);
                    spin_unlock_irq(mlx4_tlock(dev));
                    mlx4_release_resource(dev, slave,
                                          RES_CQ, 1, 0);
                    kfree(cq);
                    state = 0;
                    break;

                case RES_CQ_HW:
                    in_param = slave;
                    err = mlx4_cmd(dev, in_param, cqn, 1,
                                   MLX4_CMD_HW2SW_CQ,
                                   MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_NATIVE);
                    if (err)
                        mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
                                 slave, cqn);
                    atomic_dec(&cq->mtt->ref_count);
                    state = RES_CQ_ALLOCATED;
                    break;

                default:
                    state = 0;
                }
            }
        }
        spin_lock_irq(mlx4_tlock(dev));
    }
    spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
    struct list_head *mpt_list =
        &tracker->slave_list[slave].res_list[RES_MPT];
    struct res_mpt *mpt;
    struct res_mpt *tmp;
    int state;
    u64 in_param;
    int mptn;
    int err;

    err = move_all_busy(dev, slave, RES_MPT);
    if (err)
        mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
                  slave);

    spin_lock_irq(mlx4_tlock(dev));
    list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
        spin_unlock_irq(mlx4_tlock(dev));
        if (mpt->com.owner == slave) {
            mptn = mpt->com.res_id;
            state = mpt->com.from_state;
            while (state != 0) {
                switch (state) {
                case RES_MPT_RESERVED:
                    __mlx4_mpt_release(dev, mpt->key);
                    spin_lock_irq(mlx4_tlock(dev));
                    rb_erase(&mpt->com.node,
                             &tracker->res_tree[RES_MPT]);
                    list_del(&mpt->com.list);
                    spin_unlock_irq(mlx4_tlock(dev));
                    mlx4_release_resource(dev, slave,
                                          RES_MPT, 1, 0);
                    kfree(mpt);
                    state = 0;
                    break;

                case RES_MPT_MAPPED:
                    __mlx4_mpt_free_icm(dev, mpt->key);
                    state = RES_MPT_RESERVED;
                    break;

                case RES_MPT_HW:
                    in_param = slave;
                    err = mlx4_cmd(dev, in_param, mptn, 0,
                                   MLX4_CMD_HW2SW_MPT,
                                   MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_NATIVE);
                    if (err)
                        mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
                                 slave, mptn);
                    if (mpt->mtt)
                        atomic_dec(&mpt->mtt->ref_count);
                    state = RES_MPT_MAPPED;
                    break;
                default:
                    state = 0;
                }
            }
        }
        spin_lock_irq(mlx4_tlock(dev));
    }
    spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker =
        &priv->mfunc.master.res_tracker;
    struct list_head *mtt_list =
        &tracker->slave_list[slave].res_list[RES_MTT];
    struct res_mtt *mtt;
    struct res_mtt *tmp;
    int state;
    int base;
    int err;

    err = move_all_busy(dev, slave, RES_MTT);
    if (err)
        mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
                  slave);

    spin_lock_irq(mlx4_tlock(dev));
    list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
        spin_unlock_irq(mlx4_tlock(dev));
        if (mtt->com.owner == slave) {
            base = mtt->com.res_id;
            state = mtt->com.from_state;
            while (state != 0) {
                switch (state) {
                case RES_MTT_ALLOCATED:
                    __mlx4_free_mtt_range(dev, base,
                                          mtt->order);
                    spin_lock_irq(mlx4_tlock(dev));
                    rb_erase(&mtt->com.node,
                             &tracker->res_tree[RES_MTT]);
                    list_del(&mtt->com.list);
                    spin_unlock_irq(mlx4_tlock(dev));
                    mlx4_release_resource(dev, slave, RES_MTT,
                                          1 << mtt->order, 0);
                    kfree(mtt);
                    state = 0;
                    break;

                default:
                    state = 0;
                }
            }
        }
        spin_lock_irq(mlx4_tlock(dev));
    }
    spin_unlock_irq(mlx4_tlock(dev));
}
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
    struct mlx4_cmd_mailbox *mailbox;
    int err;
    struct res_fs_rule *mirr_rule;
    u64 reg_id;

    mailbox = mlx4_alloc_cmd_mailbox(dev);
    if (IS_ERR(mailbox))
        return PTR_ERR(mailbox);

    if (!fs_rule->mirr_mbox) {
        mlx4_err(dev, "rule mirroring mailbox is null\n");
        mlx4_free_cmd_mailbox(dev, mailbox);
        return -EINVAL;
    }
    memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
    err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
                       MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
    mlx4_free_cmd_mailbox(dev, mailbox);

    if (err)
        goto err;

    err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
    if (err)
        goto err_detach;

    err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
    if (err)
        goto err_rem;

    fs_rule->mirr_rule_id = reg_id;
    mirr_rule->mirr_rule_id = 0;
    mirr_rule->mirr_mbox_size = 0;
    mirr_rule->mirr_mbox = NULL;
    put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);

    return 0;
err_rem:
    rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
err_detach:
    mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
             MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
err:
    return err;
}
static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker =
        &priv->mfunc.master.res_tracker;
    struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
    struct rb_node *p;
    struct res_fs_rule *fs_rule;
    int err = 0;
    LIST_HEAD(mirr_list);

    for (p = rb_first(root); p; p = rb_next(p)) {
        fs_rule = rb_entry(p, struct res_fs_rule, com.node);
        if ((bond && fs_rule->mirr_mbox_size) ||
            (!bond && !fs_rule->mirr_mbox_size))
            list_add_tail(&fs_rule->mirr_list, &mirr_list);
    }

    list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
        if (bond)
            err += mlx4_do_mirror_rule(dev, fs_rule);
        else
            err += mlx4_undo_mirror_rule(dev, fs_rule);
    }

    return err;
}

int mlx4_bond_fs_rules(struct mlx4_dev *dev)
{
    return mlx4_mirror_fs_rules(dev, true);
}

int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
{
    return mlx4_mirror_fs_rules(dev, false);
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker =
        &priv->mfunc.master.res_tracker;
    struct list_head *fs_rule_list =
        &tracker->slave_list[slave].res_list[RES_FS_RULE];
    struct res_fs_rule *fs_rule;
    struct res_fs_rule *tmp;
    int state;
    u64 base;
    int err;

    err = move_all_busy(dev, slave, RES_FS_RULE);
    if (err)
        mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
                  slave);

    spin_lock_irq(mlx4_tlock(dev));
    list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
        spin_unlock_irq(mlx4_tlock(dev));
        if (fs_rule->com.owner == slave) {
            base = fs_rule->com.res_id;
            state = fs_rule->com.from_state;
            while (state != 0) {
                switch (state) {
                case RES_FS_RULE_ALLOCATED:
                    /* detach rule */
                    err = mlx4_cmd(dev, base, 0, 0,
                                   MLX4_QP_FLOW_STEERING_DETACH,
                                   MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_NATIVE);

                    spin_lock_irq(mlx4_tlock(dev));
                    rb_erase(&fs_rule->com.node,
                             &tracker->res_tree[RES_FS_RULE]);
                    list_del(&fs_rule->com.list);
                    spin_unlock_irq(mlx4_tlock(dev));
                    kfree(fs_rule->mirr_mbox);
                    kfree(fs_rule);
                    state = 0;
                    break;

                default:
                    state = 0;
                }
            }
        }
        spin_lock_irq(mlx4_tlock(dev));
    }
    spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
    struct list_head *eq_list =
        &tracker->slave_list[slave].res_list[RES_EQ];
    struct res_eq *eq;
    struct res_eq *tmp;
    int err;
    int state;
    int eqn;

    err = move_all_busy(dev, slave, RES_EQ);
    if (err)
        mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
                  slave);

    spin_lock_irq(mlx4_tlock(dev));
    list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
        spin_unlock_irq(mlx4_tlock(dev));
        if (eq->com.owner == slave) {
            eqn = eq->com.res_id;
            state = eq->com.from_state;
            while (state != 0) {
                switch (state) {
                case RES_EQ_RESERVED:
                    spin_lock_irq(mlx4_tlock(dev));
                    rb_erase(&eq->com.node,
                             &tracker->res_tree[RES_EQ]);
                    list_del(&eq->com.list);
                    spin_unlock_irq(mlx4_tlock(dev));
                    kfree(eq);
                    state = 0;
                    break;

                case RES_EQ_HW:
                    err = mlx4_cmd(dev, slave, eqn & 0x3ff,
                                   1, MLX4_CMD_HW2SW_EQ,
                                   MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_NATIVE);
                    if (err)
                        mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
                                 slave, eqn & 0x3ff);
                    atomic_dec(&eq->mtt->ref_count);
                    state = RES_EQ_RESERVED;
                    break;

                default:
                    state = 0;
                }
            }
        }
        spin_lock_irq(mlx4_tlock(dev));
    }
    spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int *counters_arr = NULL;
	int i, j;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return;

	do {
		i = 0;
		j = 0;
		spin_lock_irq(mlx4_tlock(dev));
		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
			if (counter->com.owner == slave) {
				counters_arr[i++] = counter->com.res_id;
				rb_erase(&counter->com.node,
					 &tracker->res_tree[RES_COUNTER]);
				list_del(&counter->com.list);
				kfree(counter);
			}
		}
		spin_unlock_irq(mlx4_tlock(dev));

		while (j < i) {
			__mlx4_counter_free(dev, counters_arr[j++]);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	} while (i);

	kfree(counters_arr);
}
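
/* Release all XRC domains still tracked for @slave. */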
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
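
/* Free every resource type still owned by @slave, typically when a VF goes
 * down or is reset. The per-slave mutex is held around the whole teardown
 * so it cannot race with other resource-tracker operations for this slave.
 */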
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
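
/* Add the QoS vport to an UPDATE_QP mailbox being built for a VF QP. */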
static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
			   struct mlx4_vf_immed_vlan_work *work)
{
	ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
	ctx->qp_context.qos_vport = work->qos_vport;
}
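
/* Worker that applies an immediate VST/VGT VLAN change for a VF: it walks
 * every QP owned by work->slave on work->port and issues UPDATE_QP to
 * rewrite the primary path VLAN fields, either restoring the QP's saved
 * VGT parameters or forcing the new VST vlan_id/QoS. If all updates
 * succeed, the previously registered VLAN index is released.
 */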
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (work->vlan_proto == htons(ETH_P_8021AD))
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
	else /* vst 802.1Q */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
				if (work->vlan_proto == htons(ETH_P_8021AD))
					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
				else
					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);

				if (dev->caps.flags2 &
				    MLX4_DEV_CAP_FLAG2_QOS_VPP)
					update_qos_vpp(upd_context, work);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);