/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/***************************************************************/
/* This file supports the handling of the Alias GUID feature.  */
/***************************************************************/
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <linux/delay.h>
#include "mlx4_ib.h"
/*
 * The driver keeps the current state of all GUIDs, as they are in the HW.
 * Whenever an SMP MAD GUIDInfo record is received, the data is cached.
 */
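/*
 * Note (added for clarity): the cache referred to above is assumed to be
 * dev->sriov.demux[port - 1].guid_cache[slave_id], one 8-byte alias GUID per
 * slave, refreshed by mlx4_ib_update_cache_on_guid_change() below whenever a
 * GUIDInfo block arrives.
 */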
struct mlx4_alias_guid_work_context {
	u8 port;
	struct mlx4_ib_dev	*dev;
	struct ib_sa_query	*sa_query;
	struct completion	done;
	int			query_id;
	struct list_head	list;
	int			block_num;
};
struct mlx4_next_alias_guid_work {
	u8 port;
	u8 block_num;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};
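/*
 * Added summary of how these two structures are used (derived from the code
 * below): mlx4_alias_guid_work_context tracks one outstanding SA GUIDInfo
 * query per record block and is linked on the per-port cb_list so the query
 * can be cancelled on teardown; mlx4_next_alias_guid_work is the snapshot of
 * the next IDLE record that alias_guid_work() pulls and hands to
 * set_guid_rec().
 */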
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	int port_index = port_num - 1;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* The location of the specific index starts from bit number 4
		 * until bit num 11 */
		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
			if (slave_id >= dev->dev->num_slaves) {
				pr_debug("The last slave: %d\n", slave_id);
				return;
			}

			/* cache the guid: */
			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
			       &p_data[i * GUID_REC_SIZE],
			       GUID_REC_SIZE);
		} else
			pr_debug("Guid number: %d in block: %d"
				 " was not updated\n", i, block_num);
	}
}
static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
{
	if (index >= NUM_ALIAS_GUID_PER_PORT) {
		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
		return (__force __be64) -1;
	}
	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
{
	return IB_SA_COMP_MASK(4 + index);
}
/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not as we have in the cache, the slave will not be
 * updated; in this case it waits for the smp_snoop or the port management
 * event to call the function and to update the slave.
 * block_number - the index of the block (16 blocks available)
 * port_number - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, form_cache_ag;
	enum slave_port_gen_event gen_event;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	/*calculate the slaves and notify them*/
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
		if (slave_id >= dev->dev->num_vfs + 1)
			return;
		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		form_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check if guid is not the same as in the cache,
		 * If it is different, wait for the snoop_smp or the port mgmt
		 * change event to update the slave on its port state change
		 */
		if (tmp_cur_ag != form_cache_ag)
			continue;
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);

		/*2 cases: Valid GUID, and Invalid Guid*/

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
				 slave_id, port_num);
			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
		}
	}
}
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	rec->status = MLX4_GUID_INFO_STATUS_SET;
	rec->method = MLX4_GUID_INFO_RECORD_SET;

	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 tmp_cur_ag;
		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
		/* check if the SM didn't assign one of the records.
		 * if it didn't, if it was not sysadmin request:
		 * ask the SM to give a new GUID, (instead of the driver request).
		 */
		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
				     "block_num: %d was declined by SM, "
				     "ownership by %d (0 = driver, 1=sysAdmin,"
				     " 2=None)\n", __func__, i,
				     guid_rec->block_num, rec->ownership);
			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
				/* if it is driver assign, asks for new GUID from SM*/
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					MLX4_NOT_SET_GUID;

				/* Mark the record as not assigned, and let it
				 * be sent again in the next work sched.*/
				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
			}
		} else {
			/* properly assigned record. */
			/* We save the GUID we just got from the SM in the
			 * admin_guid in order to be persistent, and in the
			 * request from the sm the process will ask for the same GUID */
			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
				/* the sysadmin assignment failed.*/
				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
					     " admin guid after SysAdmin "
					     "configuration. "
					     "Record num %d in block_num:%d "
					     "was declined by SM, "
					     "new val(0x%llx) was kept\n",
					     __func__, i,
					     guid_rec->block_num,
					     be64_to_cpu(*(__be64 *) &
							 rec->all_recs[i * GUID_REC_SIZE]));
			} else {
				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
				       GUID_REC_SIZE);
			}
		}
	}
	/*
	 * The function is called here to close the cases when the
	 * SM doesn't send an SMP, so in the SA response the driver
	 * notifies the slave.
	 */
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down)
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work, 0);
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else
		complete(&cb_ctx->done);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
	int i;
	u64 cur_admin_val;
	ib_sa_comp_mask comp_mask = 0;

	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
		= MLX4_GUID_INFO_STATUS_IDLE;
	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
		= MLX4_GUID_INFO_RECORD_SET;

	/* calculate the comp_mask for that record.*/
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		cur_admin_val =
			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
		/*
		 * Check the admin value: if it's for delete (~00LL), or
		 * it is the first guid of the first record (hw guid), or
		 * the record is not owned by the sysadmin and the SM doesn't
		 * need to assign GUIDs, then don't put it up for assignment.
		 */
		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
		    (!index && !i) ||
		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
		    ports_guid[port - 1].all_rec_per_port[index].ownership)
			continue;
		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
	}
	dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].guid_indexes = comp_mask;
}
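/*
 * Added note: the comp_mask built above becomes rec_det->guid_indexes, which
 * set_guid_rec() ORs into the SA query's component mask; GUIDs excluded here
 * (delete value, the HW GUID at block 0/index 0, or "none" ownership) are
 * therefore never put up for SM assignment.
 */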
static int set_guid_rec(struct ib_device *ibdev,
			u8 port, int index,
			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/*check the port was configured by the sm, otherwise no need to send */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}

	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}
	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = cpu_to_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	init_completion(&callback_context->done);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec_det->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
	int i;
	unsigned long flags, flags1;

	pr_debug("port %d\n", port);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
		invalidate_guid_record(dev, port, i);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
		/*
		 * Make sure no work waits in the queue; if the work is already
		 * queued (not on the timer) the cancel will fail. That is not
		 * a problem because we just want the work started.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.
				    ports_guid[port - 1].alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
/* The function returns the next record that was
 * not configured (or failed to be configured) */
static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
				     struct mlx4_next_alias_guid_work *rec)
{
	int j;
	unsigned long flags;

	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
		    MLX4_GUID_INFO_STATUS_IDLE) {
			memcpy(&rec->rec_det,
			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
			rec->port = port;
			rec->block_num = j;
			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
				MLX4_GUID_INFO_STATUS_PENDING;
			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
			return 0;
		}
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	}
	return -ENOENT;
}
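/*
 * Added note on the record state machine implied above:
 * invalidate_guid_record() leaves a record IDLE, get_next_record_to_update()
 * moves the next IDLE record to PENDING before the SA query is issued, and
 * aliasguid_query_handler() marks it SET on success or back to IDLE so the
 * delayed work retries it.
 */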
static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
					     int rec_index,
					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
		rec_det->guid_indexes;
	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
		MLX4_GUID_INFO_STATUS_IDLE;
}
static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
{
	int j;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;

	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
			IB_SA_GUIDINFO_REC_GID7;
		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
		set_administratively_guid_record(dev, port, j, &rec_det);
	}
}
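/*
 * Added note: block 0 deliberately leaves IB_SA_GUIDINFO_REC_GID0 out of
 * guid_indexes above (the "!j ? 0 : ..." expression), since the first GUID of
 * the first record is the HW port GUID, not an alias the SM should assign;
 * this mirrors the "first guid of the first record" check in
 * invalidate_guid_record().
 */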
static void alias_guid_work(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	int ret = 0;
	struct mlx4_next_alias_guid_work *rec;
	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
			     alias_guid_work);
	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
						      struct mlx4_ib_sriov,
						      alias_guid);
	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

	rec = kzalloc(sizeof *rec, GFP_KERNEL);
	if (!rec) {
		pr_err("alias_guid_work: No Memory\n");
		return;
	}

	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
	if (ret) {
		pr_debug("No more records to update.\n");
		goto out;
	}

	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
		     &rec->rec_det);

out:
	kfree(rec);
}
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	if (!mlx4_is_master(dev->dev))
		return;
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
				   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0; i < dev->num_ports; i++) {
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
		det = &sriov->alias_guid.ports_guid[i];
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	for (i = 0; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j, k;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		/*Check if the SM doesn't need to assign the GUIDs*/
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			if (mlx4_ib_sm_guid_assign) {
				dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].
					ownership = MLX4_GUID_DRIVER_ASSIGN;
				continue;
			}
			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
				ownership = MLX4_GUID_NONE_ASSIGN;
			/* mark each val as if it was deleted,
			 * till the sysAdmin gives it a valid val */
			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
			}
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/*prepare the records, set them to be allocated by sm*/
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port = i;
		if (mlx4_ib_sm_guid_assign)
			set_all_slaves_guids(dev, i);

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			create_singlethread_workqueue(alias_wq_name);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
				  alias_guid_work);
	}
	return 0;

err_thread:
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}