/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/***********************************************************/
/* This file supports the handling of the Alias GUID feature. */
/***********************************************************/
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <linux/delay.h>
#include "mlx4_ib.h"
/*
 * The driver keeps the current state of all GUIDs as they are in the HW.
 * Whenever an SMP MAD GUIDInfo record is received, the data is cached.
 */
struct mlx4_alias_guid_work_context {
	u8 port;
	struct mlx4_ib_dev	*dev;
	struct ib_sa_query	*sa_query;
	struct completion	done;
	int			query_id;
	struct list_head	list;
	int			block_num;
	ib_sa_comp_mask		guid_indexes;
	u8			method;
};

struct mlx4_next_alias_guid_work {
	u8 port;
	u8 block_num;
	u8 method;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};
static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
				     int *resched_delay_sec);
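/*
 * Called when a GUIDInfo record for this port is received: copy the alias
 * GUID entries that are marked in the record's guid_indexes into the
 * per-port guid_cache, so later lookups can be answered from the cache.
 */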
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	int port_index = port_num - 1;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* The location of the specific index starts from bit number 4
		 * until bit num 11 */
		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
			if (slave_id >= dev->dev->num_slaves) {
				pr_debug("The last slave: %d\n", slave_id);
				return;
			}

			/* cache the guid: */
			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
			       &p_data[i * GUID_REC_SIZE],
			       GUID_REC_SIZE);
		} else
			pr_debug("Guid number: %d in block: %d"
				 " was not updated\n", i, block_num);
	}
}
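/* Return the cached alias GUID for the given port and index (-1 on a bad index). */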
static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
{
	if (index >= NUM_ALIAS_GUID_PER_PORT) {
		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
		return (__force __be64) -1;
	}
	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
{
	return IB_SA_COMP_MASK(4 + index);
}
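/*
 * Called when a slave goes up or down (slave_init): stage the slave's admin
 * GUID (or the delete value) in the port's alias-GUID record, mark the record
 * IDLE to run immediately, and kick the alias-GUID worker.
 */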
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
				    int port, int slave_init)
{
	__be64 curr_guid, required_guid;
	int record_num = slave / 8;
	int index = slave % 8;
	int port_index = port - 1;
	unsigned long flags;
	int do_work = 0;

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
	    GUID_STATE_NEED_PORT_INIT)
		goto unlock;
	if (!slave_init) {
		curr_guid = *(__be64 *)&dev->sriov.
			alias_guid.ports_guid[port_index].
			all_rec_per_port[record_num].
			all_recs[GUID_REC_SIZE * index];
		if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
		    !curr_guid)
			goto unlock;
		required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
	} else {
		required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
		if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			goto unlock;
	}
	*(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].
		all_recs[GUID_REC_SIZE * index] = required_guid;
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].guid_indexes
		|= mlx4_ib_get_aguid_comp_mask_from_ix(index);
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].status
		= MLX4_GUID_INFO_STATUS_IDLE;
	/* set to run immediately */
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].time_to_run = 0;
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].
		guids_retry_schedule[index] = 0;
	do_work = 1;
unlock:
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);

	if (do_work)
		mlx4_ib_init_alias_guid_work(dev, port_index);
}
/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not the same as in the cache, the slave will not be
 * updated; in that case it waits for the smp_snoop or the port management
 * event to call this function and update the slave.
 * block_num - the index of the block (16 blocks available)
 * port_num - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id, slave_port;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, form_cache_ag;
	enum slave_port_gen_event gen_event;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags;
	__be64 required_value;

	if (!mlx4_is_master(dev->dev))
		return;

	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
			all_rec_per_port[block_num];
	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	/* calculate the slaves and notify them */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
		if (slave_id >= dev->dev->persist->num_vfs + 1)
			return;

		slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
		if (slave_port < 0) /* this port isn't available for the VF */
			continue;

		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		form_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check if the guid is not the same as in the cache.
		 * If it is different, wait for the snoop_smp or the port mgmt
		 * change event to update the slave on its port state change.
		 */
		if (tmp_cur_ag != form_cache_ag)
			continue;

		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];

		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			required_value = 0;

		if (tmp_cur_ag == required_value) {
			rec->guid_indexes = rec->guid_indexes &
				~mlx4_ib_get_aguid_comp_mask_from_ix(i);
		} else {
			/* may notify port down if value is 0 */
			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
				spin_unlock_irqrestore(&dev->sriov.
					alias_guid.ag_work_lock, flags);
				continue;
			}
		}
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
				       flags);
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
		/* 2 cases: Valid GUID, and Invalid Guid */

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev,
							       slave_id,
							       port_num,
							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
			}
		}
	}
}
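/*
 * SA client callback for a GUIDInfo SET/DELETE query: record which entries
 * the SM accepted or declined, back off declined entries (doubling the retry
 * delay up to 60 seconds), notify the slaves, and re-queue the alias-GUID
 * work according to the lowest pending record time.
 */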
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;
	ib_sa_comp_mask declined_guid_indexes = 0;
	ib_sa_comp_mask applied_guid_indexes = 0;
	unsigned int resched_delay_sec = 0;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 sm_response, required_val;

		if (!(cb_ctx->guid_indexes &
		      mlx4_ib_get_aguid_comp_mask_from_ix(i)))
			continue;
		sm_response = *(__be64 *)&guid_rec->guid_info_list
				[i * GUID_REC_SIZE];
		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
			if (required_val ==
			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
				goto next_entry;

			/* A new value was set till we got the response */
			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
				 be64_to_cpu(required_val),
				 i, guid_rec->block_num);
			goto entry_declined;
		}

		/* check if the SM didn't assign one of the records.
		 * if it didn't, re-ask for it.
		 */
		if (sm_response == MLX4_NOT_SET_GUID) {
			if (rec->guids_retry_schedule[i] == 0)
				mlx4_ib_warn(&dev->ib_dev,
					     "%s:Record num %d in block_num: %d was declined by SM\n",
					     __func__, i,
					     guid_rec->block_num);
			goto entry_declined;
		} else {
			/* properly assigned record. */
			/* We save the GUID we just got from the SM in the
			 * admin_guid in order to be persistent, and in the
			 * request from the sm the process will ask for the same GUID */
			if (required_val &&
			    sm_response != required_val) {
				/* Warn only on first retry */
				if (rec->guids_retry_schedule[i] == 0)
					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
						     " admin guid after SysAdmin "
						     "configuration. "
						     "Record num %d in block_num:%d "
						     "was declined by SM, "
						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
						     __func__, i,
						     guid_rec->block_num,
						     be64_to_cpu(required_val),
						     be64_to_cpu(sm_response));
				goto entry_declined;
			} else {
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					sm_response;
				if (required_val == 0)
					mlx4_set_admin_guid(dev->dev,
							    sm_response,
							    (guid_rec->block_num
							     * NUM_ALIAS_GUID_IN_REC) + i,
							    cb_ctx->port);
				goto next_entry;
			}
		}
entry_declined:
		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
		rec->guids_retry_schedule[i] =
			(rec->guids_retry_schedule[i] == 0) ? 1 :
			min((unsigned int)60,
			    rec->guids_retry_schedule[i] * 2);
		/* using the minimum value among all entries in that record */
		resched_delay_sec = (resched_delay_sec == 0) ?
				rec->guids_retry_schedule[i] :
				min(resched_delay_sec,
				    rec->guids_retry_schedule[i]);
		continue;

next_entry:
		rec->guids_retry_schedule[i] = 0;
	}

	applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
	if (declined_guid_indexes ||
	    rec->guid_indexes & ~(applied_guid_indexes)) {
		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
			 guid_rec->block_num,
			 be64_to_cpu((__force __be64)rec->guid_indexes),
			 be64_to_cpu((__force __be64)applied_guid_indexes),
			 be64_to_cpu((__force __be64)declined_guid_indexes));
		rec->time_to_run = ktime_get_real_ns() +
			resched_delay_sec * NSEC_PER_SEC;
	} else {
		rec->status = MLX4_GUID_INFO_STATUS_SET;
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	/*
	 * The function is called here to cover the case where the SM does not
	 * send an SMP, so based on the SA response the driver notifies the
	 * slave.
	 */
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		get_low_record_time_index(dev, port_index, &resched_delay_sec);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work,
				   msecs_to_jiffies(resched_delay_sec * 1000));
	}
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else {
		complete(&cb_ctx->done);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
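/*
 * Mark a single GUIDInfo record as needing to be (re)published to the SM:
 * rebuild its comp_mask from the admin values and, if any entry remains to
 * be sent, move the record back to the IDLE state.
 */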
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
	int i;
	u64 cur_admin_val;
	ib_sa_comp_mask comp_mask = 0;

	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
		= MLX4_GUID_INFO_STATUS_SET;

	/* calculate the comp_mask for that record. */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		cur_admin_val =
			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
		/*
		 * Check the admin value: if it's for delete (~00LL), or it is
		 * the first guid of the first record (hw guid), or the record
		 * is not owned by the sysadmin and the SM doesn't need to
		 * assign GUIDs, then don't put it up for assignment.
		 */
		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
		    (!index && !i))
			continue;
		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
	}
	dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].guid_indexes |= comp_mask;
	if (dev->sriov.alias_guid.ports_guid[port - 1].
	    all_rec_per_port[index].guid_indexes)
		dev->sriov.alias_guid.ports_guid[port - 1].
			all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
}
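/*
 * Build an ib_sa_guidinfo_rec from the required record and send it to the SA
 * (SET or DELETE). On any failure the record is invalidated and the per-port
 * alias-GUID work is rescheduled.
 */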
static int set_guid_rec(struct ib_device *ibdev,
			struct mlx4_next_alias_guid_work *rec)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	u8 port = rec->port + 1;
	int index = rec->block_num;
	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/* check the port was configured by the sm, otherwise no need to send */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}

	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}

	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;
	callback_context->guid_indexes = rec_det->guid_indexes;
	callback_context->method = rec->method;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = cpu_to_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	init_completion(&callback_context->done);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}
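/*
 * Seed the per-port alias-GUID records with the admin GUIDs of all currently
 * active slaves, so the first SA update publishes the values the admin chose.
 */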
static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
{
	int j, k, entry;
	__be64 guid;

	/* Check if the SM doesn't need to assign the GUIDs */
	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
			entry = j * NUM_ALIAS_GUID_IN_REC + k;
			/* no request for the 0 entry (hw guid) */
			if (!entry || entry > dev->dev->persist->num_vfs ||
			    !mlx4_is_slave_active(dev->dev, entry))
				continue;
			guid = mlx4_get_admin_guid(dev->dev, entry, port);
			*(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
				all_rec_per_port[j].all_recs
				[GUID_REC_SIZE * k] = guid;
			pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
				 entry,
				 be64_to_cpu(guid),
				 port);
		}
	}
}
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
	int i;
	unsigned long flags, flags1;

	pr_debug("port %d\n", port);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);

	if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
	    GUID_STATE_NEED_PORT_INIT) {
		mlx4_ib_guid_port_init(dev, port);
		dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
			(~GUID_STATE_NEED_PORT_INIT);
	}
	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
		invalidate_guid_record(dev, port, i);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
		/*
		 * Make sure no work is waiting in the queue. If the work is
		 * already queued (not on the timer) the cancel will fail; that
		 * is not a problem because we just want the work started.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.
				    ports_guid[port - 1].alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
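/*
 * Prepare the next SA request for a record: split the pending entries into
 * SET and DELETE comp masks and choose the method according to the entry
 * with the lowest retry time.
 */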
static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
				struct mlx4_next_alias_guid_work *next_rec,
				int record_index)
{
	int i;
	int lowset_time_entry = -1;
	int lowest_time = 0;
	ib_sa_comp_mask delete_guid_indexes = 0;
	ib_sa_comp_mask set_guid_indexes = 0;
	struct mlx4_sriov_alias_guid_info_rec_det *rec =
			&dev->sriov.alias_guid.ports_guid[port].
			all_rec_per_port[record_index];

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		if (!(rec->guid_indexes &
		      mlx4_ib_get_aguid_comp_mask_from_ix(i)))
			continue;

		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
		    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			delete_guid_indexes |=
				mlx4_ib_get_aguid_comp_mask_from_ix(i);
		else
			set_guid_indexes |=
				mlx4_ib_get_aguid_comp_mask_from_ix(i);

		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
		    lowest_time) {
			lowset_time_entry = i;
			lowest_time = rec->guids_retry_schedule[i];
		}
	}

	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
	next_rec->port = port;
	next_rec->block_num = record_index;

	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
	    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
		next_rec->rec_det.guid_indexes = delete_guid_indexes;
		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
	} else {
		next_rec->rec_det.guid_indexes = set_guid_indexes;
		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
	}
}
/* return index of record that should be updated based on lowest
 * rescheduled time
 */
static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
				     int *resched_delay_sec)
{
	int record_index = -1;
	u64 low_record_time = 0;
	struct mlx4_sriov_alias_guid_info_rec_det rec;
	int j;

	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		rec = dev->sriov.alias_guid.ports_guid[port].
			all_rec_per_port[j];
		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
		    rec.guid_indexes) {
			if (record_index == -1 ||
			    rec.time_to_run < low_record_time) {
				record_index = j;
				low_record_time = rec.time_to_run;
			}
		}
	}
	if (resched_delay_sec) {
		u64 curr_time = ktime_get_real_ns();

		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
	}

	return record_index;
}
/* The function returns the next record that was
 * not configured (or failed to be configured) */
static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
				     struct mlx4_next_alias_guid_work *rec)
{
	unsigned long flags;
	int record_index;
	int ret = 0;

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	record_index = get_low_record_time_index(dev, port, NULL);

	if (record_index < 0) {
		ret = -ENOENT;
		goto out;
	}

	set_required_record(dev, port, rec, record_index);
out:
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	return ret;
}
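/*
 * Delayed-work handler: pick the next record that still needs to be
 * configured on this port and push it to the SA via set_guid_rec().
 */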
static void alias_guid_work(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	int ret = 0;
	struct mlx4_next_alias_guid_work *rec;
	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
			     alias_guid_work);
	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
						      struct mlx4_ib_sriov,
						      alias_guid);
	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

	rec = kzalloc(sizeof *rec, GFP_KERNEL);
	if (!rec) {
		pr_err("alias_guid_work: No Memory\n");
		return;
	}

	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
	if (ret) {
		pr_debug("No more records to update.\n");
		goto out;
	}

	set_guid_rec(&dev->ib_dev, rec);
out:
	kfree(rec);
}
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	if (!mlx4_is_master(dev->dev))
		return;
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		/*
		 * If there is a pending work, cancel it and then queue again;
		 * otherwise the new work won't run until the previous one
		 * ends, since the same work struct is used.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
				    alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
				   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
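/*
 * Tear down the alias-GUID machinery: cancel the delayed work, cancel any
 * outstanding SA queries and wait for their completions, then destroy the
 * per-port workqueues and release the SA client.
 */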
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0 ; i < dev->num_ports; i++) {
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
		det = &sriov->alias_guid.ports_guid[i];
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	for (i = 0 ; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}
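/*
 * Set up the alias-GUID machinery on the master: register an SA client,
 * initialize the per-port records (all entries marked as deleted), and create
 * one single-threaded workqueue per port for the alias-GUID work.
 */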
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0 ; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		dev->sriov.alias_guid.ports_guid[i].state_flags |=
				GUID_STATE_NEED_PORT_INIT;
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			/* mark each val as it was deleted */
			memset(dev->sriov.alias_guid.ports_guid[i].
			       all_rec_per_port[j].all_recs, 0xFF,
			       sizeof(dev->sriov.alias_guid.ports_guid[i].
				      all_rec_per_port[j].all_recs));
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/* prepare the records, set them to be allocated by sm */
		if (mlx4_ib_sm_guid_assign)
			for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
				mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port  = i;

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			create_singlethread_workqueue(alias_wq_name);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
				  alias_guid_work);
	}
	return 0;

err_thread:
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}