// SPDX-License-Identifier: GPL-2.0-only
/*
 * UWB reservation management.
 *
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/export.h>

#include "uwb-internal.h"

static void uwb_rsv_timer(struct timer_list *t);

static const char *rsv_states[] = {
	[UWB_RSV_STATE_NONE]                 = "none            ",
	[UWB_RSV_STATE_O_INITIATED]          = "o initiated     ",
	[UWB_RSV_STATE_O_PENDING]            = "o pending       ",
	[UWB_RSV_STATE_O_MODIFIED]           = "o modified      ",
	[UWB_RSV_STATE_O_ESTABLISHED]        = "o established   ",
	[UWB_RSV_STATE_O_TO_BE_MOVED]        = "o to be moved   ",
	[UWB_RSV_STATE_O_MOVE_EXPANDING]     = "o move expanding",
	[UWB_RSV_STATE_O_MOVE_COMBINING]     = "o move combining",
	[UWB_RSV_STATE_O_MOVE_REDUCING]      = "o move reducing ",
	[UWB_RSV_STATE_T_ACCEPTED]           = "t accepted      ",
	[UWB_RSV_STATE_T_CONFLICT]           = "t conflict      ",
	[UWB_RSV_STATE_T_PENDING]            = "t pending       ",
	[UWB_RSV_STATE_T_DENIED]             = "t denied        ",
	[UWB_RSV_STATE_T_RESIZED]            = "t resized       ",
	[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
	[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
	[UWB_RSV_STATE_T_EXPANDING_PENDING]  = "t expanding pend",
	[UWB_RSV_STATE_T_EXPANDING_DENIED]   = "t expanding den ",
};

static const char *rsv_types[] = {
	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
	[UWB_DRP_TYPE_HARD]     = "hard",
	[UWB_DRP_TYPE_SOFT]     = "soft",
	[UWB_DRP_TYPE_PRIVATE]  = "private",
	[UWB_DRP_TYPE_PCA]      = "pca",
};

bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
{
	static const bool has_two_drp_ies[] = {
		[UWB_RSV_STATE_O_INITIATED]          = false,
		[UWB_RSV_STATE_O_PENDING]            = false,
		[UWB_RSV_STATE_O_MODIFIED]           = false,
		[UWB_RSV_STATE_O_ESTABLISHED]        = false,
		[UWB_RSV_STATE_O_TO_BE_MOVED]        = false,
		[UWB_RSV_STATE_O_MOVE_COMBINING]     = false,
		[UWB_RSV_STATE_O_MOVE_REDUCING]      = false,
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = true,
		[UWB_RSV_STATE_T_ACCEPTED]           = false,
		[UWB_RSV_STATE_T_CONFLICT]           = false,
		[UWB_RSV_STATE_T_PENDING]            = false,
		[UWB_RSV_STATE_T_DENIED]             = false,
		[UWB_RSV_STATE_T_RESIZED]            = false,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = true,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = true,
	};

	return has_two_drp_ies[rsv->state];
}

/**
 * uwb_rsv_state_str - return a string for a reservation state
 * @state: the reservation state.
 */
const char *uwb_rsv_state_str(enum uwb_rsv_state state)
{
	if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
		return "unknown";
	return rsv_states[state];
}
EXPORT_SYMBOL_GPL(uwb_rsv_state_str);

/**
 * uwb_rsv_type_str - return a string for a reservation type
 * @type: the reservation type
 */
const char *uwb_rsv_type_str(enum uwb_drp_type type)
{
	if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
		return "invalid";
	return rsv_types[type];
}
EXPORT_SYMBOL_GPL(uwb_rsv_type_str);

void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
{
	struct device *dev = &rsv->rc->uwb_dev.dev;
	struct uwb_dev_addr devaddr;
	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		devaddr = rsv->target.dev->dev_addr;
	else
		devaddr = rsv->target.devaddr;
	uwb_dev_addr_print(target, sizeof(target), &devaddr);

	dev_dbg(dev, "rsv %s %s -> %s: %s\n",
		text, owner, target, uwb_rsv_state_str(rsv->state));
}

static void uwb_rsv_release(struct kref *kref)
{
	struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref);

	kfree(rsv);
}

void uwb_rsv_get(struct uwb_rsv *rsv)
{
	kref_get(&rsv->kref);
}

void uwb_rsv_put(struct uwb_rsv *rsv)
{
	kref_put(&rsv->kref, uwb_rsv_release);
}

/*
 * Get a free stream index for a reservation.
 *
 * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
 * the stream is allocated from a pool of per-RC stream indexes,
 * otherwise a unique stream index for the target is selected.
 */
static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;
	int stream;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return -EINVAL;
	}

	stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
	if (stream >= UWB_NUM_STREAMS) {
		dev_err(dev, "%s: no available stream found\n", __func__);
		return -EBUSY;
	}

	rsv->stream = stream;
	set_bit(stream, streams_bm);

	dev_dbg(dev, "get stream %d\n", rsv->stream);

	return 0;
}

static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return;
	}

	clear_bit(rsv->stream, streams_bm);

	dev_dbg(dev, "put stream %d\n", rsv->stream);
}

void uwb_rsv_backoff_win_timer(struct timer_list *t)
{
	struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
	struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
	struct device *dev = &rc->uwb_dev.dev;

	bow->can_reserve_extra_mases = true;
	if (bow->total_expired <= 4) {
		bow->total_expired++;
	} else {
		/* after 4 backoff windows have expired we can exit from
		 * the backoff procedure */
		bow->total_expired = 0;
		bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	}
	dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n",
		bow->total_expired, bow->n);

	/* try to relocate all the "to be moved" reservations */
	uwb_rsv_handle_drp_avail_change(rc);
}

void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned timeout_us;

	dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);

	bow->can_reserve_extra_mases = false;

	if ((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
		return;

	bow->window <<= 1;
	bow->n = prandom_u32() & (bow->window - 1);
	dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n);

	/* reset the timer associated variables */
	timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
	bow->total_expired = 0;
	mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
}
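
/*
 * Note on the backoff procedure above: the window is doubled on each
 * increment until doubling it would reach UWB_DRP_BACKOFF_WIN_MAX, and
 * a slot n is drawn uniformly from [0, window) by masking prandom_u32()
 * with (window - 1), which relies on the window size being a power of
 * two.  The backoff timer then fires after n superframes, at which
 * point uwb_rsv_backoff_win_timer() re-enables reserving extra MAS.
 */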

static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
{
	int sframes = UWB_MAX_LOST_BEACONS;

	/*
	 * Multicast reservations can become established within 1
	 * super frame and should not be terminated if no response is
	 * received.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		sframes = 0;
	} else if (rsv->is_multicast) {
		if (rsv->state == UWB_RSV_STATE_O_INITIATED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
			sframes = 1;
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
			sframes = 0;
	}

	if (sframes > 0) {
		/*
		 * Add an additional 2 superframes to account for the
		 * time to send the SET DRP IE command.
		 */
		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
	} else
		del_timer(&rsv->timer);
}

/*
 * Update a reservation's state, and schedule an update of the
 * transmitted DRP IEs.
 */
static void uwb_rsv_state_update(struct uwb_rsv *rsv,
				 enum uwb_rsv_state new_state)
{
	rsv->state = new_state;
	rsv->ie_valid = false;

	uwb_rsv_dump("SU", rsv);

	uwb_rsv_stroke_timer(rsv);
	uwb_rsv_sched_update(rsv->rc);
}

static void uwb_rsv_callback(struct uwb_rsv *rsv)
{
	if (rsv->callback)
		rsv->callback(rsv);
}

void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	if (rsv->state == new_state) {
		switch (rsv->state) {
		case UWB_RSV_STATE_O_ESTABLISHED:
		case UWB_RSV_STATE_O_MOVE_EXPANDING:
		case UWB_RSV_STATE_O_MOVE_COMBINING:
		case UWB_RSV_STATE_O_MOVE_REDUCING:
		case UWB_RSV_STATE_T_ACCEPTED:
		case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		case UWB_RSV_STATE_T_RESIZED:
		case UWB_RSV_STATE_NONE:
			uwb_rsv_stroke_timer(rsv);
			break;
		default:
			/* Expecting a state transition so leave timer
			   as-is. */
			break;
		}
		return;
	}

	uwb_rsv_dump("SC", rsv);

	switch (new_state) {
	case UWB_RSV_STATE_NONE:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_INITIATED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
		break;
	case UWB_RSV_STATE_O_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		/* in the companion there are the MASes to drop */
		bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		if (rsv->state == UWB_RSV_STATE_O_MODIFIED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
			rsv->needs_release_companion_mas = false;
		}
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		rsv->needs_release_companion_mas = true;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		rsv->mas.safe   += mv->companion_mas.safe;
		rsv->mas.unsafe += mv->companion_mas.unsafe;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		rsv->needs_release_companion_mas = true;
		rsv->mas.safe   = mv->final_mas.safe;
		rsv->mas.unsafe = mv->final_mas.unsafe;
		bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_T_RESIZED:
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_DENIED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
		break;
	case UWB_RSV_STATE_T_CONFLICT:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
		break;
	case UWB_RSV_STATE_T_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		rsv->needs_release_companion_mas = true;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		break;
	default:
		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
			uwb_rsv_state_str(new_state), new_state);
	}
}

static void uwb_rsv_handle_timeout_work(struct work_struct *work)
{
	struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
					   handle_timeout_work);
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	uwb_rsv_dump("TO", rsv);

	switch (rsv->state) {
	case UWB_RSV_STATE_O_INITIATED:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		if (rsv->is_multicast)
			goto unlock;
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		/*
		 * The time out could be for the main or for the
		 * companion DRP, assume it's for the companion and
		 * drop that first.  A further time out is required to
		 * drop the main DRP.
		 */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
		goto unlock;
	case UWB_RSV_STATE_NONE:
		goto unlock;
	default:
		break;
	}

	uwb_rsv_remove(rsv);

unlock:
	mutex_unlock(&rc->rsvs_mutex);
}

static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv;

	rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
	if (!rsv)
		return NULL;

	INIT_LIST_HEAD(&rsv->rc_node);
	INIT_LIST_HEAD(&rsv->pal_node);
	kref_init(&rsv->kref);
	timer_setup(&rsv->timer, uwb_rsv_timer, 0);

	rsv->rc = rc;
	INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);

	return rsv;
}

/**
 * uwb_rsv_create - allocate and initialize a UWB reservation structure
 * @rc: the radio controller
 * @cb: callback to use when the reservation completes or terminates
 * @pal_priv: data private to the PAL to be passed in the callback
 *
 * The callback is called when the state of the reservation changes from:
 *
 *   - pending to accepted
 *   - pending to denied
 *   - accepted to terminated
 *   - pending to terminated
 */
struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;

	return rsv;
}
EXPORT_SYMBOL_GPL(uwb_rsv_create);

void uwb_rsv_remove(struct uwb_rsv *rsv)
{
	uwb_rsv_dump("RM", rsv);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	if (rsv->needs_release_companion_mas)
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
	uwb_drp_avail_release(rsv->rc, &rsv->mas);

	if (uwb_rsv_is_owner(rsv))
		uwb_rsv_put_stream(rsv);

	uwb_dev_put(rsv->owner);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		uwb_dev_put(rsv->target.dev);

	list_del_init(&rsv->rc_node);
	uwb_rsv_put(rsv);
}

/**
 * uwb_rsv_destroy - free a UWB reservation structure
 * @rsv: the reservation to free
 *
 * The reservation must already be terminated.
 */
void uwb_rsv_destroy(struct uwb_rsv *rsv)
{
	uwb_rsv_put(rsv);
}
EXPORT_SYMBOL_GPL(uwb_rsv_destroy);

/**
 * uwb_rsv_establish - start a reservation establishment
 * @rsv: the reservation
 *
 * The PAL should fill in @rsv's owner, target, type, max_mas,
 * min_mas, max_interval and is_multicast fields.  If the target is a
 * uwb_dev it must be referenced.
 *
 * The reservation's callback will be called when the reservation is
 * accepted, denied or times out.
 */
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_mas_bm available;
	struct device *dev = &rc->uwb_dev.dev;
	int ret;

	mutex_lock(&rc->rsvs_mutex);
	ret = uwb_rsv_get_stream(rsv);
	if (ret) {
		dev_err(dev, "%s: uwb_rsv_get_stream failed: %d\n",
			__func__, ret);
		goto out;
	}

	rsv->tiebreaker = prandom_u32() & 1;
	/* get available mas bitmap */
	uwb_drp_available(rc, &available);

	ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
	if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
		ret = -EBUSY;
		uwb_rsv_put_stream(rsv);
		dev_err(dev, "%s: uwb_rsv_find_best_allocation failed: %d\n",
			__func__, ret);
		goto out;
	}

	ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
	if (ret != 0) {
		uwb_rsv_put_stream(rsv);
		dev_err(dev, "%s: uwb_drp_avail_reserve_pending failed: %d\n",
			__func__, ret);
		goto out;
	}

	list_add_tail(&rsv->rc_node, &rc->reservations);
	rsv->owner = &rc->uwb_dev;
	uwb_dev_get(rsv->owner);
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
out:
	mutex_unlock(&rc->rsvs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(uwb_rsv_establish);
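
/*
 * Example usage sketch for a PAL (illustrative only; my_pal_rsv_cb,
 * my_pal, peer and the field values below are hypothetical): create a
 * reservation, fill in the fields listed in the uwb_rsv_establish()
 * kernel-doc and start the establishment.
 *
 *	static void my_pal_rsv_cb(struct uwb_rsv *rsv)
 *	{
 *		// called on pending -> accepted/denied/terminated transitions
 *	}
 *
 *	rsv = uwb_rsv_create(rc, my_pal_rsv_cb, my_pal);
 *	if (!rsv)
 *		return -ENOMEM;
 *	uwb_dev_get(peer);                  // target uwb_dev must be referenced
 *	rsv->target.type  = UWB_RSV_TARGET_DEV;
 *	rsv->target.dev   = peer;
 *	rsv->type         = UWB_DRP_TYPE_HARD;
 *	rsv->max_mas      = 256;
 *	rsv->min_mas      = 16;
 *	rsv->max_interval = 2;
 *	rsv->is_multicast = false;
 *	ret = uwb_rsv_establish(rsv);       // callback reports the outcome
 */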

/**
 * uwb_rsv_modify - modify an already established reservation
 * @rsv: the reservation to modify
 * @max_mas: new maximum MAS to reserve
 * @min_mas: new minimum MAS to reserve
 * @max_interval: new max_interval to use
 *
 * FIXME: implement this once there are PALs that use it.
 */
int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(uwb_rsv_modify);

/*
 * Move an already established reservation (rc->rsvs_mutex must be
 * held when this function is called).
 */
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv;
	int ret = 0;

	if (bow->can_reserve_extra_mases == false)
		return -EBUSY;

	mv = &rsv->mv;

	if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) {
		if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) {
			/* We want to move the reservation */
			bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
	} else {
		dev_dbg(dev, "new allocation not found\n");
	}

	return ret;
}

/*
 * Try to move every reservation in state O_ESTABLISHED, giving the MAS
 * allocator an availability that is the real one plus the allocation
 * already established for the reservation.
 */
void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv *rsv;
	struct uwb_mas_bm mas;

	if (bow->can_reserve_extra_mases == false)
		return;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
		    rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
			uwb_drp_available(rc, &mas);
			bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_rsv_try_move(rsv, &mas);
		}
	}
}

/**
 * uwb_rsv_terminate - terminate an established reservation
 * @rsv: the reservation to terminate
 *
 * A reservation is terminated by removing the DRP IE from the beacon,
 * the other end will consider the reservation to be terminated when
 * it does not see the DRP IE for at least mMaxLostBeacons.
 *
 * If applicable, the reference to the target uwb_dev will be released.
 */
void uwb_rsv_terminate(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_terminate);

/**
 * uwb_rsv_accept - accept a new reservation from a peer
 * @rsv:      the reservation
 * @cb:       call back for reservation changes
 * @pal_priv: data to be passed in the above call back
 *
 * Reservation requests from peers are denied unless a PAL accepts it
 * by calling this function.
 *
 * The PAL must call uwb_rsv_destroy() for all accepted reservations
 * before calling uwb_pal_unregister().
 */
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
{
	uwb_rsv_dump("ACC", rsv);

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;
	rsv->state    = UWB_RSV_STATE_T_ACCEPTED;
}
EXPORT_SYMBOL_GPL(uwb_rsv_accept);
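
/*
 * Illustrative sketch of the peer-reservation path: uwb_rsv_new_target()
 * below offers an incoming reservation to each PAL via its ->new_rsv()
 * hook, and the reservation stays denied unless some PAL accepts it with
 * uwb_rsv_accept().  A hypothetical PAL hook (my_pal_new_rsv,
 * my_pal_wants and my_pal_rsv_cb are placeholders) might look like:
 *
 *	static void my_pal_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
 *	{
 *		if (my_pal_wants(rsv))
 *			uwb_rsv_accept(rsv, my_pal_rsv_cb, my_pal);
 *	}
 */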

/*
 * Is a received DRP IE for this reservation?
 */
static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
			  struct uwb_ie_drp *drp_ie)
{
	struct uwb_dev_addr *rsv_src;
	int stream;

	stream = uwb_ie_drp_stream_index(drp_ie);

	if (rsv->stream != stream)
		return false;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEVADDR:
		return rsv->stream == stream;
	case UWB_RSV_TARGET_DEV:
		if (uwb_ie_drp_owner(drp_ie))
			rsv_src = &rsv->owner->dev_addr;
		else
			rsv_src = &rsv->target.dev->dev_addr;
		return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
	}
	return false;
}

static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
					  struct uwb_dev *src,
					  struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;
	struct uwb_pal *pal;
	enum uwb_rsv_state state;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->owner = src;
	uwb_dev_get(rsv->owner);
	rsv->target.type = UWB_RSV_TARGET_DEV;
	rsv->target.dev = &rc->uwb_dev;
	uwb_dev_get(&rc->uwb_dev);
	rsv->type = uwb_ie_drp_type(drp_ie);
	rsv->stream = uwb_ie_drp_stream_index(drp_ie);
	uwb_drp_ie_to_bm(&rsv->mas, drp_ie);

	/*
	 * See if any PALs are interested in this reservation. If not,
	 * deny the request.
	 */
	rsv->state = UWB_RSV_STATE_T_DENIED;
	mutex_lock(&rc->uwb_dev.mutex);
	list_for_each_entry(pal, &rc->pals, node) {
		if (pal->new_rsv)
			pal->new_rsv(pal, rsv);
		if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
			break;
	}
	mutex_unlock(&rc->uwb_dev.mutex);

	list_add_tail(&rsv->rc_node, &rc->reservations);
	state = rsv->state;
	rsv->state = UWB_RSV_STATE_NONE;

	/* FIXME: do something sensible here */
	if (state == UWB_RSV_STATE_T_ACCEPTED
	    && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
		/* FIXME: do something sensible here */
	} else {
		uwb_rsv_set_state(rsv, state);
	}

	return rsv;
}

/**
 * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation
 * @rsv: the reservation.
 * @mas: returns the available MAS.
 *
 * The usable MAS of a reservation may be less than the negotiated MAS
 * if alien BPs are present.
 */
void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
{
	bitmap_zero(mas->bm, UWB_NUM_MAS);
	bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
}
EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);
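
/*
 * Example (illustrative): a PAL that schedules traffic only in MAS not
 * affected by alien BPs could do:
 *
 *	struct uwb_mas_bm usable;
 *
 *	uwb_rsv_get_usable_mas(rsv, &usable);
 *	// schedule transmissions only within 'usable'
 */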

/**
 * uwb_rsv_find - find a reservation for a received DRP IE.
 * @rc: the radio controller
 * @src: source of the DRP IE
 * @drp_ie: the DRP IE
 *
 * If the reservation cannot be found and the DRP IE is from a peer
 * attempting to establish a new reservation, create a new reservation
 * and add it to the list.
 */
struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
			     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (uwb_rsv_match(rsv, src, drp_ie))
			return rsv;
	}

	if (uwb_ie_drp_owner(drp_ie))
		return uwb_rsv_new_target(rc, src, drp_ie);

	return NULL;
}

/*
 * Go through all the reservations and check for timeouts and (if
 * necessary) update their DRP IEs.
 *
 * FIXME: look at building the SET_DRP_IE command here rather than
 * having to rescan the list in uwb_rc_send_all_drp_ie().
 */
static bool uwb_rsv_update_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;
	bool ie_updated = false;

	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (!rsv->ie_valid) {
			uwb_drp_ie_update(rsv);
			ie_updated = true;
		}
	}

	return ie_updated;
}

void uwb_rsv_queue_update(struct uwb_rc *rc)
{
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us));
}

/**
 * uwb_rsv_sched_update - schedule an update of the DRP IEs
 * @rc: the radio controller.
 *
 * To improve performance and ensure correctness with [ECMA-368] the
 * number of SET-DRP-IE commands that are done is limited.
 *
 * DRP IE updates come from two sources: DRP events from the hardware
 * which all occur at the beginning of the superframe ('synchronous'
 * events) and reservation establishment/termination requests from
 * PALs or timers ('asynchronous' events).
 *
 * A delayed work ensures that all the synchronous events result in
 * one SET-DRP-IE command.
 *
 * Additional logic (the set_drp_ie_pending and rsv_updated_postponed
 * flags) will prevent an asynchronous event starting a SET-DRP-IE
 * command if one is currently awaiting a response.
 *
 * FIXME: this does leave a window where an asynchronous event can
 * delay the SET-DRP-IE for a synchronous event by one superframe.
 */
void uwb_rsv_sched_update(struct uwb_rc *rc)
{
	spin_lock_irq(&rc->rsvs_lock);
	if (!delayed_work_pending(&rc->rsv_update_work)) {
		if (rc->set_drp_ie_pending > 0) {
			rc->set_drp_ie_pending++;
			goto unlock;
		}
		uwb_rsv_queue_update(rc);
	}
unlock:
	spin_unlock_irq(&rc->rsvs_lock);
}

/*
 * Update DRP IEs and, if necessary, the DRP Availability IE and send
 * the updated IEs to the radio controller.
 */
static void uwb_rsv_update_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_update_work.work);
	bool ie_updated;

	mutex_lock(&rc->rsvs_mutex);

	ie_updated = uwb_rsv_update_all(rc);

	if (!rc->drp_avail.ie_valid) {
		uwb_drp_avail_ie_update(rc);
		ie_updated = true;
	}

	if (ie_updated && (rc->set_drp_ie_pending == 0))
		uwb_rc_send_all_drp_ie(rc);

	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_rsv_alien_bp_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_alien_bp_work.work);
	struct uwb_rsv *rsv;

	mutex_lock(&rc->rsvs_mutex);

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
			uwb_rsv_callback(rsv);
		}
	}

	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_rsv_timer(struct timer_list *t)
{
	struct uwb_rsv *rsv = from_timer(rsv, t, timer);

	queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}

/**
 * uwb_rsv_remove_all - remove all reservations
 * @rc: the radio controller
 *
 * A DRP IE update is not done.
 */
void uwb_rsv_remove_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (rsv->state != UWB_RSV_STATE_NONE)
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		del_timer_sync(&rsv->timer);
	}
	/* Cancel any postponed update. */
	rc->set_drp_ie_pending = 0;
	mutex_unlock(&rc->rsvs_mutex);

	cancel_delayed_work_sync(&rc->rsv_update_work);
	flush_workqueue(rc->rsv_workq);

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		uwb_rsv_remove(rsv);
	}
	mutex_unlock(&rc->rsvs_mutex);
}

void uwb_rsv_init(struct uwb_rc *rc)
{
	INIT_LIST_HEAD(&rc->reservations);
	INIT_LIST_HEAD(&rc->cnflt_alien_list);
	mutex_init(&rc->rsvs_mutex);
	spin_lock_init(&rc->rsvs_lock);
	INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
	INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work);
	rc->bow.can_reserve_extra_mases = true;
	rc->bow.total_expired = 0;
	rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0);

	bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}

int uwb_rsv_setup(struct uwb_rc *rc)
{
	char name[16];

	snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
	rc->rsv_workq = create_singlethread_workqueue(name);
	if (rc->rsv_workq == NULL)
		return -ENOMEM;

	return 0;
}

void uwb_rsv_cleanup(struct uwb_rc *rc)
{
	uwb_rsv_remove_all(rc);
	destroy_workqueue(rc->rsv_workq);
}