// SPDX-License-Identifier: GPL-2.0
/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
        struct whc_qset *qset;
        dma_addr_t dma;

        qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
        if (qset == NULL)
                return NULL;

        qset->qset_dma = dma;
        qset->whc = whc;

        INIT_LIST_HEAD(&qset->list_node);
        INIT_LIST_HEAD(&qset->stds);

        return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
        struct usb_device *usb_dev = urb->dev;
        struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
        struct usb_wireless_ep_comp_descriptor *epcd;
        bool is_out;
        uint8_t phy_rate;

        is_out = usb_pipeout(urb->pipe);

        qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

        epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
        qset->max_seq = epcd->bMaxSequence;
        qset->max_burst = epcd->bMaxBurst;

        /*
         * Initial PHY rate is 53.3 Mbit/s for control endpoints or
         * the maximum supported by the device for other endpoints
         * (unless limited by the user).
         */
        if (usb_pipecontrol(urb->pipe))
                phy_rate = UWB_PHY_RATE_53;
        else {
                uint16_t phy_rates;

                phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
                phy_rate = fls(phy_rates) - 1;
                if (phy_rate > whc->wusbhc.phy_rate)
                        phy_rate = whc->wusbhc.phy_rate;
        }

        qset->qh.info1 = cpu_to_le32(
                QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
                | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
                | usb_pipe_to_qh_type(urb->pipe)
                | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
                | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
                );
        qset->qh.info2 = cpu_to_le32(
                QH_INFO2_BURST(qset->max_burst)
                | QH_INFO2_DBP(0)
                | QH_INFO2_MAX_COUNT(3)
                | QH_INFO2_MAX_RETRY(3)
                | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
                );
        /* FIXME: where can we obtain these Tx parameters from?  Why
         * doesn't the chip know what Tx power to use? It knows the Rx
         * strength and can presumably guess the Tx power required
         * from that? */
        qset->qh.info3 = cpu_to_le32(
                QH_INFO3_TX_RATE(phy_rate)
                | QH_INFO3_TX_PWR(0) /* 0 == max power */
                );

        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}
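
/*
 * Worked example for the window above (illustrative): with
 * qset->max_burst == 4, cur_window is set to (1 << 4) - 1 == 0xf,
 * i.e. one bit per packet of the initial four-packet burst window.
 */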

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
        qset->td_start = qset->td_end = qset->ntds = 0;

        qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
        qset->qh.scratch[1] = 0;
        qset->qh.scratch[2] = 0;

        memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

        init_completion(&qset->remove_complete);
}
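
/*
 * Note on the link encoding above (an assumption based on the WHCI
 * 0.95 qhead layout, not spelled out in this file): QH_LINK_NTDS(8)
 * encodes the qTD list size and QH_LINK_T is the terminate bit, so a
 * cleared qset links to nothing until the asl/pzl code relinks it.
 */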

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
        qset->reuse = 0;

        qset->qh.status &= ~QH_STATUS_SEQ_MASK;
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
                          gfp_t mem_flags)
{
        struct whc_qset *qset;

        qset = urb->ep->hcpriv;
        if (qset == NULL) {
                qset = qset_alloc(whc, mem_flags);
                if (qset == NULL)
                        return NULL;

                qset->ep = urb->ep;
                urb->ep->hcpriv = qset;
                qset_fill_qh(whc, qset, urb);
        }
        return qset;
}

void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
        qset->remove = 0;
        list_del_init(&qset->list_node);
        complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
        struct whc_std *std;
        enum whc_update update = 0;

        list_for_each_entry(std, &qset->stds, list_node) {
                struct whc_qtd *qtd;
                uint32_t status;

                if (qset->ntds >= WHCI_QSET_TD_MAX
                    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
                        break;

                if (std->qtd)
                        continue; /* already has a qTD */

                qtd = std->qtd = &qset->qtd[qset->td_end];

                /* Fill in setup bytes for control transfers. */
                if (usb_pipecontrol(std->urb->pipe))
                        memcpy(qtd->setup, std->urb->setup_packet, 8);

                status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

                if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
                        status |= QTD_STS_LAST_PKT;

                /*
                 * For an IN transfer the iAlt field should be set so
                 * the h/w will automatically advance to the next
                 * transfer. However, if there are 8 or more TDs
                 * remaining in this transfer then iAlt cannot be set
                 * as it could point to somewhere in this transfer.
                 */
                if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
                        int ialt;
                        ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
                        status |= QTD_STS_IALT(ialt);
                } else if (usb_pipein(std->urb->pipe))
                        qset->pause_after_urb = std->urb;

                if (std->num_pointers)
                        qtd->options = cpu_to_le32(QTD_OPT_IOC);
                else
                        qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
                qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

                qtd->status = cpu_to_le32(status);

                if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
                        update = WHC_UPDATE_UPDATED;

                if (++qset->td_end >= WHCI_QSET_TD_MAX)
                        qset->td_end = 0;
                qset->ntds++;
        }

        return update;
}
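
/*
 * Worked example for the iAlt calculation above (illustrative
 * values): with WHCI_QSET_TD_MAX == 8, qset->td_end == 6 and
 * std->ntds_remaining == 3, ialt == (6 + 3) % 8 == 1, i.e. the ring
 * slot just past the transfer's final qTD, wrapping around the
 * 8-entry qTD ring.
 */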

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
        qset->qtd[qset->td_start].status = 0;

        if (++qset->td_start >= WHCI_QSET_TD_MAX)
                qset->td_start = 0;
        qset->ntds--;
}

static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
        struct scatterlist *sg;
        void *bounce;
        size_t remaining, offset;

        bounce = std->bounce_buf;
        remaining = std->len;

        sg = std->bounce_sg;
        offset = std->bounce_offset;

        while (remaining) {
                size_t len;

                len = min(sg->length - offset, remaining);
                memcpy(sg_virt(sg) + offset, bounce, len);

                bounce += len;
                remaining -= len;

                offset += len;
                if (offset >= sg->length) {
                        sg = sg_next(sg);
                        offset = 0;
                }
        }
}
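
/*
 * Illustrative trace (hypothetical sizes): copying a 6000 byte
 * bounce buffer back into 4096 byte sg entries starting at offset 0
 * copies min(4096 - 0, 6000) == 4096 bytes into the first entry,
 * then moves to the next entry (offset reset to 0) for the remaining
 * 1904 bytes.
 */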

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
        list_del(&std->list_node);
        if (std->bounce_buf) {
                bool is_out = usb_pipeout(std->urb->pipe);
                dma_addr_t dma_addr;

                if (std->num_pointers)
                        dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
                else
                        dma_addr = std->dma_addr;

                dma_unmap_single(whc->wusbhc.dev, dma_addr,
                                 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (!is_out)
                        qset_copy_bounce_to_sg(whc, std);
                kfree(std->bounce_buf);
        }
        if (std->pl_virt) {
                if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
                        dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
                                         std->num_pointers * sizeof(struct whc_page_list_entry),
                                         DMA_TO_DEVICE);
                kfree(std->pl_virt);
                std->pl_virt = NULL;
        }
        kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
                             struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb != urb)
                        break;
                if (std->qtd != NULL)
                        qset_remove_qtd(whc, qset);
                qset_free_std(whc, std);
        }
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb == urb)
                        qset_free_std(qset->whc, std);
        }
}

static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
        dma_addr_t dma_addr = std->dma_addr;
        dma_addr_t sp, ep;
        size_t pl_len;
        int p;

        /* Short buffers don't need a page list. */
        if (std->len <= WHCI_PAGE_SIZE) {
                std->num_pointers = 0;
                return 0;
        }

        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
        ep = dma_addr + std->len;
        std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
        std->pl_virt = kmalloc(pl_len, mem_flags);
        if (std->pl_virt == NULL)
                return -ENOMEM;
        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
                kfree(std->pl_virt);
                return -EFAULT;
        }

        for (p = 0; p < std->num_pointers; p++) {
                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
        }

        return 0;
}
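
/*
 * Worked example (hypothetical addresses, assuming WHCI_PAGE_SIZE ==
 * 4096): a buffer with dma_addr == 0x1800 and len == 6144 gives
 * sp == 0x1000 and ep == 0x3000, so num_pointers ==
 * DIV_ROUND_UP(0x2000, 0x1000) == 2, and the page list holds 0x1800
 * (the unaligned start) and 0x2000.
 */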

/**
 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
        struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
        struct whc_qset *qset = wurb->qset;
        struct whc *whc = qset->whc;
        unsigned long flags;

        if (wurb->is_async)
                asl_update(whc, WUSBCMD_ASYNC_UPDATED
                           | WUSBCMD_ASYNC_SYNCED_DB
                           | WUSBCMD_ASYNC_QSET_RM);
        else
                pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
                           | WUSBCMD_PERIODIC_SYNCED_DB
                           | WUSBCMD_PERIODIC_QSET_RM);

        spin_lock_irqsave(&whc->lock, flags);
        qset_remove_urb(whc, qset, wurb->urb, wurb->status);
        spin_unlock_irqrestore(&whc->lock, flags);
}

static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
                                    struct urb *urb, gfp_t mem_flags)
{
        struct whc_std *std;

        std = kzalloc(sizeof(struct whc_std), mem_flags);
        if (std == NULL)
                return NULL;

        std->urb = urb;
        std->qtd = NULL;

        INIT_LIST_HEAD(&std->list_node);
        list_add_tail(&std->list_node, &qset->stds);

        return std;
}

static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
                           gfp_t mem_flags)
{
        size_t remaining;
        struct scatterlist *sg;
        int i;
        int ntds = 0;
        struct whc_std *std = NULL;
        struct whc_page_list_entry *new_pl_virt;
        dma_addr_t prev_end = 0;
        size_t pl_len;
        int p = 0;

        remaining = urb->transfer_buffer_length;

        for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
                dma_addr_t dma_addr;
                size_t dma_remaining;
                dma_addr_t sp, ep;
                int num_pointers;

                if (remaining == 0) {
                        break;
                }

                dma_addr = sg_dma_address(sg);
                dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

                while (dma_remaining) {
                        size_t dma_len;

                        /*
                         * We can use the previous std (if it exists) provided that:
                         * - the previous one ended on a page boundary.
                         * - the current one begins on a page boundary.
                         * - the previous one isn't full.
                         *
                         * If a new std is needed but the previous one
                         * was not a whole number of packets then this
                         * sg list cannot be mapped onto multiple
                         * qTDs.  Return an error and let the caller
                         * sort it out.
                         */
                        if (!std
                            || (prev_end & (WHCI_PAGE_SIZE-1))
                            || (dma_addr & (WHCI_PAGE_SIZE-1))
                            || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
                                if (std && std->len % qset->max_packet != 0)
                                        return -EINVAL;
                                std = qset_new_std(whc, qset, urb, mem_flags);
                                if (std == NULL)
                                        return -ENOMEM;
                                ntds++;
                                p = 0;
                        }

                        dma_len = dma_remaining;

                        /*
                         * If the remainder of this element doesn't
                         * fit in a single qTD, limit the qTD to a
                         * whole number of packets.  This allows the
                         * remainder to go into the next qTD.
                         */
                        if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
                                dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
                                        * qset->max_packet - std->len;
                        }

                        std->len += dma_len;
                        std->ntds_remaining = -1; /* filled in later */

                        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
                        ep = dma_addr + dma_len;
                        num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
                        std->num_pointers += num_pointers;

                        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

                        new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
                        if (new_pl_virt == NULL) {
                                kfree(std->pl_virt);
                                std->pl_virt = NULL;
                                return -ENOMEM;
                        }
                        std->pl_virt = new_pl_virt;

                        for (;p < std->num_pointers; p++) {
                                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
                        }

                        prev_end = dma_addr = ep;
                        dma_remaining -= dma_len;
                        remaining -= dma_len;
                }
        }

        /* Now the number of stds is known, go back and fill in
           std->ntds_remaining. */
        list_for_each_entry(std, &qset->stds, list_node) {
                if (std->ntds_remaining == -1) {
                        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
                        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
                                                       pl_len, DMA_TO_DEVICE);
                        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
                                return -EFAULT;
                        std->ntds_remaining = ntds--;
                }
        }
        return 0;
}
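
/*
 * Illustrative failure case for the -EINVAL path above: if an sg
 * element ends at a non page aligned address, the next element
 * forces a new sTD; should the current sTD's length then not be a
 * whole number of max_packet sized packets, the list cannot be
 * split across qTDs and the caller (qset_add_urb()) falls back to
 * qset_add_urb_sg_linearize().
 */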

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
                                     struct urb *urb, gfp_t mem_flags)
{
        bool is_out = usb_pipeout(urb->pipe);
        size_t max_std_len;
        size_t remaining;
        int ntds = 0;
        struct whc_std *std = NULL;
        void *bounce = NULL;
        struct scatterlist *sg;
        int i;

        /* limit maximum bounce buffer to 16 * 3.5 KiB = 56 KiB */
        max_std_len = qset->max_burst * qset->max_packet;

        remaining = urb->transfer_buffer_length;

        for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
                size_t len;
                size_t sg_remaining;
                void *orig;

                if (remaining == 0) {
                        break;
                }

                sg_remaining = min_t(size_t, remaining, sg->length);
                orig = sg_virt(sg);

                while (sg_remaining) {
                        if (!std || std->len == max_std_len) {
                                std = qset_new_std(whc, qset, urb, mem_flags);
                                if (std == NULL)
                                        return -ENOMEM;
                                std->bounce_buf = kmalloc(max_std_len, mem_flags);
                                if (std->bounce_buf == NULL)
                                        return -ENOMEM;
                                std->bounce_sg = sg;
                                std->bounce_offset = orig - sg_virt(sg);
                                bounce = std->bounce_buf;
                                ntds++;
                        }

                        len = min(sg_remaining, max_std_len - std->len);

                        if (is_out)
                                memcpy(bounce, orig, len);

                        std->len += len;
                        std->ntds_remaining = -1; /* filled in later */

                        bounce += len;
                        orig += len;
                        sg_remaining -= len;
                        remaining -= len;
                }
        }

        /*
         * For each of the new sTDs, map the bounce buffers, create
         * page lists (if necessary), and fill in std->ntds_remaining.
         */
        list_for_each_entry(std, &qset->stds, list_node) {
                if (std->ntds_remaining != -1)
                        continue;

                std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
                                               is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
                        return -EFAULT;

                if (qset_fill_page_list(whc, std, mem_flags) < 0)
                        return -ENOMEM;

                std->ntds_remaining = ntds--;
        }

        return 0;
}
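
/*
 * Sizing example for max_std_len (assuming the wireless USB maxima
 * of bMaxBurst == 16 and a 3584 byte max packet): 16 * 3584 ==
 * 57344 bytes, the 16 * 3.5 KiB == 56 KiB bound mentioned above.
 */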

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
        gfp_t mem_flags)
{
        struct whc_urb *wurb;
        int remaining = urb->transfer_buffer_length;
        u64 transfer_dma = urb->transfer_dma;
        int ntds_remaining;
        int ret;

        wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
        if (wurb == NULL)
                goto err_no_mem;
        urb->hcpriv = wurb;
        wurb->qset = qset;
        wurb->urb = urb;
        INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

        if (urb->num_sgs) {
                ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
                if (ret == -EINVAL) {
                        qset_free_stds(qset, urb);
                        ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
                }
                if (ret < 0)
                        goto err_no_mem;
                return 0;
        }

        ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
        if (ntds_remaining == 0)
                ntds_remaining = 1;

        while (ntds_remaining) {
                struct whc_std *std;
                size_t std_len;

                std_len = remaining;
                if (std_len > QTD_MAX_XFER_SIZE)
                        std_len = QTD_MAX_XFER_SIZE;

                std = qset_new_std(whc, qset, urb, mem_flags);
                if (std == NULL)
                        goto err_no_mem;

                std->dma_addr = transfer_dma;
                std->len = std_len;
                std->ntds_remaining = ntds_remaining;

                if (qset_fill_page_list(whc, std, mem_flags) < 0)
                        goto err_no_mem;

                ntds_remaining--;
                remaining -= std_len;
                transfer_dma += std_len;
        }

        return 0;

err_no_mem:
        qset_free_stds(qset, urb);
        return -ENOMEM;
}
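
/*
 * Chunking example (hypothetical value of QTD_MAX_XFER_SIZE ==
 * 16384): a 40000 byte non-sg transfer yields DIV_ROUND_UP(40000,
 * 16384) == 3 sTDs of 16384, 16384 and 7232 bytes, with
 * ntds_remaining counting down 3, 2, 1.
 */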

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
                     struct urb *urb, int status)
{
        struct wusbhc *wusbhc = &whc->wusbhc;
        struct whc_urb *wurb = urb->hcpriv;

        usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
        /* Drop the lock as urb->complete() may enqueue another urb. */
        spin_unlock(&whc->lock);
        wusbhc_giveback_urb(wusbhc, urb, status);
        spin_lock(&whc->lock);

        kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
        if (status & QTD_STS_HALTED) {
                if (status & QTD_STS_DBE)
                        return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
                else if (status & QTD_STS_BABBLE)
                        return -EOVERFLOW;
                else if (status & QTD_STS_RCE)
                        return -ETIME;
                return -EPIPE;
        }
        if (usb_pipein(urb->pipe)
            && (urb->transfer_flags & URB_SHORT_NOT_OK)
            && urb->actual_length < urb->transfer_buffer_length)
                return -EREMOTEIO;
        return 0;
}
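
/*
 * Example mapping (following the code above): a halted qTD with
 * QTD_STS_DBE set completes an IN urb with -ENOSR and an OUT urb
 * with -ECOMM; a halted qTD with none of the DBE, BABBLE or RCE
 * bits set completes the urb with -EPIPE.
 */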

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the bytes transferred by the qTD; if the urb is
 * completely transferred or (in the case of an IN only) the LPF is
 * set, then the transfer is complete and the urb should be returned
 * to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
                          struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        uint32_t status;
        bool complete;

        status = le32_to_cpu(qtd->status);

        urb->actual_length += std->len - QTD_STS_TO_LEN(status);

        if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
                complete = true;
        else
                complete = whc_std_last(std);

        qset_remove_qtd(whc, qset);
        qset_free_std(whc, std);

        /*
         * Transfers for this URB are complete?  Then return it to the
         * USB subsystem.
         */
        if (complete) {
                qset_remove_qtds(whc, qset, urb);
                qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

                /*
                 * If iAlt isn't valid then the hardware didn't
                 * advance iCur. Adjust the start and end pointers to
                 * match iCur.
                 */
                if (!(status & QTD_STS_IALT_VALID))
                        qset->td_start = qset->td_end
                                = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
                qset->pause_after_urb = NULL;
        }
}
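
/*
 * Short read example (hypothetical sizes): if std->len == 3584 and
 * the inactive qTD reports QTD_STS_TO_LEN(status) == 512 bytes left
 * untransferred, urb->actual_length grows by 3072 bytes.
 */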

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
                        struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        int urb_status;

        urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

        qset_remove_qtds(whc, qset, urb);
        qset_remove_urb(whc, qset, urb, urb_status);

        list_for_each_entry(std, &qset->stds, list_node) {
                if (qset->ntds == 0)
                        break;
                qset_remove_qtd(whc, qset);
                std->qtd = NULL;
        }

        qset->remove = 1;
}

void qset_free(struct whc *whc, struct whc_qset *qset)
{
        dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
        wait_for_completion(&qset->remove_complete);
        qset_free(whc, qset);
}