/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 - 2021, 2023 - 2024 Intel Corporation
 */
#include <net/cfg80211.h>
#include "core.h"
#include "nl80211.h"
#include "rdev-ops.h"
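/*
 * Parse and validate an FTM measurement request for a single peer,
 * checking each requested option against the device's advertised
 * cfg80211_pmsr_capabilities and filling out->ftm.
 */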
static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
			  struct nlattr *ftmreq,
			  struct cfg80211_pmsr_request_peer *out,
			  struct genl_info *info)
{
	const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa;
	struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1];
	u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */
	/* validate existing data */
	if (!(rdev->wiphy.pmsr_capa->ftm.bandwidths & BIT(out->chandef.width))) {
		NL_SET_ERR_MSG(info->extack, "FTM: unsupported bandwidth");
		return -EINVAL;
	}

	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq,
				    NULL, NULL);

	if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE])
		preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]);
	/* set up values - struct is 0-initialized */
	out->ftm.requested = true;

	switch (out->chandef.chan->band) {
	case NL80211_BAND_60GHZ:
		/* optional */
		break;
	default:
		if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) {
			NL_SET_ERR_MSG(info->extack,
				       "FTM: must specify preamble");
			return -EINVAL;
		}
	}

	if (!(capa->ftm.preambles & BIT(preamble))) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
				    "FTM: invalid preamble");
		return -EINVAL;
	}
	out->ftm.preamble = preamble;

	out->ftm.burst_period = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
		out->ftm.burst_period =
			nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);

	out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
	if (out->ftm.asap && !capa->ftm.asap) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP],
				    "FTM: ASAP mode not supported");
		return -EINVAL;
	}

	if (!out->ftm.asap && !capa->ftm.non_asap) {
		NL_SET_ERR_MSG(info->extack,
			       "FTM: non-ASAP mode not supported");
		return -EINVAL;
	}
	out->ftm.num_bursts_exp = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
		out->ftm.num_bursts_exp =
			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
	if (capa->ftm.max_bursts_exponent >= 0 &&
	    out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP],
				    "FTM: max NUM_BURSTS_EXP must be set lower than the device limit");
		return -EINVAL;
	}

	out->ftm.burst_duration = 15;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
		out->ftm.burst_duration =
			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);

	out->ftm.ftms_per_burst = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
		out->ftm.ftms_per_burst =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]);

	if (capa->ftm.max_ftms_per_burst &&
	    (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst ||
	     out->ftm.ftms_per_burst == 0)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
				    "FTM: FTMs per burst must be set lower than the device limit but non-zero");
		return -EINVAL;
	}

	out->ftm.ftmr_retries = 3;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
		out->ftm.ftmr_retries =
			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
	out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
	if (out->ftm.request_lci && !capa->ftm.request_lci) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI],
				    "FTM: LCI request not supported");
	}

	out->ftm.request_civicloc =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC];
	if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC],
				    "FTM: civic location request not supported");
	}
	out->ftm.trigger_based =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED];
	if (out->ftm.trigger_based && !capa->ftm.trigger_based) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED],
				    "FTM: trigger based ranging is not supported");
		return -EINVAL;
	}

	out->ftm.non_trigger_based =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED];
	if (out->ftm.non_trigger_based && !capa->ftm.non_trigger_based) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED],
				    "FTM: non-trigger-based ranging is not supported");
		return -EINVAL;
	}
	if (out->ftm.trigger_based && out->ftm.non_trigger_based) {
		NL_SET_ERR_MSG(info->extack,
			       "FTM: can't set both trigger based and non trigger based");
		return -EINVAL;
	}
	if (out->ftm.ftms_per_burst > 31 && !out->ftm.non_trigger_based &&
	    !out->ftm.trigger_based) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
				    "FTM: FTMs per burst must be set lower than 31");
		return -ERANGE;
	}
	if ((out->ftm.trigger_based || out->ftm.non_trigger_based) &&
	    out->ftm.preamble != NL80211_PREAMBLE_HE) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
				    "FTM: non EDCA based ranging must use HE preamble");
		return -EINVAL;
	}

	out->ftm.lmr_feedback =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK];
	if (!out->ftm.trigger_based && !out->ftm.non_trigger_based &&
	    out->ftm.lmr_feedback) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK],
				    "FTM: LMR feedback set for EDCA based ranging");
		return -EINVAL;
	}
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]) {
		if (!out->ftm.non_trigger_based && !out->ftm.trigger_based) {
			NL_SET_ERR_MSG_ATTR(info->extack,
					    tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR],
					    "FTM: BSS color set for EDCA based ranging");
			return -EINVAL;
		}

		out->ftm.bss_color =
			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]);
	}

	return 0;
}
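/*
 * Parse a single NL80211_PMSR_ATTR_PEERS entry: peer MAC address,
 * channel definition and the per-type measurement request(s).
 */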
static int pmsr_parse_peer(struct cfg80211_registered_device *rdev,
			   struct nlattr *peer,
			   struct cfg80211_pmsr_request_peer *out,
			   struct genl_info *info)
{
	struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
	struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
	struct nlattr *treq;
	int err, rem;
	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(tb, NL80211_PMSR_PEER_ATTR_MAX, peer,
				    NULL, NULL);

	if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
	    !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
	    !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
		NL_SET_ERR_MSG_ATTR(info->extack, peer,
				    "insufficient peer data");
		return -EINVAL;
	}

	memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);
	/* reuse info->attrs */
	memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1));
	err = nla_parse_nested_deprecated(info->attrs, NL80211_ATTR_MAX,
					  tb[NL80211_PMSR_PEER_ATTR_CHAN],
					  NULL, info->extack);
	if (err)
		return err;

	err = nl80211_parse_chandef(rdev, info, &out->chandef);
	if (err)
		return err;
	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(req, NL80211_PMSR_REQ_ATTR_MAX,
				    tb[NL80211_PMSR_PEER_ATTR_REQ], NULL,
				    NULL);

	if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_PEER_ATTR_REQ],
				    "missing request type/data");
		return -EINVAL;
	}

	if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
		out->report_ap_tsf = true;

	if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
				    "reporting AP TSF is not supported");
		return -EINVAL;
	}
	nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
		switch (nla_type(treq)) {
		case NL80211_PMSR_TYPE_FTM:
			err = pmsr_parse_ftm(rdev, treq, out, info);
			break;
		default:
			NL_SET_ERR_MSG_ATTR(info->extack, treq,
					    "unsupported measurement type");
			err = -EINVAL;
		}
	}

	if (err)
		return err;

	return 0;
}
int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS];
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct wireless_dev *wdev = info->user_ptr[1];
	struct cfg80211_pmsr_request *req;
	struct nlattr *peers, *peer;
	int count, rem, err, idx;
	if (!rdev->wiphy.pmsr_capa)
		return -EOPNOTSUPP;

	if (!reqattr)
		return -EINVAL;

	peers = nla_find(nla_data(reqattr), nla_len(reqattr),
			 NL80211_PMSR_ATTR_PEERS);
	if (!peers)
		return -EINVAL;

	count = 0;
	nla_for_each_nested(peer, peers, rem) {
		count++;

		if (count > rdev->wiphy.pmsr_capa->max_peers) {
			NL_SET_ERR_MSG_ATTR(info->extack, peer,
					    "Too many peers used");
			return -EINVAL;
		}
	}

	req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	req->n_peers = count;
	if (info->attrs[NL80211_ATTR_TIMEOUT])
		req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);

	if (info->attrs[NL80211_ATTR_MAC]) {
		if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) {
			NL_SET_ERR_MSG_ATTR(info->extack,
					    info->attrs[NL80211_ATTR_MAC],
					    "device cannot randomize MAC address");
			err = -EINVAL;
			goto out_err;
		}

		err = nl80211_parse_random_mac(info->attrs, req->mac_addr,
					       req->mac_addr_mask);
		if (err)
			goto out_err;
	} else {
		memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
		eth_broadcast_addr(req->mac_addr_mask);
	}
	idx = 0;
	nla_for_each_nested(peer, peers, rem) {
		/* NB: this reuses info->attrs, but we no longer need it */
		err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
		if (err)
			goto out_err;
		idx++;
	}

	req->cookie = cfg80211_assign_cookie(rdev);
	req->nl_portid = info->snd_portid;

	err = rdev_start_pmsr(rdev, wdev, req);
	if (err)
		goto out_err;

	list_add_tail(&req->list, &wdev->pmsr_list);

	nl_set_extack_cookie_u64(info->extack, req->cookie);
	return 0;
out_err:
	kfree(req);
	return err;
}
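/*
 * Called by the driver when a measurement request has finished; sends
 * NL80211_CMD_PEER_MEASUREMENT_COMPLETE to the requesting socket and
 * frees the request unless an abort is already freeing it.
 */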
void cfg80211_pmsr_complete(struct wireless_dev *wdev,
			    struct cfg80211_pmsr_request *req,
			    gfp_t gfp)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
	struct sk_buff *msg;
	void *hdr;

	trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie);

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		goto free_request;

	hdr = nl80211hdr_put(msg, 0, 0, 0,
			     NL80211_CMD_PEER_MEASUREMENT_COMPLETE);
	if (!hdr)
		goto free_msg;
	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
			      NL80211_ATTR_PAD))
		goto free_msg;

	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
			      NL80211_ATTR_PAD))
		goto free_msg;

	genlmsg_end(msg, hdr);
	genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
	goto free_request;
free_msg:
	nlmsg_free(msg);
free_request:
	spin_lock_bh(&wdev->pmsr_lock);
	/*
	 * cfg80211_pmsr_process_abort() may have already moved this request
	 * to the free list, and will free it later. In this case, don't free
	 * it here.
	 */
	list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
		if (tmp == req) {
			list_del(&req->list);
			to_free = req;
			break;
		}
	}
	spin_unlock_bh(&wdev->pmsr_lock);
	kfree(to_free);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
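/* Fill the FTM-specific response attributes for one result */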
static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg,
				     struct cfg80211_pmsr_result *res)
{
	if (res->status == NL80211_PMSR_STATUS_FAILURE) {
		if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
				res->ftm.failure_reason))
			goto error;

		if (res->ftm.failure_reason ==
			NL80211_PMSR_FTM_FAILURE_PEER_BUSY &&
		    res->ftm.busy_retry_time &&
		    nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
				res->ftm.busy_retry_time))
			goto error;

		return 0;
	}
#define PUT(tp, attr, val)						\
	do {								\
		if (nla_put_##tp(msg,					\
				 NL80211_PMSR_FTM_RESP_ATTR_##attr,	\
				 res->ftm.val))				\
			goto error;					\
	} while (0)

#define PUTOPT(tp, attr, val)						\
	do {								\
		if (res->ftm.val##_valid)				\
			PUT(tp, attr, val);				\
	} while (0)

#define PUT_U64(attr, val)						\
	do {								\
		if (nla_put_u64_64bit(msg,				\
				      NL80211_PMSR_FTM_RESP_ATTR_##attr,\
				      res->ftm.val,			\
				      NL80211_PMSR_FTM_RESP_ATTR_PAD))	\
			goto error;					\
	} while (0)

#define PUTOPT_U64(attr, val)						\
	do {								\
		if (res->ftm.val##_valid)				\
			PUT_U64(attr, val);				\
	} while (0)
	if (res->ftm.burst_index >= 0)
		PUT(u32, BURST_INDEX, burst_index);
	PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts);
	PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes);
	PUT(u8, NUM_BURSTS_EXP, num_bursts_exp);
	PUT(u8, BURST_DURATION, burst_duration);
	PUT(u8, FTMS_PER_BURST, ftms_per_burst);
	PUTOPT(s32, RSSI_AVG, rssi_avg);
	PUTOPT(s32, RSSI_SPREAD, rssi_spread);
	if (res->ftm.tx_rate_valid &&
	    !nl80211_put_sta_rate(msg, &res->ftm.tx_rate,
				  NL80211_PMSR_FTM_RESP_ATTR_TX_RATE))
		goto error;
	if (res->ftm.rx_rate_valid &&
	    !nl80211_put_sta_rate(msg, &res->ftm.rx_rate,
				  NL80211_PMSR_FTM_RESP_ATTR_RX_RATE))
		goto error;
	PUTOPT_U64(RTT_AVG, rtt_avg);
	PUTOPT_U64(RTT_VARIANCE, rtt_variance);
	PUTOPT_U64(RTT_SPREAD, rtt_spread);
	PUTOPT_U64(DIST_AVG, dist_avg);
	PUTOPT_U64(DIST_VARIANCE, dist_variance);
	PUTOPT_U64(DIST_SPREAD, dist_spread);
	if (res->ftm.lci && res->ftm.lci_len &&
	    nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI,
		    res->ftm.lci_len, res->ftm.lci))
		goto error;
	if (res->ftm.civicloc && res->ftm.civicloc_len &&
	    nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
		    res->ftm.civicloc_len, res->ftm.civicloc))
		goto error;
#undef PUT
#undef PUTOPT
#undef PUT_U64
#undef PUTOPT_U64

	return 0;
error:
	return -ENOSPC;
}
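/*
 * Wrap one measurement result in the nested PMSR/peer/response
 * attribute structure expected by userspace.
 */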
static int nl80211_pmsr_send_result(struct sk_buff *msg,
				    struct cfg80211_pmsr_result *res)
{
	struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata;

	pmsr = nla_nest_start_noflag(msg, NL80211_ATTR_PEER_MEASUREMENTS);
	if (!pmsr)
		goto error;

	peers = nla_nest_start_noflag(msg, NL80211_PMSR_ATTR_PEERS);
	if (!peers)
		goto error;

	peer = nla_nest_start_noflag(msg, 1);
	if (!peer)
		goto error;

	if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr))
		goto error;

	resp = nla_nest_start_noflag(msg, NL80211_PMSR_PEER_ATTR_RESP);
	if (!resp)
		goto error;
	if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) ||
	    nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME,
			      res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
		goto error;

	if (res->ap_tsf_valid &&
	    nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
			      res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
		goto error;

	if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
		goto error;

	data = nla_nest_start_noflag(msg, NL80211_PMSR_RESP_ATTR_DATA);
	if (!data)
		goto error;

	typedata = nla_nest_start_noflag(msg, res->type);
	if (!typedata)
		goto error;

	switch (res->type) {
	case NL80211_PMSR_TYPE_FTM:
		if (nl80211_pmsr_send_ftm_res(msg, res))
			goto error;
		break;
	default:
		break;
	}

	nla_nest_end(msg, typedata);
	nla_nest_end(msg, data);
	nla_nest_end(msg, resp);
	nla_nest_end(msg, peer);
	nla_nest_end(msg, peers);
	nla_nest_end(msg, pmsr);

	return 0;
error:
	return -ENOSPC;
}
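/*
 * Called by the driver to report a single measurement result; sends
 * NL80211_CMD_PEER_MEASUREMENT_RESULT to the requesting socket.
 */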
void cfg80211_pmsr_report(struct wireless_dev *wdev,
			  struct cfg80211_pmsr_request *req,
			  struct cfg80211_pmsr_result *result,
			  gfp_t gfp)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct sk_buff *msg;
	void *hdr;
	int err;

	trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie,
				   result->addr);

	/*
	 * Currently, only variable items are LCI and civic location,
	 * both of which are reasonably short so we don't need to
	 * worry about them here for the allocation.
	 */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		return;

	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT);
	if (!hdr)
		goto free_msg;
	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
			      NL80211_ATTR_PAD))
		goto free_msg;

	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
			      NL80211_ATTR_PAD))
		goto free_msg;

	err = nl80211_pmsr_send_result(msg, result);
	if (err) {
		pr_err_ratelimited("peer measurement result: message didn't fit!");
		goto free_msg;
	}

	genlmsg_end(msg, hdr);
	genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
	return;
free_msg:
	nlmsg_free(msg);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
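/*
 * Abort and free all requests whose requester is gone (nl_portid == 0);
 * must be called with the wiphy mutex held.
 */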
static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct cfg80211_pmsr_request *req, *tmp;
	LIST_HEAD(free_list);

	lockdep_assert_wiphy(wdev->wiphy);

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
		if (req->nl_portid)
			continue;

		list_move_tail(&req->list, &free_list);
	}
	spin_unlock_bh(&wdev->pmsr_lock);

	list_for_each_entry_safe(req, tmp, &free_list, list) {
		rdev_abort_pmsr(rdev, wdev, req);

		kfree(req);
	}
}
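/* Worker that aborts orphaned measurement requests outside atomic context */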
void cfg80211_pmsr_free_wk(struct work_struct *work)
{
	struct wireless_dev *wdev = container_of(work, struct wireless_dev,
						 pmsr_free_wk);

	wiphy_lock(wdev->wiphy);
	cfg80211_pmsr_process_abort(wdev);
	wiphy_unlock(wdev->wiphy);
}
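/*
 * Called when the wdev goes down: orphan all pending requests and
 * abort them synchronously.
 */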
void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
{
	struct cfg80211_pmsr_request *req;
	bool found = false;

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry(req, &wdev->pmsr_list, list) {
		found = true;
		req->nl_portid = 0;
	}
	spin_unlock_bh(&wdev->pmsr_lock);

	if (found)
		cfg80211_pmsr_process_abort(wdev);

	WARN_ON(!list_empty(&wdev->pmsr_list));
}
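/*
 * Called when a netlink socket is released: orphan that socket's
 * requests and schedule the free worker to abort them.
 */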
void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid)
{
	struct cfg80211_pmsr_request *req;

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry(req, &wdev->pmsr_list, list) {
		if (req->nl_portid == portid) {
			req->nl_portid = 0;
			schedule_work(&wdev->pmsr_free_wk);
		}
	}
	spin_unlock_bh(&wdev->pmsr_lock);
}