// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
DEFINE_BPF_STORAGE_CACHE(sk_cache);
static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage =
		rcu_dereference_check(sk->sk_bpf_storage, bpf_rcu_lock_held());
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}
static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), false);

	return 0;
}
/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage *sk_storage;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	bpf_local_storage_destroy(sk_storage);
	rcu_read_unlock();
}
static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &sk_cache, NULL);
}
static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &sk_cache, false);
}
static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}
static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}
static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags, false, GFP_ATOMIC);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}
static long bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}
static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, false, GFP_ATOMIC);
	if (!copy_selem)
		return NULL;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}
int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_local_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
			if (ret) {
				bpf_selem_free(copy_selem, smap, true);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here, the
	 * caller is responsible to call bpf_sk_storage_free.
	 */

	return ret;
}
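
/* Descriptive note: bpf_sk_storage_clone() runs on the socket-clone path
 * (e.g. sk_clone_lock() when a listener creates a child socket), so only
 * storages whose map was created with BPF_F_CLONE are copied to the child.
 */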
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     *  destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, false, gfp_flags);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}
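
/* Illustrative sketch (not part of this kernel file): a BPF program would
 * typically call this helper with BPF_SK_STORAGE_GET_F_CREATE so the element
 * is created on first access.  The map, section and value-struct names below
 * are hypothetical.
 *
 *	SEC("cgroup/sock_create")
 *	int on_sock_create(struct bpf_sock *sk)
 *	{
 *		struct my_stg *stg;
 *
 *		stg = bpf_sk_storage_get(&sk_stg_map, sk, NULL,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (!stg)
 *			return 1;
 *		stg->created_ns = bpf_ktime_get_ns();
 *		return 1;
 *	}
 */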
BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	struct sock *sk = (struct sock *)owner;
	int optmem_max;

	optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max);
	/* same check as in sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}
static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}
static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}
const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
};
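
/* Illustrative sketch (not part of this kernel file): BPF C code typically
 * declares an sk_storage map as in the hypothetical example below; the map
 * name and value type are made up.  BPF_F_NO_PREALLOC is required for this
 * map type and the key must be a 4-byte value (a socket fd on the syscall
 * side).
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct my_stg);
 *	} sk_stg_map SEC(".maps");
 */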
const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func = bpf_sk_storage_get,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func = bpf_sk_storage_get,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func = bpf_sk_storage_delete,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	const struct btf *btf_vmlinux;
	const struct btf_type *t;
	const char *tname;
	u32 btf_id;

	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function and also
	 * use the bpf_sk_storage_(get|delete) helper.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no trace point */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		btf_vmlinux = bpf_get_btf_vmlinux();
		if (IS_ERR_OR_NULL(btf_vmlinux))
			return false;
		btf_id = prog->aux->attach_btf_id;
		t = btf_type_by_id(btf_vmlinux, btf_id);
		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
		return !!strncmp(tname, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}

	return false;
}
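
/* Illustrative consequence of the check above (attach targets are just
 * examples): a BPF_TRACE_FENTRY program that uses bpf_sk_storage_get() may
 * attach to e.g. inet_sock_destruct(), but attaching the same program to
 * bpf_sk_storage_free() is rejected, since the helper could then re-enter
 * the storage code being traced.
 */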
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags,
						     gfp_flags);
}
BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}
const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func = bpf_sk_storage_get_tracing,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
	.allowed = bpf_sk_storage_tracing_allowed,
};
const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func = bpf_sk_storage_delete_tracing,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed = bpf_sk_storage_tracing_allowed,
};
struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};
/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(value_size);
}
void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);
static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}
struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN as
	 * the map_alloc_check() side also does.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested_type(nla, SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
				 nla_stgs, rem) {
		if (nla_len(nla) != sizeof(u32))
			return ERR_PTR(-EINVAL);
		nr_maps++;
	}

	diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested_type(nla, SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
				 nla_stgs, rem) {
		int map_fd = nla_get_u32(nla);
		struct bpf_map *map = bpf_map_get(map_fd);

		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}
static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);
struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned skip_elems;
};
static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}
static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}
static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}
{
788 __bpf_md_ptr(struct bpf_iter_meta
*, meta
);
789 __bpf_md_ptr(struct bpf_map
*, map
);
790 __bpf_md_ptr(struct sock
*, sk
);
791 __bpf_md_ptr(void *, value
);
DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)
*seq
,
799 struct bpf_local_storage_elem
*selem
)
801 struct bpf_iter_seq_sk_storage_map_info
*info
= seq
->private;
802 struct bpf_iter__bpf_sk_storage_map ctx
= {};
803 struct bpf_local_storage
*sk_storage
;
804 struct bpf_iter_meta meta
;
805 struct bpf_prog
*prog
;
809 prog
= bpf_iter_get_info(&meta
, selem
== NULL
);
814 sk_storage
= rcu_dereference(selem
->local_storage
);
815 ctx
.sk
= sk_storage
->owner
;
816 ctx
.value
= SDATA(selem
)->data
;
818 ret
= bpf_iter_run_prog(prog
, &ctx
);
824 static int bpf_sk_storage_map_seq_show(struct seq_file
*seq
, void *v
)
826 return __bpf_sk_storage_map_seq_show(seq
, v
);
static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}
static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	seq_info->map = aux->map;
	return 0;
}
static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
}
static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdwr_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}
static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}
static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start = bpf_sk_storage_map_seq_start,
	.next = bpf_sk_storage_map_seq_next,
	.stop = bpf_sk_storage_map_seq_stop,
	.show = bpf_sk_storage_map_seq_show,
};
static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_sk_storage_map_seq_ops,
	.init_seq_private = bpf_iter_init_sk_storage_map,
	.fini_seq_private = bpf_iter_fini_sk_storage_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
};
static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target = "bpf_sk_storage_map",
	.attach_target = bpf_iter_attach_map,
	.detach_target = bpf_iter_detach_map,
	.show_fdinfo = bpf_iter_map_show_fdinfo,
	.fill_link_info = bpf_iter_map_fill_link_info,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
	.seq_info = &iter_seq_info,
};
static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);