// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);
static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata));

	return 0;
}
/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the sk_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the sk_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
								  selem, true);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}
static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
	return &smap->map;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}
static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}
static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_local_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here, the
	 * caller is responsible to call bpf_sk_storage_free.
	 */

	return ret;
}
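
/* Example (illustrative, not part of this file): for the clone path above
 * to copy storage from a listener to a child socket, the map must be
 * created with BPF_F_CLONE.  A hypothetical BPF-side map definition:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} sk_stg_clone SEC(".maps");
 */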
BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	struct bpf_local_storage_data *sdata;

	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     *  destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);

		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	struct sock *sk = (struct sock *)owner;

	/* same check as in sock_kmalloc() */
	if (size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}
static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_name = "bpf_local_storage_map",
	.map_btf_id = &sk_storage_map_btf_id,
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
};
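
/* Example (illustrative sketch, assumes a recent libbpf): creating an
 * sk_storage map from userspace and reading a value keyed by a socket fd,
 * which is the path served by the bpf_fd_sk_storage_*_elem() handlers in
 * sk_storage_map_ops above.  "sk_fd" and "struct stg_val" are hypothetical.
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	struct stg_val val;
 *	int map_fd, err;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_stg",
 *				sizeof(int), sizeof(struct stg_val),
 *				0, &opts);
 *	err = bpf_map_lookup_elem(map_fd, &sk_fd, &val);
 */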
const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
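
/* Example (illustrative BPF program, not part of this file): typical use of
 * bpf_sk_storage_get()/bpf_sk_storage_delete() from a sockops program.  The
 * map name and value layout ("struct pkt_cnt") are hypothetical.
 *
 *	struct pkt_cnt { __u64 pkts; };
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct pkt_cnt);
 *	} sk_stg SEC(".maps");
 *
 *	SEC("sockops")
 *	int count_sockops(struct bpf_sock_ops *ctx)
 *	{
 *		struct bpf_sock *sk = ctx->sk;
 *		struct pkt_cnt *cnt;
 *
 *		if (!sk)
 *			return 1;
 *		cnt = bpf_sk_storage_get(&sk_stg, sk, NULL,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (cnt)
 *			__sync_fetch_and_add(&cnt->pkts, 1);
 *		return 1;
 *	}
 *
 * bpf_sk_storage_delete(&sk_stg, sk) would drop the entry again.
 */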
static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	const struct btf *btf_vmlinux;
	const struct btf_type *t;
	const char *tname;
	u32 btf_id;

	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function while also
	 * using the bpf_sk_storage_(get|delete) helper.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no trace point */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		btf_vmlinux = bpf_get_btf_vmlinux();
		btf_id = prog->aux->attach_btf_id;
		t = btf_type_by_id(btf_vmlinux, btf_id);
		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
		return !!strncmp(tname, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}

	return false;
}
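
/* Example (illustrative): a tracing program that passes the check above.
 * It attaches to tcp_close() (any kernel function outside the
 * bpf_sk_storage family would do) and uses the tracing flavor of the get
 * helper.  The map "sk_stg_map" and the __u64 value type are hypothetical.
 *
 *	SEC("fentry/tcp_close")
 *	int BPF_PROG(trace_tcp_close, struct sock *sk)
 *	{
 *		__u64 *val;
 *
 *		val = bpf_sk_storage_get(&sk_stg_map, sk, NULL,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (val)
 *			*val += 1;
 *		return 0;
 *	}
 */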
BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	if (in_irq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	if (in_irq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}
const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func		= bpf_sk_storage_get_tracing,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
	.allowed	= bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func		= bpf_sk_storage_delete_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed	= bpf_sk_storage_tracing_allowed,
};
struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
	       nla_total_size_64bit(value_size);
}
void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}
struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN as
	 * the map_alloc_check() side also does.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			nr_maps++;
	}

	diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
		       GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, false);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}
static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	if (!diag)
		/* No map has been specified.  Dump all. */
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);
struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}
static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}
struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)
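
/* Example (illustrative): a bpf_iter program for this target.  The output
 * format and the __u32 value type are hypothetical; the ctx fields match
 * struct bpf_iter__bpf_sk_storage_map above.
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct sock *sk = ctx->sk;
 *		__u32 *val = ctx->value;
 *
 *		if (!sk || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "sk: %p value: %u\n", sk, *val);
 *		return 0;
 *	}
 */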
static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}
static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	seq_info->map = aux->map;
	return 0;
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}
static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start  = bpf_sk_storage_map_seq_start,
	.next   = bpf_sk_storage_map_seq_next,
	.stop   = bpf_sk_storage_map_seq_stop,
	.show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_sk_storage_map_seq_ops,
	.init_seq_private	= bpf_iter_init_sk_storage_map,
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target			= "bpf_sk_storage_map",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
	.seq_info		= &iter_seq_info,
};
static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);