fs/xfs/kmem.h

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_SLEEP        ((__force xfs_km_flags_t)0x0001u)
#define KM_NOSLEEP      ((__force xfs_km_flags_t)0x0002u)
#define KM_NOFS         ((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL      ((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO         ((__force xfs_km_flags_t)0x0010u)
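
/*
 * Example (illustrative, not part of the upstream header; "ptr" and "size"
 * are made-up names): callers OR the flags together, e.g. an allocation
 * made from transaction context that is allowed to fail:
 *
 *      ptr = kmem_zalloc(size, KM_NOFS | KM_MAYFAIL);
 */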

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
        gfp_t   lflags;

        BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));

        if (flags & KM_NOSLEEP) {
                lflags = GFP_ATOMIC | __GFP_NOWARN;
        } else {
                lflags = GFP_KERNEL | __GFP_NOWARN;
                if (flags & KM_NOFS)
                        lflags &= ~__GFP_FS;

                /*
                 * The default page/slab allocator behavior is to retry
                 * forever for small allocations. We can override this
                 * behavior by using __GFP_RETRY_MAYFAIL, which tells the
                 * allocator to retry as long as it is feasible, but to
                 * fail rather than retry forever, for all request sizes.
                 */
                if (flags & KM_MAYFAIL)
                        lflags |= __GFP_RETRY_MAYFAIL;
        }

        if (flags & KM_ZERO)
                lflags |= __GFP_ZERO;

        return lflags;
}
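
/*
 * Example (illustrative): the two conversion paths above, given the flag
 * values defined earlier in this header:
 *
 *      kmem_flags_convert(KM_NOSLEEP)
 *              == GFP_ATOMIC | __GFP_NOWARN
 *      kmem_flags_convert(KM_NOFS | KM_MAYFAIL)
 *              == (GFP_KERNEL & ~__GFP_FS) | __GFP_NOWARN | __GFP_RETRY_MAYFAIL
 */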

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
        kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
        return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
        return kmem_alloc_large(size, flags | KM_ZERO);
}
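
/*
 * Example (illustrative; "buf" and "size" are made-up names): a zeroed
 * buffer that may be too large for the slab allocator. kmem_alloc_large()
 * may hand back vmalloc()ed memory for large sizes, which is why
 * kmem_free() above frees through kvfree():
 *
 *      buf = kmem_zalloc_large(size, KM_SLEEP);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      kmem_free(buf);
 */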

/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD  SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT SLAB_ACCOUNT

#define kmem_zone       kmem_cache
#define kmem_zone_t     struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
        return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
                     void (*construct)(void *))
{
        return kmem_cache_create(zone_name, size, 0, flags, construct);
}
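
/*
 * Example (illustrative; "xfs_foo" is a made-up structure and zone name):
 * creating a cache-line aligned, reclaim-accounted zone with no
 * constructor:
 *
 *      zone = kmem_zone_init_flags(sizeof(struct xfs_foo), "xfs_foo",
 *                      KM_ZONE_HWALIGN | KM_ZONE_RECLAIM, NULL);
 */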

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
        kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
        kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
        return kmem_zone_alloc(zone, flags | KM_ZERO);
}
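
/*
 * Example (illustrative; "obj" is a made-up name): a typical zone
 * lifecycle, assuming the zone was created with one of the
 * kmem_zone_init*() helpers above:
 *
 *      obj = kmem_zone_zalloc(zone, KM_SLEEP);
 *      ...
 *      kmem_zone_free(zone, obj);
 *      kmem_zone_destroy(zone);
 */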

#endif  /* __XFS_SUPPORT_KMEM_H__ */