// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include "xfs_message.h"
#include "xfs_trace.h"

/*
 * Allocate memory, looping until the allocation succeeds unless KM_MAYFAIL
 * is set. Complain every 100 retries and back off briefly between attempts.
 */
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
	int	retries = 0;
	gfp_t	lflags = kmem_flags_convert(flags);
	void	*ptr;

	trace_kmem_alloc(size, flags, _RET_IP_);

	do {
		ptr = kmalloc(size, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
				current->comm, current->pid,
				(unsigned int)size, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}
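
/*
 * Illustrative sketch (not part of the original file): a caller that can
 * handle allocation failure passes KM_MAYFAIL and gets NULL back instead of
 * blocking in the retry loop above; "fp" and "struct foo" are hypothetical:
 *
 *	struct foo *fp = kmem_alloc(sizeof(*fp), KM_MAYFAIL);
 *	if (!fp)
 *		return -ENOMEM;
 */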

/*
 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
 * we need to tell memory reclaim that we are in such a context via
 * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
 * and potentially deadlocking.
 */
static void *
__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
{
	unsigned nofs_flag = 0;
	void	*ptr;
	gfp_t	lflags = kmem_flags_convert(flags);

	if (flags & KM_NOFS)
		nofs_flag = memalloc_nofs_save();

	ptr = __vmalloc(size, lflags);

	if (flags & KM_NOFS)
		memalloc_nofs_restore(nofs_flag);

	return ptr;
}
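
/*
 * Illustrative sketch (not part of the original file): the same save/restore
 * pair scopes any block of code to NOFS allocation context;
 * do_reclaim_sensitive_work() is a hypothetical stand-in:
 *
 *	unsigned nofs_flag = memalloc_nofs_save();
 *	do_reclaim_sensitive_work();	// reclaim here cannot recurse into the fs
 *	memalloc_nofs_restore(nofs_flag);
 */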

/*
 * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
 * to the @align_mask. We only guarantee alignment up to page size; alignment
 * will be clamped at page size if the mask is larger. vmalloc always returns
 * a PAGE_SIZE aligned region.
 */
void *
kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_io(size, flags, _RET_IP_);

	if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
		align_mask = PAGE_SIZE - 1;

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr) {
		if (!((uintptr_t)ptr & align_mask))
			return ptr;
		kfree(ptr);
	}
	return __kmem_vmalloc(size, flags);
}
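
/*
 * Illustrative sketch (not part of the original file): a caller needing a
 * buffer aligned for 512-byte-sector I/O would pass the sector size minus
 * one as the mask; "buf" and the literal mask are hypothetical:
 *
 *	void *buf = kmem_alloc_io(size, 512 - 1, KM_NOFS);
 *	if (!buf)
 *		return -ENOMEM;
 */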

/*
 * Make one kmalloc attempt, then fall back to vmalloc on failure.
 */
void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_large(size, flags, _RET_IP_);

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr)
		return ptr;
	return __kmem_vmalloc(size, flags);
}
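
/*
 * Illustrative sketch (not part of the original file): useful when the
 * request may be too large for kmalloc to satisfy reliably; "len" is a
 * hypothetical size that can run to megabytes:
 *
 *	void *scratch = kmem_alloc_large(len, KM_NOFS);
 */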

/*
 * Resize an allocation with krealloc(), using the same retry-forever
 * semantics as kmem_alloc() unless KM_MAYFAIL is set.
 */
void *
kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
{
	int	retries = 0;
	gfp_t	lflags = kmem_flags_convert(flags);
	void	*ptr;

	trace_kmem_realloc(newsize, flags, _RET_IP_);

	do {
		ptr = krealloc(old, newsize, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %zu in %s (mode:0x%x)",
				current->comm, current->pid,
				newsize, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}
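
/*
 * Illustrative sketch (not part of the original file): growing an array
 * while tolerating failure; "arr" and "new_count" are hypothetical. On
 * failure krealloc() leaves the original buffer intact:
 *
 *	struct item *tmp = kmem_realloc(arr, new_count * sizeof(*arr), KM_MAYFAIL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	arr = tmp;
 */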

/*
 * Slab cache variant of the retry loop in kmem_alloc().
 */
void *
kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	int	retries = 0;
	gfp_t	lflags = kmem_flags_convert(flags);
	void	*ptr;

	trace_kmem_zone_alloc(kmem_cache_size(zone), flags, _RET_IP_);
	do {
		ptr = kmem_cache_alloc(zone, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
				current->comm, current->pid,
				__func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}
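
/*
 * Illustrative sketch (not part of the original file): kmem_zone_t is XFS's
 * name for a slab cache, so a zone created with kmem_cache_create() feeds
 * this helper; "foo_zone" and "struct foo" are hypothetical:
 *
 *	foo_zone = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
 *	struct foo *fp = kmem_zone_alloc(foo_zone, KM_NOFS);
 */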