Merge tag 'regmap-fix-v5.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux/fpc-iii.git] / fs / erofs / zpvec.h
blob1d67cbd3870421429dc7ac06c878001a873a8755
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2018 HUAWEI, Inc.
4 * https://www.huawei.com/
5 * Created by Gao Xiang <gaoxiang25@huawei.com>
6 */
7 #ifndef __EROFS_FS_ZPVEC_H
8 #define __EROFS_FS_ZPVEC_H
10 #include "tagptr.h"
/* page type in pagevec for decompress subsystem */
enum z_erofs_page_type {
	/* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
	Z_EROFS_PAGE_TYPE_EXCLUSIVE,

	Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,

	Z_EROFS_VLE_PAGE_TYPE_HEAD,
	Z_EROFS_VLE_PAGE_TYPE_MAX
};
23 extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
24 __bad_page_type_exclusive(void);
26 /* pagevec tagged pointer */
27 typedef tagptr2_t erofs_vtptr_t;
29 /* pagevec collector */
30 struct z_erofs_pagevec_ctor {
31 struct page *curr, *next;
32 erofs_vtptr_t *pages;
34 unsigned int nr, index;
37 static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
38 bool atomic)
40 if (!ctor->curr)
41 return;
43 if (atomic)
44 kunmap_atomic(ctor->pages);
45 else
46 kunmap(ctor->curr);
49 static inline struct page *
50 z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
51 unsigned int nr)
53 unsigned int index;
55 /* keep away from occupied pages */
56 if (ctor->next)
57 return ctor->next;
59 for (index = 0; index < nr; ++index) {
60 const erofs_vtptr_t t = ctor->pages[index];
61 const unsigned int tags = tagptr_unfold_tags(t);
63 if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
64 return tagptr_unfold_ptr(t);
66 DBG_BUGON(nr >= ctor->nr);
67 return NULL;
70 static inline void
71 z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
72 bool atomic)
74 struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
76 z_erofs_pagevec_ctor_exit(ctor, atomic);
78 ctor->curr = next;
79 ctor->next = NULL;
80 ctor->pages = atomic ?
81 kmap_atomic(ctor->curr) : kmap(ctor->curr);
83 ctor->nr = PAGE_SIZE / sizeof(struct page *);
84 ctor->index = 0;
87 static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
88 unsigned int nr,
89 erofs_vtptr_t *pages,
90 unsigned int i)
92 ctor->nr = nr;
93 ctor->curr = ctor->next = NULL;
94 ctor->pages = pages;
96 if (i >= nr) {
97 i -= nr;
98 z_erofs_pagevec_ctor_pagedown(ctor, false);
99 while (i > ctor->nr) {
100 i -= ctor->nr;
101 z_erofs_pagevec_ctor_pagedown(ctor, false);
104 ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
105 ctor->index = i;
108 static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
109 struct page *page,
110 enum z_erofs_page_type type,
111 bool *occupied)
113 *occupied = false;
114 if (!ctor->next && type)
115 if (ctor->index + 1 == ctor->nr)
116 return false;
118 if (ctor->index >= ctor->nr)
119 z_erofs_pagevec_ctor_pagedown(ctor, false);
121 /* exclusive page type must be 0 */
122 if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
123 __bad_page_type_exclusive();
125 /* should remind that collector->next never equal to 1, 2 */
126 if (type == (uintptr_t)ctor->next) {
127 ctor->next = page;
128 *occupied = true;
130 ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
131 return true;
134 static inline struct page *
135 z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
136 enum z_erofs_page_type *type)
138 erofs_vtptr_t t;
140 if (ctor->index >= ctor->nr) {
141 DBG_BUGON(!ctor->next);
142 z_erofs_pagevec_ctor_pagedown(ctor, true);
145 t = ctor->pages[ctor->index];
147 *type = tagptr_unfold_tags(t);
149 /* should remind that collector->next never equal to 1, 2 */
150 if (*type == (uintptr_t)ctor->next)
151 ctor->next = tagptr_unfold_ptr(t);
153 ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
154 return tagptr_unfold_ptr(t);
156 #endif