/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <stdint.h>
#include <string.h>

int region_is_subregion(const struct region *p, const struct region *c)
{
	if (region_offset(c) < region_offset(p))
		return 0;

	if (region_end(c) > region_end(p))
		return 0;

	if (region_end(c) < region_offset(c))
		return 0;

	return 1;
}

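/*
 * Translate the inner region's offset, which the caller supplies relative to
 * the outer region, into the outer region's frame of reference, then confirm
 * the result still lies entirely within the outer region.
 */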
static int normalize_and_ok(const struct region *outer, struct region *inner)
{
	inner->offset += region_offset(outer);
	return region_is_subregion(outer, inner);
}

static const struct region_device *rdev_root(const struct region_device *rdev)
{
	if (rdev->root == NULL)
		return rdev;
	return rdev->root;
}

ssize_t rdev_relative_offset(const struct region_device *p,
				const struct region_device *c)
{
	if (rdev_root(p) != rdev_root(c))
		return -1;

	if (!region_is_subregion(&p->region, &c->region))
		return -1;

	return region_device_offset(c) - region_device_offset(p);
}

void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return NULL;

	rdev = rdev_root(rd);

	if (rdev->ops->mmap == NULL)
		return NULL;

	return rdev->ops->mmap(rdev, req.offset, req.size);
}

int rdev_munmap(const struct region_device *rd, void *mapping)
{
	const struct region_device *rdev;

	rdev = rdev_root(rd);

	if (rdev->ops->munmap == NULL)
		return -1;

	return rdev->ops->munmap(rdev, mapping);
}

ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	return rdev->ops->readat(rdev, b, req.offset, req.size);
}

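/*
 * Worked example of the offset normalization above: a child device chained
 * at root-relative offset 0x100, asked to read at offset 0x10, has the
 * request normalized to root offset 0x110 before ops->readat is invoked.
 */
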
ssize_t rdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	if (rdev->ops->writeat == NULL)
		return -1;

	return rdev->ops->writeat(rdev, b, req.offset, req.size);
}

ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct region_device *rdev;
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&rd->region, &req))
		return -1;

	rdev = rdev_root(rd);

	/* If the eraseat ptr is NULL we assume that the erase
	 * function was completed successfully. */
	if (rdev->ops->eraseat == NULL)
		return size;

	return rdev->ops->eraseat(rdev, req.offset, req.size);
}

int rdev_chain(struct region_device *child, const struct region_device *parent,
		size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};

	if (!normalize_and_ok(&parent->region, &req))
		return -1;

	/* Keep track of the root region device. Note the offsets are
	 * relative to the root device. */
	child->root = rdev_root(parent);
	child->ops = NULL;
	child->region.offset = req.offset;
	child->region.size = req.size;

	return 0;
}

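/*
 * Illustrative usage sketch (not from the original source; `parent` is
 * assumed to be an already-initialized region_device):
 *
 *	struct region_device child;
 *
 *	// Expose bytes [0x1000, 0x1400) of parent as a region device of its
 *	// own; accesses through child are transparently offset by 0x1000.
 *	if (rdev_chain(&child, parent, 0x1000, 0x400) < 0)
 *		handle_error();	// hypothetical error path: window does not fit
 */
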
static void mem_region_device_init(struct mem_region_device *mdev,
		const struct region_device_ops *ops, void *base, size_t size)
{
	memset(mdev, 0, sizeof(*mdev));
	mdev->base = base;
	mdev->rdev.ops = ops;
	mdev->rdev.region.size = size;
}

void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
				size_t size)
{
	return mem_region_device_init(mdev, &mem_rdev_ro_ops, base, size);
}

void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
				size_t size)
{
	return mem_region_device_init(mdev, &mem_rdev_rw_ops, base, size);
}

void region_device_init(struct region_device *rdev,
			const struct region_device_ops *ops, size_t offset,
			size_t size)
{
	memset(rdev, 0, sizeof(*rdev));
	rdev->root = NULL;
	rdev->ops = ops;
	rdev->region.offset = offset;
	rdev->region.size = size;
}

static void xlate_region_device_init(struct xlate_region_device *xdev,
			const struct region_device_ops *ops,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	memset(xdev, 0, sizeof(*xdev));
	xdev->window_count = window_count;
	xdev->window_arr = window_arr;
	region_device_init(&xdev->rdev, ops, 0, parent_size);
}

void xlate_region_device_ro_init(struct xlate_region_device *xdev,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_ro_ops, window_count, window_arr,
			parent_size);
}

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
			size_t window_count, const struct xlate_window *window_arr,
			size_t parent_size)
{
	xlate_region_device_init(xdev, &xlate_rdev_rw_ops, window_count, window_arr,
			parent_size);
}

void xlate_window_init(struct xlate_window *window, const struct region_device *access_dev,
			size_t sub_region_offset, size_t sub_region_size)
{
	window->access_dev = access_dev;
	window->sub_region.offset = sub_region_offset;
	window->sub_region.size = sub_region_size;
}

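/*
 * Illustrative usage sketch (not from the original source; `backing_rdev`
 * is a hypothetical region_device that services the window):
 *
 *	static struct xlate_window window;
 *	static struct xlate_region_device xdev;
 *
 *	// Present a 16 MiB address space in which only the top 1 MiB is
 *	// backed; accesses inside the window are forwarded to backing_rdev.
 *	xlate_window_init(&window, backing_rdev, 15 * MiB, 1 * MiB);
 *	xlate_region_device_ro_init(&xdev, 1, &window, 16 * MiB);
 */
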
static void *mdev_mmap(const struct region_device *rd, size_t offset,
			size_t size __always_unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int mdev_munmap(const struct region_device *rd __always_unused,
			void *mapping __always_unused)
{
	return 0;
}

static ssize_t mdev_readat(const struct region_device *rd, void *b,
			size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

static ssize_t mdev_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(&mdev->base[offset], b, size);

	return size;
}

static ssize_t mdev_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memset(&mdev->base[offset], 0, size);

	return size;
}

const struct region_device_ops mem_rdev_ro_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
};

const struct region_device_ops mem_rdev_rw_ops = {
	.mmap = mdev_mmap,
	.munmap = mdev_munmap,
	.readat = mdev_readat,
	.writeat = mdev_writeat,
	.eraseat = mdev_eraseat,
};

static const struct mem_region_device mem_rdev = MEM_REGION_DEV_RO_INIT(0, ~(size_t)0);
static const struct mem_region_device mem_rdev_rw = MEM_REGION_DEV_RW_INIT(0, ~(size_t)0);

int rdev_chain_mem(struct region_device *child, const void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev.rdev, (uintptr_t)base, size);
}

int rdev_chain_mem_rw(struct region_device *child, void *base, size_t size)
{
	return rdev_chain(child, &mem_rdev_rw.rdev, (uintptr_t)base, size);
}

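/*
 * Illustrative usage sketch (not from the original source; `out` is a
 * hypothetical destination buffer):
 *
 *	struct region_device rdev;
 *	uint8_t buf[256];
 *
 *	// View an in-memory buffer through the region_device API; the
 *	// buffer's address becomes its offset within the global memory rdev.
 *	if (rdev_chain_mem(&rdev, buf, sizeof(buf)) == 0)
 *		rdev_readat(&rdev, out, 0, 16);	// copies buf[0..15] into out
 */
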
void *mmap_helper_rdev_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	struct mmap_helper_region_device *mdev;
	void *mapping;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mapping = mem_pool_alloc(mdev->pool, size);

	if (mapping == NULL)
		return NULL;

	if (rd->ops->readat(rd, mapping, offset, size) != size) {
		mem_pool_free(mdev->pool, mapping);
		return NULL;
	}

	return mapping;
}

int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
{
	struct mmap_helper_region_device *mdev;

	mdev = container_of((void *)rd, __typeof__(*mdev), rdev);

	mem_pool_free(mdev->pool, mapping);

	return 0;
}

static const struct xlate_window *xlate_find_window(const struct xlate_region_device *xldev,
						const struct region *req)
{
	size_t i;
	const struct xlate_window *xlwindow;

	for (i = 0; i < xldev->window_count; i++) {
		xlwindow = &xldev->window_arr[i];
		if (region_is_subregion(&xlwindow->sub_region, req))
			return xlwindow;
	}

	return NULL;
}

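/*
 * A translated request must fall entirely within a single window; a request
 * that straddles windows, or hits none of them, finds no window and the
 * callers below fail it.
 */
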
static void *xlate_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct xlate_region_device *xldev;
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return NULL;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_mmap(xlwindow->access_dev, offset, size);
}

static int xlate_munmap(const struct region_device *rd __always_unused,
			void *mapping __always_unused)
{
	/*
	 * xlate_region_device does not keep track of the access device that was used to service
	 * a mmap request. So, munmap does not do anything. If munmap functionality is required,
	 * then xlate_region_device will have to be updated to accept some pre-allocated space
	 * from the caller to keep track of the mapping requests. Since xlate_region_device is
	 * only used for memory mapped boot media on the backend right now, skipping munmap is
	 * fine.
	 */
	return 0;
}

static ssize_t xlate_readat(const struct region_device *rd, void *b,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_readat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_writeat(xlwindow->access_dev, b, offset, size);
}

static ssize_t xlate_eraseat(const struct region_device *rd,
			size_t offset, size_t size)
{
	struct region req = {
		.offset = offset,
		.size = size,
	};
	const struct xlate_window *xlwindow;
	const struct xlate_region_device *xldev;

	xldev = container_of(rd, __typeof__(*xldev), rdev);

	xlwindow = xlate_find_window(xldev, &req);
	if (!xlwindow)
		return -1;

	offset -= region_offset(&xlwindow->sub_region);

	return rdev_eraseat(xlwindow->access_dev, offset, size);
}

const struct region_device_ops xlate_rdev_ro_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
};

const struct region_device_ops xlate_rdev_rw_ops = {
	.mmap = xlate_mmap,
	.munmap = xlate_munmap,
	.readat = xlate_readat,
	.writeat = xlate_writeat,
	.eraseat = xlate_eraseat,
};

static void *incoherent_mmap(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_mmap(irdev->read, offset, size);
}

static int incoherent_munmap(const struct region_device *rd, void *mapping)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_munmap(irdev->read, mapping);
}

static ssize_t incoherent_readat(const struct region_device *rd, void *b,
			size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_readat(irdev->read, b, offset, size);
}

static ssize_t incoherent_writeat(const struct region_device *rd, const void *b,
			size_t offset, size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_writeat(irdev->write, b, offset, size);
}

static ssize_t incoherent_eraseat(const struct region_device *rd, size_t offset,
			size_t size)
{
	const struct incoherent_rdev *irdev;

	irdev = container_of(rd, const struct incoherent_rdev, rdev);

	return rdev_eraseat(irdev->write, offset, size);
}

static const struct region_device_ops incoherent_rdev_ops = {
	.mmap = incoherent_mmap,
	.munmap = incoherent_munmap,
	.readat = incoherent_readat,
	.writeat = incoherent_writeat,
	.eraseat = incoherent_eraseat,
};

const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
				const struct region *r,
				const struct region_device *read,
				const struct region_device *write)
{
	const size_t size = region_sz(r);

	if (size != region_device_sz(read) || size != region_device_sz(write))
		return NULL;

	/* The region is represented as offset 0 to size. That way, the generic
	 * rdev operations can be called on the read or write implementation
	 * without any unnecessary translation because the offsets all start
	 * at 0. */
	region_device_init(&irdev->rdev, &incoherent_rdev_ops, 0, size);
	irdev->read = read;
	irdev->write = write;

	return &irdev->rdev;
}
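
/*
 * Illustrative usage sketch (not from the original source; `mmap_rdev`,
 * `prog_rdev`, `data`, and `len` are hypothetical):
 *
 *	static struct incoherent_rdev irdev;
 *	const struct region_device *rdev;
 *	struct region r = { .offset = 0, .size = region_device_sz(mmap_rdev) };
 *
 *	// Pair a read path with a separate write path covering the same
 *	// region; reads hit mmap_rdev while writes and erases hit prog_rdev.
 *	rdev = incoherent_rdev_init(&irdev, &r, mmap_rdev, prog_rdev);
 *	if (rdev != NULL)
 *		rdev_writeat(rdev, data, 0, len);
 */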