/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
 */
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 **************************************************************************/

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/* This memory manager is integrated into the global/local lru
 * mechanisms used by the clients.  Specifically, it operates by
 * setting the 'in_use' fields of the global LRU to indicate whether
 * this region is privately allocated to a client.
 *
 * This does require the client to actually respect that field.
 *
 * Currently no effort is made to allocate 'private' memory in any
 * clever way - the LRU information isn't used to determine which
 * block to allocate, and the ring is drained prior to allocations --
 * in other words allocation is expensive.
 */
static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_tex_region_t *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i) */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i) */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}
/* Very simple allocator for agp memory, working on a static range
 * already mapped into each client's address space.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->filp = filp;
	return p;
}
static struct mem_block *alloc_block(struct mem_block *heap, int size,
				     int align2, DRMFILE filp)
{
	struct mem_block *p;
	int mask = (1 << align2) - 1;

	for (p = heap->next; p != heap; p = p->next) {
		int start = (p->start + mask) & ~mask;
		if (p->filp == NULL && start + size <= p->start + p->size)
			return split_block(p, start, size, filp);
	}

	return NULL;
}
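/* Walk the heap list looking for the block that starts at 'start'.
 */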
static struct mem_block *find_block(struct mem_block *heap, int start)
{
	struct mem_block *p;

	for (p = heap->next; p != heap; p = p->next)
		if (p->start == start)
			return p;

	return NULL;
}
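/* Release a block back to the heap, merging it with free neighbours on
 * either side.
 */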
static void free_block(struct mem_block *p)
{
	p->filp = NULL;

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->filp == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	if (p->prev->filp == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
	}
}
/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);

	if (!blocks)
		return DRM_ERR(ENOMEM);

	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
	if (!*heap) {
		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
		return DRM_ERR(ENOMEM);
	}

	blocks->start = start;
	blocks->size = size;
	blocks->filp = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->filp = (DRMFILE) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}
/* Free all blocks associated with the releasing file.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == filp) {
			p->filp = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}
/* Shutdown.
 */
void i915_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
	*heap = NULL;
}
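/* Translate a userspace region id into the matching per-device heap pointer.
 */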
static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
{
	switch (region) {
	case I915_MEM_REGION_AGP:
		return &dev_priv->agp_heap;
	default:
		return NULL;
	}
}
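/* IOCTL handlers: userspace entry points for allocating, freeing, and
 * initializing heaps.
 */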
int i915_mem_alloc(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_alloc_t alloc;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
				 sizeof(alloc));

	heap = get_heap(dev_priv, alloc.region);
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc.alignment < 12)
		alloc.alignment = 12;

	block = alloc_block(*heap, alloc.size, alloc.alignment, filp);

	if (!block)
		return DRM_ERR(ENOMEM);

	mark_block(dev, block, 1);

	if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}
int i915_mem_free(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_free_t memfree;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
				 sizeof(memfree));

	heap = get_heap(dev_priv, memfree.region);
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	block = find_block(*heap, memfree.region_offset);
	if (!block)
		return DRM_ERR(EFAULT);

	if (block->filp != filp)
		return DRM_ERR(EPERM);

	mark_block(dev, block, 0);
	free_block(block);
	return 0;
}
int i915_mem_init_heap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_init_heap_t initheap;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(initheap,
				 (drm_i915_mem_init_heap_t __user *) data,
				 sizeof(initheap));

	heap = get_heap(dev_priv, initheap.region);
	if (!heap)
		return DRM_ERR(EFAULT);

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return DRM_ERR(EFAULT);
	}

	return init_heap(heap, initheap.start, initheap.size);
}
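/* Usage note (illustrative sketch, not part of this driver): a client would
 * reach these handlers through the DRM command ioctls, roughly:
 *
 *	int offset;
 *	drm_i915_mem_alloc_t alloc = {
 *		.region = I915_MEM_REGION_AGP,
 *		.alignment = 12,            // log2 alignment, i.e. 4k
 *		.size = 65536,
 *		.region_offset = &offset,   // kernel writes block->start here
 *	};
 *	drmCommandWriteRead(fd, DRM_I915_ALLOC, &alloc, sizeof(alloc));
 *
 * The exact libdrm call and request name are assumptions; the field names
 * and the log2 alignment convention follow i915_mem_alloc() above.
 */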