/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};
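
/*
 * A minimal sketch of the mutex variant mentioned above, assuming the
 * same drm_mm backing store; the structure and call sites mirror this
 * file and are otherwise illustrative only:
 *
 *	struct ttm_range_manager {
 *		struct drm_mm mm;
 *		struct mutex lock;
 *	};
 *
 *	mutex_init(&rman->lock);	// in ttm_bo_man_init()
 *	mutex_lock(&rman->lock);	// wherever spin_lock() is used today
 *	...
 *	mutex_unlock(&rman->lock);
 *
 * A mutex would let a contended caller sleep instead of spin, at the
 * cost of imposing sleeping-context restrictions on every caller.
 */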
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	do {
		/* Preallocate node memory outside the spinlock ... */
		ret = drm_mm_pre_get(mm);
		if (unlikely(ret))
			return ret;

		spin_lock(&rman->lock);
		node = drm_mm_search_free_in_range(mm,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(node == NULL)) {
			spin_unlock(&rman->lock);
			return 0;
		}
		/* ... so the atomic grab below cannot fail on allocation.
		 * It can still lose a race and return NULL, hence the loop.
		 */
		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
						     mem->page_alignment,
						     placement->fpfn,
						     lpfn);
		spin_unlock(&rman->lock);
	} while (node == NULL);

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}
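
/*
 * Note for readers: a zero return with mem->mm_node left NULL means
 * "no free range", which the caller resolves by evicting buffers and
 * retrying. A hedged sketch of that caller-side pattern (in TTM the
 * real consumer is ttm_bo_mem_space(); the loop below is illustrative
 * only):
 *
 *	ret = (*man->func->get_node)(man, bo, placement, mem);
 *	if (unlikely(ret))
 *		return ret;		// hard error, e.g. -ENOMEM
 *	if (mem->mm_node == NULL)
 *		;			// no space: evict, then retry
 */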
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_put_block(mem->mm_node);
		spin_unlock(&rman->lock);

		mem->mm_node = NULL;
	}
}
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;
	int ret;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ret = drm_mm_init(&rman->mm, 0, p_size);
	if (ret) {
		kfree(rman);
		return ret;
	}

	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	/* Refuse takedown while allocations are still outstanding. */
	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_debug_table(&rman->mm, prefix);
	spin_unlock(&rman->lock);
}
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
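
/*
 * Hedged usage sketch: a driver selects this manager from its
 * init_mem_type() callback and then sizes the space with
 * ttm_bo_init_mm(). The TTM_PL_VRAM choice, the foo_ prefix and the
 * vram_size variable are illustrative assumptions, not part of this
 * file:
 *
 *	static int foo_init_mem_type(struct ttm_bo_device *bdev,
 *				     uint32_t type,
 *				     struct ttm_mem_type_manager *man)
 *	{
 *		switch (type) {
 *		case TTM_PL_VRAM:
 *			man->func = &ttm_bo_manager_func;
 *			...
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */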