/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
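
/*
 * The GMR id "memory type" does not manage a linear aperture: buffers placed
 * here only need an integer GMR (guest memory region) id plus accounting of
 * how many pages are currently bound through GMRs, so an ida-based manager
 * is used instead of TTM's default range manager.
 */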
struct vmwgfx_gmrid_man {
	spinlock_t lock;         /* protects gmr_ida and the page counters */
	struct ida gmr_ida;      /* allocator for free GMR ids */
	uint32_t max_gmr_ids;    /* ids handed out must be < max_gmr_ids */
	uint32_t max_gmr_pages;  /* GMR page budget; 0 means no page limit */
	uint32_t used_gmr_pages; /* pages currently charged to GMR buffers */
};
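
/*
 * vmw_gmrid_man_get_node - reserve a GMR id and page quota for a buffer.
 *
 * Hands out a free id from gmr_ida and charges the buffer object's pages
 * against max_gmr_pages. When the id space or page budget is exhausted the
 * function returns 0 with mem->mm_node left NULL, which TTM treats as
 * "no space"; on success mem->start carries the allocated GMR id.
 */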
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
				  struct ttm_buffer_object *bo,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;
	int ret = 0;
	int id;

	mem->mm_node = NULL;

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += bo->num_pages;
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
			goto out_err_locked;
	}

	do {
		/*
		 * ida_pre_get() may sleep (GFP_KERNEL), so drop the
		 * spinlock around the preload.
		 */
		spin_unlock(&gman->lock);
		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
			ret = -ENOMEM;
			goto out_err;
		}
		spin_lock(&gman->lock);

		ret = ida_get_new(&gman->gmr_ida, &id);
		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
			ida_remove(&gman->gmr_ida, id);
			ret = 0;
			goto out_err_locked;
		}
	} while (ret == -EAGAIN);

	if (likely(ret == 0)) {
		mem->mm_node = gman;
		mem->start = id;
		mem->num_pages = bo->num_pages;
	} else
		goto out_err_locked;

	spin_unlock(&gman->lock);
	return 0;

out_err:
	spin_lock(&gman->lock);
out_err_locked:
	gman->used_gmr_pages -= bo->num_pages;
	spin_unlock(&gman->lock);
	return 0;
}
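
/*
 * vmw_gmrid_man_put_node - release a GMR id and its page quota.
 *
 * Undoes the accounting done by vmw_gmrid_man_get_node() and returns the
 * id stored in mem->start to the ida.
 */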
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
				   struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (mem->mm_node) {
		spin_lock(&gman->lock);
		ida_remove(&gman->gmr_ida, mem->start);
		gman->used_gmr_pages -= mem->num_pages;
		spin_unlock(&gman->lock);
		mem->mm_node = NULL;
	}
}
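
/*
 * vmw_gmrid_man_init - set up the GMR id manager for a TTM memory type.
 *
 * Here p_size carries the number of available GMR ids rather than a size in
 * pages; the page budget comes from the device-global dev_priv->max_gmr_pages.
 */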
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
			      unsigned long p_size)
{
	struct vmw_private *dev_priv =
		container_of(man->bdev, struct vmw_private, bdev);
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(gman == NULL))
		return -ENOMEM;

	spin_lock_init(&gman->lock);
	gman->max_gmr_pages = dev_priv->max_gmr_pages;
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);
	gman->max_gmr_ids = p_size;
	man->priv = (void *) gman;
	return 0;
}
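
/*
 * vmw_gmrid_man_takedown - free the GMR id manager's private state.
 */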
static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (gman) {
		ida_destroy(&gman->gmr_ida);
		kfree(gman);
	}
	return 0;
}
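
/*
 * vmw_gmrid_man_debug - debug hook; GMR id usage has no spatial layout worth
 * dumping, so only a notice is printed.
 */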
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
				const char *prefix)
{
	printk(KERN_INFO "%s: No debug info available for the GMR "
	       "id manager.\n", prefix);
}

const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
	vmw_gmrid_man_init,
	vmw_gmrid_man_takedown,
	vmw_gmrid_man_get_node,
	vmw_gmrid_man_put_node,
	vmw_gmrid_man_debug
};
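
/*
 * Usage sketch: the function table above is meant to be plugged into TTM's
 * memory-type initialization for the GMR placement, roughly as below. The
 * vmw_init_mem_type() hook and the VMW_PL_GMR constant live elsewhere in the
 * vmwgfx driver and are shown here only as an illustration, not as the
 * authoritative call site.
 *
 *	case VMW_PL_GMR:
 *		man->func = &vmw_gmrid_manager_func;
 *		man->available_caching = TTM_PL_FLAG_CACHED;
 *		man->default_caching = TTM_PL_FLAG_CACHED;
 *		break;
 */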