/*
 * Copyright 2008 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
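
/*
 * Command stream (CS) submission: userspace passes an array of chunks
 * (a relocation chunk plus an indirect buffer) through the CS ioctl; the
 * parser below copies them in, validates the buffer objects they reference
 * and schedules the resulting IB on the GPU.
 */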

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);
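
/*
 * Walk the relocation chunk: resolve each GEM handle to a radeon buffer
 * object (deduplicating repeated handles) and add it to the list of
 * buffers to validate. Each relocation entry is assumed to be 4 dwords.
 */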

int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < p->nrelocs; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			if (r->read_domains)
				p->relocs[i].lobj.tv.usage |= TTM_USAGE_READ;
			if (r->write_domain)
				p->relocs[i].lobj.tv.usage |= TTM_USAGE_WRITE;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		}
	}
	return radeon_bo_list_validate(&p->validated);
}
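
/*
 * Copy the chunk array and each chunk header in from userspace, remember
 * which chunk holds the relocations and which holds the IB, and stage the
 * chunk payloads: relocation data is copied wholesale, while the IB gets
 * two page-sized bounce buffers that are filled on demand during parsing.
 */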

int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;

	if (!cs->num_chunks) {
		return 0;
	}
	INIT_LIST_HEAD(&p->validated);
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
		} else {
			p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[i].kpage[0] == NULL ||
			    p->chunks[i].kpage[1] == NULL) {
				kfree(p->chunks[i].kpage[0]);
				kfree(p->chunks[i].kpage[1]);
				return -ENOMEM;
			}
			p->chunks[i].kpage_idx[0] = -1;
			p->chunks[i].kpage_idx[1] = -1;
			p->chunks[i].last_copied_page = -1;
			p->chunks[i].last_page_index =
				((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
		}
	}
	if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
		DRM_ERROR("cs IB too big: %d\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error && parser->ib)
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib->fence);
	else
		ttm_eu_backoff_reservation(&parser->validated);

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
}
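
/*
 * Main CS ioctl entry point: serialize on cs_mutex, initialize the parser,
 * allocate an indirect buffer, resolve and validate the relocations, parse
 * the command stream into the IB and hand it to the GPU scheduler. Every
 * failure path tears the parser back down before dropping the mutex.
 */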

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	int r;

	mutex_lock(&rdev->cs_mutex);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_get(rdev, &parser.ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(&parser);
	if (r || parser.parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_schedule(rdev, parser.ib);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	radeon_cs_parser_fini(&parser, r);
	mutex_unlock(&rdev->cs_mutex);
	return r;
}
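
/*
 * Copy whatever IB pages the parser has not yet pulled in, so the whole
 * command buffer ends up in the IB before it is scheduled. The last page
 * may be partial, so its size is clamped to the chunk length.
 */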

int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}
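
/*
 * Make page pg_idx of the IB chunk available in one of the two kernel
 * bounce pages. Pages skipped over since the last copy go straight into
 * the IB; the requested page lands in the older of the two kpage slots,
 * is copied into the IB, and the slot index is returned to the caller.
 */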

int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB here */
	memcpy((void *)(p->ib->ptr + (pg_idx * (PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}