1 #ifndef __NOUVEAU_BUFFER_H__
2 #define __NOUVEAU_BUFFER_H__
4 #include "util/u_transfer.h"
5 #include "util/u_double_list.h"
8 struct nouveau_context
;
/* Usage-frequency score bounds for buffer placement heuristics.
 * The score is clamped to [MIN, MAX]; a GART buffer whose score exceeds
 * VRAM_THRESHOLD is migrated to VRAM (see nouveau_buffer_adjust_score()).
 */
#define NOUVEAU_BUFFER_SCORE_MIN (-25000)
#define NOUVEAU_BUFFER_SCORE_MAX (25000)
#define NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD (20000)
/* DIRTY: buffer was (or will be after the next flush) written to by GPU and
 * resource->data has not been updated to reflect modified VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
25 /* Resources, if mapped into the GPU's address space, are guaranteed to
26 * have constant virtual addresses (nv50+).
28 * The address of a resource will lie within the nouveau_bo referenced,
29 * and this bo should be added to the memory manager's validation list.
31 struct nv04_resource
{
32 struct pipe_resource base
;
33 const struct u_resource_vtbl
*vtbl
;
36 struct nouveau_bo
*bo
;
42 int16_t score
; /* low if mapped very often, if high can move to VRAM */
44 struct nouveau_fence
*fence
;
45 struct nouveau_fence
*fence_wr
;
47 struct nouveau_mm_allocation
*mm
;
/* Free the resource's GPU (VRAM/GART) backing storage, if any.
 * NOTE(review): return type restored as void from an incomplete fragment --
 * confirm against the original file.
 */
void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);
54 nouveau_buffer_download(struct nouveau_context
*, struct nv04_resource
*,
55 unsigned start
, unsigned size
);
58 nouveau_buffer_migrate(struct nouveau_context
*,
59 struct nv04_resource
*, unsigned domain
);
62 nouveau_buffer_adjust_score(struct nouveau_context
*pipe
,
63 struct nv04_resource
*res
, int16_t score
)
66 if (res
->score
> NOUVEAU_BUFFER_SCORE_MIN
)
70 if (res
->score
< NOUVEAU_BUFFER_SCORE_MAX
)
72 if (res
->domain
== NOUVEAU_BO_GART
&&
73 res
->score
> NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD
)
74 nouveau_buffer_migrate(pipe
, res
, NOUVEAU_BO_VRAM
);
78 /* XXX: wait for fence (atm only using this for vertex push) */
80 nouveau_resource_map_offset(struct nouveau_context
*pipe
,
81 struct nv04_resource
*res
, uint32_t offset
,
86 nouveau_buffer_adjust_score(pipe
, res
, -250);
88 if ((res
->domain
== NOUVEAU_BO_VRAM
) &&
89 (res
->status
& NOUVEAU_BUFFER_STATUS_GPU_WRITING
))
90 nouveau_buffer_download(pipe
, res
, 0, res
->base
.width0
);
92 if ((res
->domain
!= NOUVEAU_BO_GART
) ||
93 (res
->status
& NOUVEAU_BUFFER_STATUS_USER_MEMORY
))
94 return res
->data
+ offset
;
97 flags
|= NOUVEAU_BO_NOSYNC
;
99 if (nouveau_bo_map_range(res
->bo
, res
->offset
+ offset
,
100 res
->base
.width0
, flags
))
104 nouveau_bo_unmap(res
->bo
);
109 nouveau_resource_unmap(struct nv04_resource
*res
)
114 static INLINE
struct nv04_resource
*
115 nv04_resource(struct pipe_resource
*resource
)
117 return (struct nv04_resource
*)resource
;
120 /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
121 static INLINE boolean
122 nouveau_resource_mapped_by_gpu(struct pipe_resource
*resource
)
124 return nv04_resource(resource
)->domain
!= 0;
/* Create a buffer resource from the given template. */
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);
/* Create a buffer resource of @bytes bytes backed by client memory at @ptr
 * (presumably without copying -- see NOUVEAU_BUFFER_STATUS_USER_MEMORY). */
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);
136 nouveau_user_buffer_upload(struct nv04_resource
*, unsigned base
,