
#ifndef __NVC0_RESOURCE_H__
#define __NVC0_RESOURCE_H__

#include "util/u_transfer.h"
#include "util/u_double_list.h"
#define NOUVEAU_NVC0
#include "nouveau/nouveau_winsys.h"
#undef NOUVEAU_NVC0

#include "nvc0_fence.h"

struct pipe_resource;
struct nouveau_bo;
struct nvc0_context;

#define NVC0_BUFFER_SCORE_MIN -25000
#define NVC0_BUFFER_SCORE_MAX 25000
#define NVC0_BUFFER_SCORE_VRAM_THRESHOLD 20000

/* DIRTY: buffer was (or will be, after the next flush) written to by the GPU
 * and resource->data has not been updated to reflect the modified VRAM
 * contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 */
#define NVC0_BUFFER_STATUS_DIRTY (1 << 0)
#define NVC0_BUFFER_STATUS_USER_MEMORY (1 << 7)
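
/* Illustrative sketch (not part of the original header) of how these bits
 * are meant to combine: a GPU write leaves the CPU-visible copy stale until
 * downloaded, while user-memory buffers have no shadow copy to invalidate:
 *
 *    static INLINE void
 *    example_mark_gpu_written(struct nvc0_resource *res)
 *    {
 *       if (!(res->status & NVC0_BUFFER_STATUS_USER_MEMORY))
 *          res->status |= NVC0_BUFFER_STATUS_DIRTY;
 *    }
 */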

/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses.
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nvc0_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint8_t *data;
   struct nouveau_bo *bo;
   uint32_t offset;

   uint8_t status;
   uint8_t domain;

   int16_t score; /* low if mapped often; high scores allow migration to VRAM */

   struct nvc0_fence *fence;
   struct nvc0_fence *fence_wr;

   struct nvc0_mm_allocation *mm;
};
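
/* Illustrative helper, not part of the original header: pick which fence a
 * CPU access must wait on. A read only has to wait for the last GPU write
 * (fence_wr), while a write must wait for all pending GPU accesses (fence)
 * so it cannot overtake outstanding reads.
 */
static INLINE struct nvc0_fence *
nvc0_buffer_example_fence_for_access(struct nvc0_resource *res, boolean write)
{
   return write ? res->fence : res->fence_wr;
}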

boolean
nvc0_buffer_download(struct nvc0_context *, struct nvc0_resource *,
                     unsigned start, unsigned size);

boolean
nvc0_buffer_migrate(struct nvc0_context *,
                    struct nvc0_resource *, unsigned domain);

static INLINE void
nvc0_buffer_adjust_score(struct nvc0_context *nvc0, struct nvc0_resource *res,
                         int16_t score)
{
   if (score < 0) {
      if (res->score > NVC0_BUFFER_SCORE_MIN)
         res->score += score;
   } else
   if (score > 0) {
      if (res->score < NVC0_BUFFER_SCORE_MAX)
         res->score += score;
      if (res->domain == NOUVEAU_BO_GART &&
          res->score > NVC0_BUFFER_SCORE_VRAM_THRESHOLD)
         nvc0_buffer_migrate(nvc0, res, NOUVEAU_BO_VRAM);
   }
}
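
/* Worked example (the -250 matches the map path below, the +100 is purely
 * illustrative): CPU maps push the score down, GPU accesses push it up;
 * once a GART buffer crosses NVC0_BUFFER_SCORE_VRAM_THRESHOLD (20000) it
 * is migrated to VRAM:
 *
 *    nvc0_buffer_adjust_score(nvc0, res, -250); // CPU map: stay in GART
 *    nvc0_buffer_adjust_score(nvc0, res,  100); // GPU use: drift toward VRAM
 */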

/* XXX: wait for fence (at the moment this is only used for vertex push) */
static INLINE void *
nvc0_resource_map_offset(struct nvc0_context *nvc0,
                         struct nvc0_resource *res, uint32_t offset,
                         uint32_t flags)
{
   void *map;

   nvc0_buffer_adjust_score(nvc0, res, -250);

   if ((res->domain == NOUVEAU_BO_VRAM) &&
       (res->status & NVC0_BUFFER_STATUS_DIRTY))
      nvc0_buffer_download(nvc0, res, 0, res->base.width0);

   if ((res->domain != NOUVEAU_BO_GART) ||
       (res->status & NVC0_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->mm)
      flags |= NOUVEAU_BO_NOSYNC;

   /* map only the bytes remaining past offset, not width0 bytes past it */
   if (nouveau_bo_map_range(res->bo, res->offset + offset,
                            res->base.width0 - offset, flags))
      return NULL;

   /* libdrm caches the CPU mapping in the bo, so the pointer remains
    * usable after nouveau_bo_unmap()
    */
   map = res->bo->map;
   nouveau_bo_unmap(res->bo);
   return map;
}
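
/* Usage sketch (hypothetical caller, e.g. a vertex-push path): map a
 * sub-range read-only, consume it, then release the mapping; flags are
 * the NOUVEAU_BO_RD/WR access bits taken by nouveau_bo_map_range():
 *
 *    const uint8_t *v =
 *       nvc0_resource_map_offset(nvc0, res, vb_offset, NOUVEAU_BO_RD);
 *    if (v) {
 *       ... read vertex data from v ...
 *       nvc0_resource_unmap(res);
 *    }
 */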

static INLINE void
nvc0_resource_unmap(struct nvc0_resource *res)
{
   /* no-op */
}

#define NVC0_TILE_DIM_SHIFT(m, d) (((m) >> ((d) * 4)) & 0xf)

#define NVC0_TILE_PITCH(m) (64 << NVC0_TILE_DIM_SHIFT(m, 0))
#define NVC0_TILE_HEIGHT(m) ( 8 << NVC0_TILE_DIM_SHIFT(m, 1))
#define NVC0_TILE_DEPTH(m) ( 1 << NVC0_TILE_DIM_SHIFT(m, 2))

#define NVC0_TILE_SIZE_2D(m) (((64 * 8) << \
                               NVC0_TILE_DIM_SHIFT(m, 0)) << \
                              NVC0_TILE_DIM_SHIFT(m, 1))

#define NVC0_TILE_SIZE(m) (NVC0_TILE_SIZE_2D(m) << NVC0_TILE_DIM_SHIFT(m, 2))
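
/* Worked example: tile_mode m = 0x021 encodes per-dimension shifts of
 * 1 (X), 2 (Y) and 0 (Z), so
 *    NVC0_TILE_PITCH(m)  =  64 << 1 = 128 bytes
 *    NVC0_TILE_HEIGHT(m) =   8 << 2 =  32 rows
 *    NVC0_TILE_DEPTH(m)  =   1 << 0 =   1 layer
 *    NVC0_TILE_SIZE(m)   = 128 * 32 * 1 = 4096 bytes
 */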

struct nvc0_miptree_level {
   uint32_t offset;
   uint32_t pitch;
   uint32_t tile_mode;
};

#define NVC0_MAX_TEXTURE_LEVELS 16

struct nvc0_miptree {
   struct nvc0_resource base;
   struct nvc0_miptree_level level[NVC0_MAX_TEXTURE_LEVELS];
   uint32_t total_size;
   uint32_t layer_stride;
   boolean layout_3d; /* TRUE if layer count varies with mip level */
};
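
/* Descriptive note (added): for array and cube textures the layer count is
 * constant across levels and layer_stride is the distance between one
 * layer's mip chain and the next; with layout_3d (a 3D texture) the depth
 * shrinks per level, so the layer count is per-level instead.
 */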

static INLINE struct nvc0_miptree *
nvc0_miptree(struct pipe_resource *pt)
{
   return (struct nvc0_miptree *)pt;
}

static INLINE struct nvc0_resource *
nvc0_resource(struct pipe_resource *resource)
{
   return (struct nvc0_resource *)resource;
}

/* is the resource mapped into the GPU's address space (i.e. VRAM or GART)? */
static INLINE boolean
nvc0_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nvc0_resource(resource)->domain != 0;
}
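
/* Example caller logic (hypothetical, illustrative only): choose between
 * referencing the bo from the GPU and pushing the data inline:
 *
 *    if (nvc0_resource_mapped_by_gpu(pres)) {
 *       ... emit a relocation for nvc0_resource(pres)->bo ...
 *    } else {
 *       ... copy from nvc0_resource(pres)->data into the push buffer ...
 *    }
 */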

void
nvc0_init_resource_functions(struct pipe_context *pcontext);

void
nvc0_screen_init_resource_functions(struct pipe_screen *pscreen);

/* Internal functions:
 */
struct pipe_resource *
nvc0_miptree_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmp);

struct pipe_resource *
nvc0_miptree_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle);

struct pipe_resource *
nvc0_buffer_create(struct pipe_screen *pscreen,
                   const struct pipe_resource *templ);

struct pipe_resource *
nvc0_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned usage);

struct pipe_surface *
nvc0_miptree_surface_new(struct pipe_context *,
                         struct pipe_resource *,
                         const struct pipe_surface *templ);

void
nvc0_miptree_surface_del(struct pipe_context *, struct pipe_surface *);

boolean
nvc0_user_buffer_upload(struct nvc0_resource *, unsigned base, unsigned size);

#endif