r300g: implement hyper-z support. (v4)
[mesa.git] / src / gallium / winsys / radeon / drm / radeon_r300.c
1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
22
23 #include "radeon_r300.h"
24 #include "radeon_buffer.h"
25
26 #include "radeon_bo_gem.h"
27 #include "radeon_cs_gem.h"
28 #include "state_tracker/drm_driver.h"
29
30 #include "util/u_memory.h"
31
32 static unsigned get_pb_usage_from_create_flags(unsigned bind, unsigned usage,
33 enum r300_buffer_domain domain)
34 {
35 unsigned res = 0;
36
37 if (bind & (PIPE_BIND_DEPTH_STENCIL | PIPE_BIND_RENDER_TARGET |
38 PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT))
39 res |= PB_USAGE_GPU_WRITE;
40
41 if (bind & PIPE_BIND_SAMPLER_VIEW)
42 res |= PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE;
43
44 if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
45 res |= PB_USAGE_GPU_READ;
46
47 if (bind & PIPE_BIND_TRANSFER_WRITE)
48 res |= PB_USAGE_CPU_WRITE;
49
50 if (bind & PIPE_BIND_TRANSFER_READ)
51 res |= PB_USAGE_CPU_READ;
52
53 /* Is usage of any use for us? Probably not. */
54
55 /* Now add driver-specific usage flags. */
56 if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
57 res |= RADEON_PB_USAGE_VERTEX;
58
59 if (domain & R300_DOMAIN_GTT)
60 res |= RADEON_PB_USAGE_DOMAIN_GTT;
61
62 if (domain & R300_DOMAIN_VRAM)
63 res |= RADEON_PB_USAGE_DOMAIN_VRAM;
64
65 return res;
66 }
67
68 static struct r300_winsys_buffer *
69 radeon_r300_winsys_buffer_create(struct r300_winsys_screen *rws,
70 unsigned size,
71 unsigned alignment,
72 unsigned bind,
73 unsigned usage,
74 enum r300_buffer_domain domain)
75 {
76 struct radeon_libdrm_winsys *ws = radeon_libdrm_winsys(rws);
77 struct pb_desc desc;
78 struct pb_manager *provider;
79 struct pb_buffer *buffer;
80
81 memset(&desc, 0, sizeof(desc));
82 desc.alignment = alignment;
83 desc.usage = get_pb_usage_from_create_flags(bind, usage, domain);
84
85 /* Assign a buffer manager. */
86 if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
87 provider = ws->cman;
88 else
89 provider = ws->kman;
90
91 buffer = provider->create_buffer(provider, size, &desc);
92 if (!buffer)
93 return NULL;
94
95 return radeon_libdrm_winsys_buffer(buffer);
96 }
97
static void radeon_r300_winsys_buffer_reference(struct r300_winsys_screen *rws,
                                                struct r300_winsys_buffer **pdst,
                                                struct r300_winsys_buffer *src)
{
    /* Unwrap both handles, let pb_reference do the refcount bookkeeping
     * (possibly destroying the old destination buffer), then store the
     * resulting pointer back into *pdst. */
    struct pb_buffer *src_buf = radeon_pb_buffer(src);
    struct pb_buffer *dst_buf = radeon_pb_buffer(*pdst);

    pb_reference(&dst_buf, src_buf);

    *pdst = radeon_libdrm_winsys_buffer(dst_buf);
}
109
110 static struct r300_winsys_buffer *radeon_r300_winsys_buffer_from_handle(struct r300_winsys_screen *rws,
111 struct winsys_handle *whandle,
112 unsigned *stride,
113 unsigned *size)
114 {
115 struct radeon_libdrm_winsys *ws = radeon_libdrm_winsys(rws);
116 struct pb_buffer *_buf;
117
118 _buf = radeon_drm_bufmgr_create_buffer_from_handle(ws->kman, whandle->handle);
119
120 if (stride)
121 *stride = whandle->stride;
122 if (size)
123 *size = _buf->base.size;
124
125 return radeon_libdrm_winsys_buffer(_buf);
126 }
127
128 static boolean radeon_r300_winsys_buffer_get_handle(struct r300_winsys_screen *rws,
129 struct r300_winsys_buffer *buffer,
130 unsigned stride,
131 struct winsys_handle *whandle)
132 {
133 struct pb_buffer *_buf = radeon_pb_buffer(buffer);
134 whandle->stride = stride;
135 return radeon_drm_bufmgr_get_handle(_buf, whandle);
136 }
137
138 static void radeon_r300_winsys_cs_set_flush(struct r300_winsys_cs *rcs,
139 void (*flush)(void *),
140 void *user)
141 {
142 struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
143 cs->flush_cs = flush;
144 cs->flush_data = user;
145 radeon_cs_space_set_flush(cs->cs, flush, user);
146 }
147
148 static boolean radeon_r300_winsys_cs_validate(struct r300_winsys_cs *rcs)
149 {
150 struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
151
152 return radeon_cs_space_check(cs->cs) >= 0;
153 }
154
155 static void radeon_r300_winsys_cs_reset_buffers(struct r300_winsys_cs *rcs)
156 {
157 struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
158 radeon_cs_space_reset_bos(cs->cs);
159 }
160
161 static void radeon_r300_winsys_cs_flush(struct r300_winsys_cs *rcs)
162 {
163 struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
164 int retval;
165
166 /* Don't flush a zero-sized CS. */
167 if (!cs->base.cdw) {
168 return;
169 }
170
171 cs->cs->cdw = cs->base.cdw;
172
173 radeon_drm_bufmgr_flush_maps(cs->ws->kman);
174
175 /* Emit the CS. */
176 retval = radeon_cs_emit(cs->cs);
177 if (retval) {
178 if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
179 fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
180 radeon_cs_print(cs->cs, stderr);
181 } else {
182 fprintf(stderr, "radeon: The kernel rejected CS, "
183 "see dmesg for more information.\n");
184 }
185 }
186
187 /* Reset CS.
188 * Someday, when we care about performance, we should really find a way
189 * to rotate between two or three CS objects so that the GPU can be
190 * spinning through one CS while another one is being filled. */
191 radeon_cs_erase(cs->cs);
192
193 cs->base.ptr = cs->cs->packets;
194 cs->base.cdw = cs->cs->cdw;
195 cs->base.ndw = cs->cs->ndw;
196 }
197
198 static uint32_t radeon_get_value(struct r300_winsys_screen *rws,
199 enum r300_value_id id)
200 {
201 struct radeon_libdrm_winsys *ws = (struct radeon_libdrm_winsys *)rws;
202
203 switch(id) {
204 case R300_VID_PCI_ID:
205 return ws->pci_id;
206 case R300_VID_GB_PIPES:
207 return ws->gb_pipes;
208 case R300_VID_Z_PIPES:
209 return ws->z_pipes;
210 case R300_VID_SQUARE_TILING_SUPPORT:
211 return ws->squaretiling;
212 case R300_VID_DRM_2_3_0:
213 return ws->drm_2_3_0;
214 case R300_CAN_HYPERZ:
215 return ws->hyperz;
216 }
217 return 0;
218 }
219
220 static struct r300_winsys_cs *radeon_r300_winsys_cs_create(struct r300_winsys_screen *rws)
221 {
222 struct radeon_libdrm_winsys *ws = radeon_libdrm_winsys(rws);
223 struct radeon_libdrm_cs *cs = CALLOC_STRUCT(radeon_libdrm_cs);
224
225 if (!cs)
226 return NULL;
227
228 /* Size limit on IBs is 64 kibibytes. */
229 cs->cs = radeon_cs_create(ws->csm, 1024 * 64 / 4);
230 if (!cs->cs) {
231 FREE(cs);
232 return NULL;
233 }
234
235 radeon_cs_set_limit(cs->cs,
236 RADEON_GEM_DOMAIN_GTT, ws->gart_size);
237 radeon_cs_set_limit(cs->cs,
238 RADEON_GEM_DOMAIN_VRAM, ws->vram_size);
239
240 cs->ws = ws;
241 cs->base.ptr = cs->cs->packets;
242 cs->base.cdw = cs->cs->cdw;
243 cs->base.ndw = cs->cs->ndw;
244 return &cs->base;
245 }
246
247 static void radeon_r300_winsys_cs_destroy(struct r300_winsys_cs *rcs)
248 {
249 struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
250 radeon_cs_destroy(cs->cs);
251 }
252
/* Tear down the winsys's buffer and CS managers.
 *
 * Order matters: cman is a cache layered on top of kman, and kman's
 * buffers are backed by the GEM BO manager (bom), so each layer must be
 * destroyed before the one it sits on.
 *
 * NOTE(review): the radeon_libdrm_winsys struct itself is not freed here;
 * presumably the caller that allocated it owns it — confirm at call site. */
static void radeon_winsys_destroy(struct r300_winsys_screen *rws)
{
    struct radeon_libdrm_winsys *ws = (struct radeon_libdrm_winsys *)rws;

    ws->cman->destroy(ws->cman);
    ws->kman->destroy(ws->kman);

    radeon_bo_manager_gem_dtor(ws->bom);
    radeon_cs_manager_gem_dtor(ws->csm);
}
263
264 boolean radeon_setup_winsys(int fd, struct radeon_libdrm_winsys* ws)
265 {
266 ws->csm = radeon_cs_manager_gem_ctor(fd);
267 if (!ws->csm)
268 goto fail;
269 ws->bom = radeon_bo_manager_gem_ctor(fd);
270 if (!ws->bom)
271 goto fail;
272 ws->kman = radeon_drm_bufmgr_create(ws);
273 if (!ws->kman)
274 goto fail;
275
276 ws->cman = pb_cache_manager_create(ws->kman, 100000);
277 if (!ws->cman)
278 goto fail;
279
280 ws->base.destroy = radeon_winsys_destroy;
281 ws->base.get_value = radeon_get_value;
282
283 ws->base.buffer_create = radeon_r300_winsys_buffer_create;
284 ws->base.buffer_set_tiling = radeon_drm_bufmgr_set_tiling;
285 ws->base.buffer_get_tiling = radeon_drm_bufmgr_get_tiling;
286 ws->base.buffer_map = radeon_drm_buffer_map;
287 ws->base.buffer_unmap = radeon_drm_buffer_unmap;
288 ws->base.buffer_wait = radeon_drm_bufmgr_wait;
289 ws->base.buffer_reference = radeon_r300_winsys_buffer_reference;
290 ws->base.buffer_from_handle = radeon_r300_winsys_buffer_from_handle;
291 ws->base.buffer_get_handle = radeon_r300_winsys_buffer_get_handle;
292
293 ws->base.cs_create = radeon_r300_winsys_cs_create;
294 ws->base.cs_destroy = radeon_r300_winsys_cs_destroy;
295 ws->base.cs_add_buffer = radeon_drm_bufmgr_add_buffer;
296 ws->base.cs_validate = radeon_r300_winsys_cs_validate;
297 ws->base.cs_write_reloc = radeon_drm_bufmgr_write_reloc;
298 ws->base.cs_flush = radeon_r300_winsys_cs_flush;
299 ws->base.cs_reset_buffers = radeon_r300_winsys_cs_reset_buffers;
300 ws->base.cs_set_flush = radeon_r300_winsys_cs_set_flush;
301 ws->base.cs_is_buffer_referenced = radeon_drm_bufmgr_is_buffer_referenced;
302 return TRUE;
303
304 fail:
305 if (ws->csm)
306 radeon_cs_manager_gem_dtor(ws->csm);
307
308 if (ws->bom)
309 radeon_bo_manager_gem_dtor(ws->bom);
310
311 if (ws->cman)
312 ws->cman->destroy(ws->cman);
313 if (ws->kman)
314 ws->kman->destroy(ws->kman);
315
316 return FALSE;
317 }