/*
Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/**
 * Mostly copied from radeon/radeon_cs_legacy.c
 */

#include <errno.h>

#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/simple_list.h"

#include "drm.h"
#include "radeon_drm.h"

#include "r600_context.h"
#include "radeon_reg.h"
#include "r600_cmdbuf.h"
#include "radeon_bocs_wrapper.h"

#ifdef HAVE_LIBDRM_RADEON
#include "radeon_cs_int.h"
#else
#include "radeon_cs_int_drm.h"
#endif

struct r600_cs_manager_legacy
{
    struct radeon_cs_manager base;
    struct radeon_context *ctx;
    /* hack for scratch stuff */
    uint32_t pending_age;
    uint32_t pending_count;
};

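/*
 * Legacy relocation bookkeeping.  Each referenced BO gets one entry:
 * cindices counts how many times the BO is referenced from the IB, and
 * indices[]/reloc_indices[] record the dword positions inside the IB where
 * a two-dword placeholder was reserved for that reference.  The
 * placeholders are filled in at emit time by r600_cs_process_relocs().
 */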
struct r600_cs_reloc_legacy {
    struct radeon_cs_reloc base;
    uint32_t cindices;
    uint32_t *indices;
    uint32_t *reloc_indices;
};

static struct radeon_cs_int *r600_cs_create(struct radeon_cs_manager *csm,
                                            uint32_t ndw)
{
    struct radeon_cs_int *csi;

    csi = (struct radeon_cs_int*)calloc(1, sizeof(struct radeon_cs_int));
    if (csi == NULL) {
        return NULL;
    }
    csi->csm = csm;
    csi->ndw = (ndw + 0x3FF) & (~0x3FF);
    csi->packets = (uint32_t*)malloc(4 * csi->ndw);
    if (csi->packets == NULL) {
        free(csi);
        return NULL;
    }
    csi->relocs_total_size = 0;
    return csi;
}

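/*
 * Record a buffer-object relocation.  A BO may only be referenced with a
 * read domain or a write domain within one CS, never both, and the CPU
 * domain is rejected.  For every reference two dwords are reserved in the
 * IB as a placeholder; the actual PKT3 NOP + reloc-chunk index pair is
 * written later by r600_cs_process_relocs().
 */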
static int r600_cs_write_reloc(struct radeon_cs_int *csi,
                               struct radeon_bo *bo,
                               uint32_t read_domain,
                               uint32_t write_domain,
                               uint32_t flags)
{
    struct r600_cs_reloc_legacy *relocs;
    int i;

    relocs = (struct r600_cs_reloc_legacy *)csi->relocs;
    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* in one CS a bo can only be in the read or the write domain,
         * but not in both at the same time
         */
        return -EINVAL;
    }
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    /* check if bo is already referenced */
    for (i = 0; i < csi->crelocs; i++) {
        uint32_t *indices;
        uint32_t *reloc_indices;

        if (relocs[i].base.bo->handle == bo->handle) {
            /* The domains must agree with the existing reloc.  We already
             * checked above that exactly one of the read or write domains
             * is set, so here we only require that a bo previously
             * relocated for reading is relocated for reading again, and
             * likewise for writing.
             */
            if (relocs[i].base.read_domain && !read_domain) {
                return -EINVAL;
            }
            if (relocs[i].base.write_domain && !write_domain) {
                return -EINVAL;
            }
            relocs[i].base.read_domain |= read_domain;
            relocs[i].base.write_domain |= write_domain;
            /* save index */
            relocs[i].cindices++;
            indices = (uint32_t*)realloc(relocs[i].indices,
                                         relocs[i].cindices * 4);
            reloc_indices = (uint32_t*)realloc(relocs[i].reloc_indices,
                                               relocs[i].cindices * 4);
            if ((indices == NULL) || (reloc_indices == NULL)) {
                relocs[i].cindices -= 1;
                return -ENOMEM;
            }
            relocs[i].indices = indices;
            relocs[i].reloc_indices = reloc_indices;
            relocs[i].indices[relocs[i].cindices - 1] = csi->cdw;
            relocs[i].reloc_indices[relocs[i].cindices - 1] = csi->cdw;
            csi->section_cdw += 2;
            csi->cdw += 2;

            return 0;
        }
    }
    /* add bo to reloc */
    relocs = (struct r600_cs_reloc_legacy*)
             realloc(csi->relocs,
                     sizeof(struct r600_cs_reloc_legacy) * (csi->crelocs + 1));
    if (relocs == NULL) {
        return -ENOMEM;
    }
    csi->relocs = relocs;
    relocs[csi->crelocs].base.bo = bo;
    relocs[csi->crelocs].base.read_domain = read_domain;
    relocs[csi->crelocs].base.write_domain = write_domain;
    relocs[csi->crelocs].base.flags = flags;
    relocs[csi->crelocs].indices = (uint32_t*)malloc(4);
    relocs[csi->crelocs].reloc_indices = (uint32_t*)malloc(4);
    if ((relocs[csi->crelocs].indices == NULL) ||
        (relocs[csi->crelocs].reloc_indices == NULL)) {
        return -ENOMEM;
    }

    relocs[csi->crelocs].indices[0] = csi->cdw;
    relocs[csi->crelocs].reloc_indices[0] = csi->cdw;
    csi->section_cdw += 2;
    csi->cdw += 2;
    relocs[csi->crelocs].cindices = 1;
    csi->relocs_total_size += radeon_bo_legacy_relocs_size(bo);
    csi->crelocs++;

    radeon_bo_ref(bo);

    return 0;
}
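
/*
 * Illustrative sketch only (not a new code path): a caller going through
 * the generic radeon_cs wrappers (see radeon_bocs_wrapper.h) must account
 * for the two placeholder dwords per relocation when it opens a section,
 * roughly:
 *
 *     radeon_cs_begin(cs, 4, __FILE__, __FUNCTION__, __LINE__);
 *     radeon_cs_write_dword(cs, pkt3_header);
 *     radeon_cs_write_dword(cs, payload);
 *     radeon_cs_write_reloc(cs, bo, RADEON_GEM_DOMAIN_VRAM, 0, 0);
 *     radeon_cs_end(cs, __FILE__, __FUNCTION__, __LINE__);
 *
 * On this legacy path the reloc's two reserved dwords count toward the
 * section size (section_cdw += 2 above); at emit time they become the
 * 0xC0001000 PKT3 NOP header plus the index of the BO's entry in the
 * relocation chunk.
 */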

static int r600_cs_begin(struct radeon_cs_int *csi,
                         uint32_t ndw,
                         const char *file,
                         const char *func,
                         int line)
{
    if (csi->section_ndw) {
        fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
                csi->section_file, csi->section_func, csi->section_line);
        fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }

    csi->section_ndw = ndw;
    csi->section_cdw = 0;
    csi->section_file = file;
    csi->section_func = func;
    csi->section_line = line;

    if (csi->cdw + ndw > csi->ndw) {
        uint32_t tmp, *ptr;
        int num = (ndw > 0x400) ? ndw : 0x400;

        tmp = (csi->cdw + num + 0x3FF) & (~0x3FF);
        ptr = (uint32_t*)realloc(csi->packets, 4 * tmp);
        if (ptr == NULL) {
            return -ENOMEM;
        }
        csi->packets = ptr;
        csi->ndw = tmp;
    }

    return 0;
}

static int r600_cs_end(struct radeon_cs_int *csi,
                       const char *file,
                       const char *func,
                       int line)
{
    if (!csi->section_ndw) {
        fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }

    if (csi->section_ndw != csi->section_cdw) {
        fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
                csi->section_file, csi->section_func, csi->section_line,
                csi->section_ndw, csi->section_cdw);
        fprintf(stderr, "csi->section_ndw = %d, csi->cdw = %d, csi->section_cdw = %d\n",
                csi->section_ndw, csi->cdw, csi->section_cdw);
        fprintf(stderr, "CS section end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    csi->section_ndw = 0;

    if (csi->cdw > csi->ndw) {
        fprintf(stderr, "CS section overflow at (%s,%s,%d) cdw %d ndw %d\n",
                csi->section_file, csi->section_func, csi->section_line,
                csi->cdw, csi->ndw);
        fprintf(stderr, "CS section end at (%s,%s,%d)\n",
                file, func, line);
        assert(0);
    }

    return 0;
}

static int r600_cs_process_relocs(struct radeon_cs_int *csi,
                                  uint32_t *reloc_chunk,
                                  uint32_t *length_dw_reloc_chunk)
{
    struct r600_cs_reloc_legacy *relocs;
    int i, j, r;
    uint32_t offset_dw = 0;

    relocs = (struct r600_cs_reloc_legacy *)csi->relocs;
restart:
    for (i = 0; i < csi->crelocs; i++) {
        uint32_t soffset, eoffset;

        r = radeon_bo_legacy_validate(relocs[i].base.bo,
                                      &soffset, &eoffset);
        if (r == -EAGAIN) {
            goto restart;
        }
        if (r) {
            fprintf(stderr, "invalid bo(%p) [0x%08X, 0x%08X]\n",
                    relocs[i].base.bo, soffset, eoffset);
            return r;
        }

        for (j = 0; j < relocs[i].cindices; j++) {
            /* pkt3 nop header in ib chunk */
            csi->packets[relocs[i].reloc_indices[j]] = 0xC0001000;
            /* reloc index in ib chunk */
            csi->packets[relocs[i].reloc_indices[j] + 1] = offset_dw;
        }

        /* asic offset in reloc chunk */ /* see alex drm r600_nomm_relocate */
        reloc_chunk[offset_dw] = soffset;
        reloc_chunk[offset_dw + 3] = 0;

        offset_dw += 4;
    }

    *length_dw_reloc_chunk = offset_dw;

    return 0;
}
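
/*
 * Layout note: the relocation chunk uses four dwords per BO.  Only dword 0
 * (the validated GPU offset) and dword 3 (cleared to zero) are written
 * here; the remaining dwords stay zero from the calloc in r600_cs_emit().
 * This is the layout the legacy (non-KMS) kernel CS path consumes (see the
 * r600_nomm_relocate reference above).
 */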

static int r600_cs_set_age(struct radeon_cs_int *csi)
{
    struct r600_cs_manager_legacy *csm = (struct r600_cs_manager_legacy*)csi->csm;
    struct r600_cs_reloc_legacy *relocs;
    int i;

    relocs = (struct r600_cs_reloc_legacy *)csi->relocs;
    for (i = 0; i < csi->crelocs; i++) {
        radeon_bo_legacy_pending(relocs[i].base.bo, csm->pending_age);
        radeon_bo_unref(relocs[i].base.bo);
    }
    return 0;
}

#if 0
static void dump_cmdbuf(struct radeon_cs_int *csi)
{
    int i;

    fprintf(stderr, "--start--\n");
    for (i = 0; i < csi->cdw; i++) {
        fprintf(stderr, "0x%08x\n", csi->packets[i]);
    }
    fprintf(stderr, "--end--\n");
}
#endif

static int r600_cs_emit(struct radeon_cs_int *csi)
{
    struct r600_cs_manager_legacy *csm = (struct r600_cs_manager_legacy*)csi->csm;
    struct drm_radeon_cs cs_cmd;
    struct drm_radeon_cs_chunk cs_chunk[2];
    uint32_t length_dw_reloc_chunk;
    uint64_t chunk_ptrs[2];
    uint32_t *reloc_chunk;
    int r;
    int retry = 0;

    /* TODO: put chip-level things here if needed. */
    /* csm->ctx->vtbl.emit_cs_header(cs, csm->ctx); */

    csm->pending_count = 1;

    reloc_chunk = (uint32_t*)calloc(1, csi->crelocs * 4 * 4);
    if (reloc_chunk == NULL) {
        return -ENOMEM;
    }

    r = r600_cs_process_relocs(csi, reloc_chunk, &length_dw_reloc_chunk);
    if (r) {
        free(reloc_chunk);
        return 0;
    }

    /* raw ib chunk */
    cs_chunk[0].chunk_id = RADEON_CHUNK_ID_IB;
    cs_chunk[0].length_dw = csi->cdw;
    cs_chunk[0].chunk_data = (unsigned long)(csi->packets);

    /* reloc chunk */
    cs_chunk[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    cs_chunk[1].length_dw = length_dw_reloc_chunk;
    cs_chunk[1].chunk_data = (unsigned long)reloc_chunk;

    chunk_ptrs[0] = (uint64_t)(unsigned long)&(cs_chunk[0]);
    chunk_ptrs[1] = (uint64_t)(unsigned long)&(cs_chunk[1]);

    cs_cmd.num_chunks = 2;
    /* cs_cmd.cs_id = 0; */
    cs_cmd.chunks = (uint64_t)(unsigned long)chunk_ptrs;

    /* dump_cmdbuf(csi); */

    do {
        r = drmCommandWriteRead(csi->csm->fd, DRM_RADEON_CS, &cs_cmd, sizeof(cs_cmd));
        retry++;
    } while (r == -EAGAIN && retry < 1000);

    if (r) {
        free(reloc_chunk);
        return r;
    }

    csm->pending_age = cs_cmd.cs_id;

    r600_cs_set_age(csi);

    csi->csm->read_used = 0;
    csi->csm->vram_write_used = 0;
    csi->csm->gart_write_used = 0;

    free(reloc_chunk);

    return 0;
}
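
/*
 * Submission summary: the legacy CS ioctl takes two chunks, the raw IB
 * dwords (RADEON_CHUNK_ID_IB) and the relocation table
 * (RADEON_CHUNK_ID_RELOCS).  DRM_RADEON_CS is retried on -EAGAIN (up to
 * 1000 times); on success the kernel-assigned cs_id becomes the pending
 * age used to fence the BOs in r600_cs_set_age().
 */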

static inline void r600_cs_free_reloc(void *relocs_p, int crelocs)
{
    struct r600_cs_reloc_legacy *relocs = relocs_p;
    int i;

    if (!relocs_p)
        return;
    for (i = 0; i < crelocs; i++) {
        free(relocs[i].indices);
        free(relocs[i].reloc_indices);
    }
}

static int r600_cs_destroy(struct radeon_cs_int *csi)
{
    r600_cs_free_reloc(csi->relocs, csi->crelocs);
    free(csi->relocs);
    free(csi->packets);
    free(csi);
    return 0;
}

static int r600_cs_erase(struct radeon_cs_int *csi)
{
    r600_cs_free_reloc(csi->relocs, csi->crelocs);
    free(csi->relocs);
    csi->relocs_total_size = 0;
    csi->relocs = NULL;
    csi->crelocs = 0;
    csi->cdw = 0;
    return 0;
}

static int r600_cs_need_flush(struct radeon_cs_int *csi)
{
    /* This function used to flush when the BO usage reached a certain
     * size; the higher levels now handle this better. */
    return 0;
}

static void r600_cs_print(struct radeon_cs_int *csi, FILE *file)
{
}

static struct radeon_cs_funcs r600_cs_funcs = {
    r600_cs_create,
    r600_cs_write_reloc,
    r600_cs_begin,
    r600_cs_end,
    r600_cs_emit,
    r600_cs_destroy,
    r600_cs_erase,
    r600_cs_need_flush,
    r600_cs_print
};

struct radeon_cs_manager *r600_radeon_cs_manager_legacy_ctor(struct radeon_context *ctx)
{
    struct r600_cs_manager_legacy *csm;

    csm = (struct r600_cs_manager_legacy*)
          calloc(1, sizeof(struct r600_cs_manager_legacy));
    if (csm == NULL) {
        return NULL;
    }
    csm->base.funcs = &r600_cs_funcs;
    csm->base.fd = ctx->dri.fd;
    csm->ctx = ctx;
    csm->pending_age = 1;
    return (struct radeon_cs_manager*)csm;
}
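
/*
 * Minimal usage sketch (illustrative only): on a non-KMS screen this
 * constructor is used instead of radeon_cs_manager_gem_ctor(), and the
 * functions above are reached through the generic radeon_cs wrappers, e.g.
 *
 *     struct radeon_cs_manager *csm = r600_radeon_cs_manager_legacy_ctor(rmesa);
 *     struct radeon_cs *cs = radeon_cs_create(csm, size_in_dw);
 *     ...
 *     radeon_cs_emit(cs);    dispatches to r600_cs_emit()
 *     radeon_cs_erase(cs);   dispatches to r600_cs_erase()
 *
 * r600InitCmdBuf() below performs exactly this selection based on
 * radeonScreen->kernel_mm.
 */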

void r600InitCmdBuf(context_t *r600) /* from rcommonInitCmdBuf */
{
    radeonContextPtr rmesa = &r600->radeon;
    GLuint size;

    if (r600->radeon.radeonScreen->chip_family >= CHIP_FAMILY_CEDAR) {
        evergreenInitAtoms(r600);
    } else {
        r600InitAtoms(r600);
    }

    /* Initialize command buffer */
    size = 256 * driQueryOptioni(&rmesa->optionCache,
                                 "command_buffer_size");
    if (size < 2 * rmesa->hw.max_state_size) {
        size = 2 * rmesa->hw.max_state_size + 65535;
    }
    if (size > 64 * 256)
        size = 64 * 256;

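    /* Sizing note (hypothetical numbers, for illustration only): size is
     * in dwords, so a command_buffer_size driconf value of 16 yields
     * 256 * 16 = 4096 dwords (16 KiB), and the clamp above caps the buffer
     * at 64 * 256 = 16384 dwords (64 KiB) regardless of max_state_size. */
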
    if (rmesa->radeonScreen->kernel_mm) {
        int fd = rmesa->radeonScreen->driScreen->fd;
        rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
    } else {
        rmesa->cmdbuf.csm = r600_radeon_cs_manager_legacy_ctor(rmesa);
    }
    if (rmesa->cmdbuf.csm == NULL) {
        /* FIXME: fatal error */
        return;
    }
    rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
    assert(rmesa->cmdbuf.cs != NULL);
    rmesa->cmdbuf.size = size;

    radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
                              (void (*)(void *))rmesa->glCtx->Driver.Flush,
                              rmesa->glCtx);

    if (!rmesa->radeonScreen->kernel_mm) {
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
                            rmesa->radeonScreen->texSize[0]);
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
                            rmesa->radeonScreen->gartTextures.size);
    } else {
        struct drm_radeon_gem_info mminfo;

        if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
                                 &mminfo, sizeof(mminfo))) {
            radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
                                mminfo.vram_visible);
            radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
                                mminfo.gart_size);
        }
    }
}