/*
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/
/**
 * Mostly copied from radeon/radeon_cs_legacy.c
 */

#include <errno.h>

#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/simple_list.h"
#include "swrast/swrast.h"

#include "drm.h"
#include "radeon_drm.h"

#include "r600_context.h"
#include "radeon_reg.h"
#include "r600_cmdbuf.h"
#include "r600_emit.h"
#include "radeon_bocs_wrapper.h"
#include "radeon_mipmap_tree.h"

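/* Legacy (non-KMS) command stream bookkeeping.  The manager wraps the
 * generic radeon_cs_manager; each reloc records every command stream
 * location that references a BO (indices), where the corresponding
 * PKT3 NOP relocation packet lives (reloc_indices), and the offset
 * modifier to apply once the BO is validated.
 */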
struct r600_cs_manager_legacy
{
    struct radeon_cs_manager base;
    struct radeon_context *ctx;
    /* hack for scratch stuff */
    uint32_t pending_age;
    uint32_t pending_count;
};

struct r600_cs_reloc_legacy {
    struct radeon_cs_reloc base;
    uint32_t cindices;
    uint32_t *indices;
    uint32_t *reloc_indices;
    struct offset_modifiers offset_mod;
};

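/* Allocate a new command stream.  The requested size is rounded up to
 * a multiple of 0x400 dwords so the packet buffer grows in 4KB steps.
 */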
static struct radeon_cs *r600_cs_create(struct radeon_cs_manager *csm,
                                        uint32_t ndw)
{
    struct radeon_cs *cs;

    cs = (struct radeon_cs*)calloc(1, sizeof(struct radeon_cs));
    if (cs == NULL) {
        return NULL;
    }
    cs->csm = csm;
    cs->ndw = (ndw + 0x3FF) & ~0x3FF;
    cs->packets = (uint32_t*)malloc(4 * cs->ndw);
    if (cs->packets == NULL) {
        free(cs);
        return NULL;
    }
    cs->relocs_total_size = 0;
    return cs;
}

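/* Add a relocation for a BO at the current command stream position.
 * In one CS a BO may be referenced in either the read or the write
 * domain, never both; if the BO is already referenced we just record
 * another index, otherwise a new reloc entry is appended.  Two dwords
 * are reserved in the section for the PKT3 NOP relocation packet.
 */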
int r600_cs_write_reloc(struct radeon_cs *cs,
                        struct radeon_bo *bo,
                        uint32_t read_domain,
                        uint32_t write_domain,
                        uint32_t flags,
                        offset_modifiers* poffset_mod)
{
    struct r600_cs_reloc_legacy *relocs;
    int i;

    relocs = (struct r600_cs_reloc_legacy *)cs->relocs;
    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* in one CS a bo can only be in read or write domain but not
         * in read & write domain at the same time
         */
        return -EINVAL;
    }
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    /* check if bo is already referenced */
    for (i = 0; i < cs->crelocs; i++) {
        uint32_t *indices;
        uint32_t *reloc_indices;

        if (relocs[i].base.bo->handle == bo->handle) {
            /* Domains must stay consistent: we already checked above
             * that exactly one of the read or write domain is set, so
             * here we only need to check that if the previous reloc
             * has the read domain set then read_domain is also set
             * for this new relocation (and likewise for write).
             */
            if (relocs[i].base.read_domain && !read_domain) {
                return -EINVAL;
            }
            if (relocs[i].base.write_domain && !write_domain) {
                return -EINVAL;
            }
            relocs[i].base.read_domain |= read_domain;
            relocs[i].base.write_domain |= write_domain;
            /* save index */
            relocs[i].cindices++;
            indices = (uint32_t*)realloc(relocs[i].indices,
                                         relocs[i].cindices * 4);
            reloc_indices = (uint32_t*)realloc(relocs[i].reloc_indices,
                                               relocs[i].cindices * 4);
            if ((indices == NULL) || (reloc_indices == NULL)) {
                relocs[i].cindices -= 1;
                return -ENOMEM;
            }
            relocs[i].indices = indices;
            relocs[i].reloc_indices = reloc_indices;
            relocs[i].indices[relocs[i].cindices - 1] = cs->cdw - 1;
            relocs[i].reloc_indices[relocs[i].cindices - 1] = cs->section_cdw;
            cs->section_ndw += 2;
            cs->section_cdw += 2;

            relocs[i].offset_mod.shift = poffset_mod->shift;
            relocs[i].offset_mod.shiftbits = poffset_mod->shiftbits;
            relocs[i].offset_mod.mask = poffset_mod->mask;

            return 0;
        }
    }
    /* add bo to reloc */
    relocs = (struct r600_cs_reloc_legacy*)
             realloc(cs->relocs,
                     sizeof(struct r600_cs_reloc_legacy) * (cs->crelocs + 1));
    if (relocs == NULL) {
        return -ENOMEM;
    }
    cs->relocs = relocs;
    relocs[cs->crelocs].base.bo = bo;
    relocs[cs->crelocs].base.read_domain = read_domain;
    relocs[cs->crelocs].base.write_domain = write_domain;
    relocs[cs->crelocs].base.flags = flags;
    relocs[cs->crelocs].indices = (uint32_t*)malloc(4);
    relocs[cs->crelocs].reloc_indices = (uint32_t*)malloc(4);
    if ((relocs[cs->crelocs].indices == NULL) ||
        (relocs[cs->crelocs].reloc_indices == NULL)) {
        return -ENOMEM;
    }
    relocs[cs->crelocs].offset_mod.shift = poffset_mod->shift;
    relocs[cs->crelocs].offset_mod.shiftbits = poffset_mod->shiftbits;
    relocs[cs->crelocs].offset_mod.mask = poffset_mod->mask;

    relocs[cs->crelocs].indices[0] = cs->cdw - 1;
    relocs[cs->crelocs].reloc_indices[0] = cs->section_cdw;
    cs->section_ndw += 2;
    cs->section_cdw += 2;
    relocs[cs->crelocs].cindices = 1;
    cs->relocs_total_size += radeon_bo_legacy_relocs_size(bo);
    cs->crelocs++;

    radeon_bo_ref(bo);

    return 0;
}

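/* Begin a command stream section of at most ndw dwords.  The packet
 * buffer is grown if needed, keeping 32 spare dwords for the reloc
 * packets that r600_cs_write_reloc() appends after the section body.
 */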
static int r600_cs_begin(struct radeon_cs *cs,
                         uint32_t ndw,
                         const char *file,
                         const char *func,
                         int line)
{
    if (cs->section) {
        fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
                cs->section_file, cs->section_func, cs->section_line);
        fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }

    /* Leave 32 dwords (8 offset+pitch relocs) of spare room for reloc indices. */
    if (cs->cdw + ndw + 32 > cs->ndw) {
        uint32_t tmp, *ptr;

        /* grow to the next multiple of 0x400 dwords that fits the request */
        tmp = (cs->cdw + ndw + 32 + 0x3FF) & ~0x3FF;
        ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
        if (ptr == NULL) {
            return -ENOMEM;
        }
        cs->packets = ptr;
        cs->ndw = tmp;
    }

    cs->section = 1;
    cs->section_ndw = 0;
    cs->section_cdw = cs->cdw + ndw; /* start of reloc indices. */
    cs->section_file = file;
    cs->section_func = func;
    cs->section_line = line;

    return 0;
}

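/* End the current section: verify that exactly the number of dwords
 * declared to r600_cs_begin() was emitted, then advance cdw past the
 * reloc packets appended at the end of the section.
 */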
static int r600_cs_end(struct radeon_cs *cs,
                       const char *file,
                       const char *func,
                       int line)
{
    if (!cs->section) {
        fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    cs->section = 0;

    if ((cs->section_ndw + cs->cdw) != cs->section_cdw) {
        fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
                cs->section_file, cs->section_func, cs->section_line,
                cs->section_ndw, cs->section_cdw);
        fprintf(stderr, "cs->section_ndw = %d, cs->cdw = %d, cs->section_cdw = %d\n",
                cs->section_ndw, cs->cdw, cs->section_cdw);
        fprintf(stderr, "CS section end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }

    cs->cdw = cs->section_cdw;
    return 0;
}

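/* Validate every referenced BO and patch the command stream: each
 * recorded index gets the validated GPU offset (with its offset
 * modifier applied), and each reserved pair of dwords becomes a PKT3
 * NOP packet pointing at the matching entry in the reloc chunk.
 * Validation restarts from scratch if a BO was evicted (-EAGAIN).
 */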
static int r600_cs_process_relocs(struct radeon_cs *cs,
                                  uint32_t *reloc_chunk,
                                  uint32_t *length_dw_reloc_chunk)
{
    struct r600_cs_reloc_legacy *relocs;
    int i, j, r;
    uint32_t offset_dw = 0;

    relocs = (struct r600_cs_reloc_legacy *)cs->relocs;
restart:
    for (i = 0; i < cs->crelocs; i++) {
        for (j = 0; j < relocs[i].cindices; j++) {
            uint32_t soffset, eoffset, asicoffset;

            r = radeon_bo_legacy_validate(relocs[i].base.bo,
                                          &soffset, &eoffset);
            if (r == -EAGAIN) {
                goto restart;
            }
            if (r) {
                fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
                        relocs[i].base.bo, soffset, eoffset);
                return r;
            }
            asicoffset = cs->packets[relocs[i].indices[j]] + soffset;
            if (asicoffset >= eoffset) {
                /* radeon_bo_debug(relocs[i].base.bo, 12); */
                fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
                        relocs[i].base.bo, soffset, eoffset);
                fprintf(stderr, "above end: %p 0x%08X 0x%08X\n",
                        relocs[i].base.bo,
                        cs->packets[relocs[i].indices[j]],
                        eoffset);
                return -EINVAL;
            }
            /* apply offset operator */
            switch (relocs[i].offset_mod.shift) {
            case NO_SHIFT:
                asicoffset = asicoffset & relocs[i].offset_mod.mask;
                break;
            case LEFT_SHIFT:
                asicoffset = (asicoffset << relocs[i].offset_mod.shiftbits) & relocs[i].offset_mod.mask;
                break;
            case RIGHT_SHIFT:
                asicoffset = (asicoffset >> relocs[i].offset_mod.shiftbits) & relocs[i].offset_mod.mask;
                break;
            default:
                break;
            }

            /* pkt3 nop header in ib chunk */
            cs->packets[relocs[i].reloc_indices[j]] = 0xC0001000;

            /* reloc index in ib chunk */
            cs->packets[relocs[i].reloc_indices[j] + 1] = offset_dw;

            /* asic offset in reloc chunk (see alex drm r600_nomm_relocate) */
            reloc_chunk[offset_dw] = asicoffset;
            reloc_chunk[offset_dw + 3] = 0;

            offset_dw += 4;
        }
    }

    *length_dw_reloc_chunk = offset_dw;

    return 0;
}

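/* Mark every BO referenced by this CS as pending with the current age
 * so it is not reused too early, then drop the CS's references.
 */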
static int r600_cs_set_age(struct radeon_cs *cs)
{
    struct r600_cs_manager_legacy *csm = (struct r600_cs_manager_legacy*)cs->csm;
    struct r600_cs_reloc_legacy *relocs;
    int i;

    relocs = (struct r600_cs_reloc_legacy *)cs->relocs;
    for (i = 0; i < cs->crelocs; i++) {
        radeon_bo_legacy_pending(relocs[i].base.bo, csm->pending_age);
        radeon_bo_unref(relocs[i].base.bo);
    }
    return 0;
}

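/* Submit the command stream to the DRM.  Two chunks are passed to the
 * DRM_RADEON_CS ioctl: the raw IB (packet buffer) and the reloc chunk
 * built by r600_cs_process_relocs().  The ioctl is retried while the
 * kernel reports -EAGAIN.
 */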
static int r600_cs_emit(struct radeon_cs *cs)
{
    struct drm_radeon_cs cs_cmd;
    struct drm_radeon_cs_chunk cs_chunk[2];
    /* drm_r300_cmd_header_t age; */
    uint32_t length_dw_reloc_chunk;
    uint64_t chunk_ptrs[2];
    uint32_t reloc_chunk[128];
    int r;
    int retry = 0;

    /* TODO : put chip level things here if needed. */
    /* csm->ctx->vtbl.emit_cs_header(cs, csm->ctx); */

    /* TODO : append buffer age */

    r = r600_cs_process_relocs(cs, &reloc_chunk[0], &length_dw_reloc_chunk);
    if (r) {
        return r;
    }

    /* raw ib chunk */
    cs_chunk[0].chunk_id = RADEON_CHUNK_ID_IB;
    cs_chunk[0].length_dw = cs->cdw;
    cs_chunk[0].chunk_data = (unsigned long)(cs->packets);

    /* reloc chunk */
    cs_chunk[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    cs_chunk[1].length_dw = length_dw_reloc_chunk;
    cs_chunk[1].chunk_data = (unsigned long)&reloc_chunk[0];

    chunk_ptrs[0] = (uint64_t)(unsigned long)&cs_chunk[0];
    chunk_ptrs[1] = (uint64_t)(unsigned long)&cs_chunk[1];

    cs_cmd.num_chunks = 2;
    /* cs_cmd.cs_id = 0; */
    cs_cmd.chunks = (uint64_t)(unsigned long)chunk_ptrs;

    /* dump_cmdbuf(cs); */

    do {
        r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS, &cs_cmd, sizeof(cs_cmd));
        retry++;
    } while (r == -EAGAIN && retry < 1000);

    if (r) {
        return r;
    }

    r600_cs_set_age(cs);

    cs->csm->read_used = 0;
    cs->csm->vram_write_used = 0;
    cs->csm->gart_write_used = 0;

    return 0;
}

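/* Free the per-reloc index arrays; the reloc array itself is freed by
 * the caller.
 */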
static inline void r600_cs_free_reloc(void *relocs_p, int crelocs)
{
    struct r600_cs_reloc_legacy *relocs = relocs_p;
    int i;

    if (!relocs_p)
        return;
    for (i = 0; i < crelocs; i++) {
        free(relocs[i].indices);
        free(relocs[i].reloc_indices);
    }
}

static int r600_cs_destroy(struct radeon_cs *cs)
{
    r600_cs_free_reloc(cs->relocs, cs->crelocs);
    free(cs->relocs);
    free(cs->packets);
    free(cs);
    return 0;
}

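/* Reset the CS for reuse without freeing the packet buffer. */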
static int r600_cs_erase(struct radeon_cs *cs)
{
    r600_cs_free_reloc(cs->relocs, cs->crelocs);
    free(cs->relocs);
    cs->relocs_total_size = 0;
    cs->relocs = NULL;
    cs->crelocs = 0;
    cs->cdw = 0;
    cs->section = 0;
    return 0;
}

static int r600_cs_need_flush(struct radeon_cs *cs)
{
    /* this function used to flush when the BO usage reached a certain
     * size; the higher levels handle this better now */
    return 0;
}

static void r600_cs_print(struct radeon_cs *cs, FILE *file)
{
}

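/* Account the BOs for one drawing operation against the VRAM and GART
 * limits.  bo->space_accounted packs the read domains in the upper 16
 * bits and the write domain in the lower 16 bits.  Returns
 * RADEON_CS_SPACE_OK, RADEON_CS_SPACE_FLUSH if the CS must be flushed
 * first, or RADEON_CS_SPACE_OP_TO_BIG if the operation can never fit.
 */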
static int r600_cs_check_space(struct radeon_cs *cs, struct radeon_cs_space_check *bos, int num_bo)
{
    struct radeon_cs_manager *csm = cs->csm;
    int this_op_read = 0, this_op_gart_write = 0, this_op_vram_write = 0;
    uint32_t read_domains, write_domain;
    int i;
    struct radeon_bo *bo;

    /* check the totals for this operation */
    if (num_bo == 0)
        return 0;

    /* prepare */
    for (i = 0; i < num_bo; i++) {
        bo = bos[i].bo;

        bos[i].new_accounted = 0;
        read_domains = bos[i].read_domains;
        write_domain = bos[i].write_domain;

        /* pinned bos don't count */
        if (radeon_legacy_bo_is_static(bo))
            continue;

        /* already accounted this bo */
        if (write_domain && (write_domain == bo->space_accounted))
            continue;

        if (read_domains && ((read_domains << 16) == bo->space_accounted))
            continue;

        if (bo->space_accounted == 0) {
            if (write_domain == RADEON_GEM_DOMAIN_VRAM)
                this_op_vram_write += bo->size;
            else if (write_domain == RADEON_GEM_DOMAIN_GTT)
                this_op_gart_write += bo->size;
            else
                this_op_read += bo->size;
            bos[i].new_accounted = (read_domains << 16) | write_domain;
        } else {
            uint16_t old_read, old_write;

            old_read = bo->space_accounted >> 16;
            old_write = bo->space_accounted & 0xffff;

            if (write_domain && (old_read & write_domain)) {
                bos[i].new_accounted = write_domain;
                /* moving from read to a write domain */
                if (write_domain == RADEON_GEM_DOMAIN_VRAM) {
                    this_op_read -= bo->size;
                    this_op_vram_write += bo->size;
                } else if (write_domain == RADEON_GEM_DOMAIN_GTT) {
                    this_op_read -= bo->size;
                    this_op_gart_write += bo->size;
                }
            } else if (read_domains & old_write) {
                bos[i].new_accounted = bo->space_accounted & 0xffff;
            } else {
                /* rewrite the domains */
                if (write_domain != old_write)
                    fprintf(stderr, "WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n",
                            bo->handle, write_domain, old_write);
                if (read_domains != old_read)
                    fprintf(stderr, "READ DOMAIN RELOC FAILURE 0x%x %d %d\n",
                            bo->handle, read_domains, old_read);
                return RADEON_CS_SPACE_FLUSH;
            }
        }
    }

    if (this_op_read < 0)
        this_op_read = 0;

    /* check sizes - operation first */
    if ((this_op_read + this_op_gart_write > csm->gart_limit) ||
        (this_op_vram_write > csm->vram_limit)) {
        return RADEON_CS_SPACE_OP_TO_BIG;
    }

    if (((csm->vram_write_used + this_op_vram_write) > csm->vram_limit) ||
        ((csm->read_used + csm->gart_write_used + this_op_gart_write + this_op_read) > csm->gart_limit)) {
        return RADEON_CS_SPACE_FLUSH;
    }

    csm->gart_write_used += this_op_gart_write;
    csm->vram_write_used += this_op_vram_write;
    csm->read_used += this_op_read;
    /* commit */
    for (i = 0; i < num_bo; i++) {
        bo = bos[i].bo;
        bo->space_accounted = bos[i].new_accounted;
    }

    return RADEON_CS_SPACE_OK;
}

static struct radeon_cs_funcs r600_cs_funcs = {
    r600_cs_create,
    r600_cs_write_reloc,
    r600_cs_begin,
    r600_cs_end,
    r600_cs_emit,
    r600_cs_destroy,
    r600_cs_erase,
    r600_cs_need_flush,
    r600_cs_print,
    r600_cs_check_space
};

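/* Create the legacy (non-KMS) CS manager for an r600 context. */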
struct radeon_cs_manager *r600_radeon_cs_manager_legacy_ctor(struct radeon_context *ctx)
{
    struct r600_cs_manager_legacy *csm;

    csm = (struct r600_cs_manager_legacy*)
          calloc(1, sizeof(struct r600_cs_manager_legacy));
    if (csm == NULL) {
        return NULL;
    }
    csm->base.funcs = &r600_cs_funcs;
    csm->base.fd = ctx->dri.fd;
    csm->ctx = ctx;
    csm->pending_age = 1;
    return (struct radeon_cs_manager*)csm;
}

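/* Initialize the command buffer: size it from the command_buffer_size
 * driconf option (clamped below), pick the GEM or legacy CS manager
 * depending on kernel memory management, and set the VRAM/GTT limits
 * that the check_space accounting runs against.
 */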
void r600InitCmdBuf(context_t *r600) /* from rcommonInitCmdBuf */
{
    radeonContextPtr rmesa = &r600->radeon;
    GLuint size;

    /* Initialize command buffer */
    size = 256 * driQueryOptioni(&rmesa->optionCache,
                                 "command_buffer_size");
    if (size < 2 * rmesa->hw.max_state_size) {
        size = 2 * rmesa->hw.max_state_size + 65535;
    }
    if (size > 64 * 256)
        size = 64 * 256;

    if (rmesa->radeonScreen->kernel_mm) {
        int fd = rmesa->radeonScreen->driScreen->fd;
        rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
    } else {
        rmesa->cmdbuf.csm = r600_radeon_cs_manager_legacy_ctor(rmesa);
    }
    if (rmesa->cmdbuf.csm == NULL) {
        /* FIXME: fatal error */
        return;
    }
    rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
    assert(rmesa->cmdbuf.cs != NULL);
    rmesa->cmdbuf.size = size;

    if (!rmesa->radeonScreen->kernel_mm) {
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
                            rmesa->radeonScreen->texSize[0]);
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
                            rmesa->radeonScreen->gartTextures.size);
    } else {
        struct drm_radeon_gem_info mminfo;

        if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
                                 &mminfo, sizeof(mminfo))) {
            radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
                                mminfo.vram_visible);
            radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
                                mminfo.gart_size);
        }
    }
}