Fix more merge fallout
[mesa.git] src/mesa/drivers/dri/r600/r600_cmdbuf.c
/*
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/**
 * Mostly copied from radeon/radeon_cs_legacy.c
 */

#include <errno.h>

#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/simple_list.h"
#include "swrast/swrast.h"

#include "drm.h"
#include "radeon_drm.h"

#include "r600_context.h"
#include "radeon_reg.h"
#include "r600_cmdbuf.h"
#include "r600_emit.h"
#include "radeon_bocs_wrapper.h"
#include "radeon_mipmap_tree.h"

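/*
 * Legacy (non-GEM) command-submission state.  The manager wraps the
 * generic radeon_cs_manager and additionally tracks a scratch-register
 * "age" used to tell when the kernel has consumed a submitted buffer.
 * Each relocation remembers every command-stream index that refers to
 * its buffer object, plus an offset modifier describing how the GPU
 * address must be shifted and masked before being patched in.
 */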
struct r600_cs_manager_legacy
{
    struct radeon_cs_manager base;
    struct radeon_context *ctx;
    /* hack for scratch stuff */
    uint32_t pending_age;
    uint32_t pending_count;
};

struct r600_cs_reloc_legacy {
    struct radeon_cs_reloc base;
    uint32_t cindices;
    uint32_t *indices;
    uint32_t *reloc_indices;
    struct offset_modifiers offset_mod;
};

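/*
 * Allocate a command stream.  The requested size is rounded up to a
 * multiple of 0x400 dwords, the same granularity used when the buffer
 * is grown in r600_cs_begin().
 */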
static struct radeon_cs * r600_cs_create(struct radeon_cs_manager *csm,
                                         uint32_t ndw)
{
    struct radeon_cs *cs;

    cs = (struct radeon_cs*)calloc(1, sizeof(struct radeon_cs));
    if (cs == NULL) {
        return NULL;
    }
    cs->csm = csm;
    cs->ndw = (ndw + 0x3FF) & (~0x3FF);
    cs->packets = (uint32_t*)malloc(4*cs->ndw);
    if (cs->packets == NULL) {
        free(cs);
        return NULL;
    }
    cs->relocs_total_size = 0;
    return cs;
}

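/*
 * Record a relocation for bo at the current command-stream position.
 * Within one CS a buffer may be relocated either for reading or for
 * writing, never both.  For every reference, two dwords of the
 * section's reloc-index area are reserved: a PKT3 NOP header and an
 * index into the reloc chunk, filled in later by
 * r600_cs_process_relocs().
 */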
int r600_cs_write_reloc(struct radeon_cs *cs,
                        struct radeon_bo *bo,
                        uint32_t read_domain,
                        uint32_t write_domain,
                        uint32_t flags,
                        offset_modifiers* poffset_mod)
{
    struct r600_cs_reloc_legacy *relocs;
    int i;

    relocs = (struct r600_cs_reloc_legacy *)cs->relocs;
    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* in one CS a bo can only be in the read or the write domain,
         * not in both at the same time
         */
        return -EINVAL;
    }
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    /* check if bo is already referenced */
    for(i = 0; i < cs->crelocs; i++) {
        uint32_t *indices;
        uint32_t *reloc_indices;

        if (relocs[i].base.bo->handle == bo->handle) {
            /* We already checked above that exactly one of the read and
             * write domains is set, so here we only need to check that
             * the new reference uses the same domain as the previous
             * relocation of this bo.
             */
            if (relocs[i].base.read_domain && !read_domain) {
                return -EINVAL;
            }
            if (relocs[i].base.write_domain && !write_domain) {
                return -EINVAL;
            }
            relocs[i].base.read_domain |= read_domain;
            relocs[i].base.write_domain |= write_domain;
            /* save index */
            relocs[i].cindices++;
            indices = (uint32_t*)realloc(relocs[i].indices,
                                         relocs[i].cindices * 4);
            reloc_indices = (uint32_t*)realloc(relocs[i].reloc_indices,
                                               relocs[i].cindices * 4);
            /* keep whichever reallocation succeeded so we don't hold a
             * dangling pointer if only one of them failed */
            if (indices != NULL)
                relocs[i].indices = indices;
            if (reloc_indices != NULL)
                relocs[i].reloc_indices = reloc_indices;
            if ( (indices == NULL) || (reloc_indices == NULL) ) {
                relocs[i].cindices -= 1;
                return -ENOMEM;
            }
            relocs[i].indices[relocs[i].cindices - 1] = cs->cdw - 1;
            relocs[i].reloc_indices[relocs[i].cindices - 1] = cs->section_cdw;
            cs->section_ndw += 2;
            cs->section_cdw += 2;

            relocs[i].offset_mod.shift = poffset_mod->shift;
            relocs[i].offset_mod.shiftbits = poffset_mod->shiftbits;
            relocs[i].offset_mod.mask = poffset_mod->mask;

            return 0;
        }
    }
    /* add bo to reloc */
    relocs = (struct r600_cs_reloc_legacy*)
             realloc(cs->relocs,
                     sizeof(struct r600_cs_reloc_legacy) * (cs->crelocs + 1));
    if (relocs == NULL) {
        return -ENOMEM;
    }
    cs->relocs = relocs;
    relocs[cs->crelocs].base.bo = bo;
    relocs[cs->crelocs].base.read_domain = read_domain;
    relocs[cs->crelocs].base.write_domain = write_domain;
    relocs[cs->crelocs].base.flags = flags;
    relocs[cs->crelocs].indices = (uint32_t*)malloc(4);
    relocs[cs->crelocs].reloc_indices = (uint32_t*)malloc(4);
    if ( (relocs[cs->crelocs].indices == NULL) || (relocs[cs->crelocs].reloc_indices == NULL) )
    {
        return -ENOMEM;
    }
    relocs[cs->crelocs].offset_mod.shift = poffset_mod->shift;
    relocs[cs->crelocs].offset_mod.shiftbits = poffset_mod->shiftbits;
    relocs[cs->crelocs].offset_mod.mask = poffset_mod->mask;

    relocs[cs->crelocs].indices[0] = cs->cdw - 1;
    relocs[cs->crelocs].reloc_indices[0] = cs->section_cdw;
    cs->section_ndw += 2;
    cs->section_cdw += 2;
    relocs[cs->crelocs].cindices = 1;
    cs->relocs_total_size += radeon_bo_legacy_relocs_size(bo);
    cs->crelocs++;

    radeon_bo_ref(bo);

    return 0;
}

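/*
 * A "section" brackets the emission of ndw dwords of packets.  begin()
 * grows the buffer if needed and reserves room after the packets for
 * the per-reloc PKT3 NOP pairs; end() verifies that exactly the
 * declared number of dwords (plus the reloc pairs) was written, then
 * advances cdw past the reloc-index area.
 */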
static int r600_cs_begin(struct radeon_cs *cs,
                         uint32_t ndw,
                         const char *file,
                         const char *func,
                         int line)
{
    if (cs->section) {
        fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
                cs->section_file, cs->section_func, cs->section_line);
        fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }

    if (cs->cdw + ndw + 32 > cs->ndw) { /* Leave 32 dwords (8 offset+pitch pairs) of spare room for reloc indices */
        uint32_t tmp, *ptr;
        int num = (ndw > 0x3FF) ? ndw : 0x3FF;

        tmp = (cs->cdw + 1 + num) & (~num);
        ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
        if (ptr == NULL) {
            return -ENOMEM;
        }
        cs->packets = ptr;
        cs->ndw = tmp;
    }

    cs->section = 1;
    cs->section_ndw = 0;
    cs->section_cdw = cs->cdw + ndw; /* start of reloc indices */
    cs->section_file = file;
    cs->section_func = func;
    cs->section_line = line;

    return 0;
}

static int r600_cs_end(struct radeon_cs *cs,
                       const char *file,
                       const char *func,
                       int line)
{
    if (!cs->section) {
        fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    cs->section = 0;

    if ( (cs->section_ndw + cs->cdw) != cs->section_cdw )
    {
        fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
                cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
        fprintf(stderr, "cs->section_ndw = %d, cs->cdw = %d, cs->section_cdw = %d\n",
                cs->section_ndw, cs->cdw, cs->section_cdw);
        fprintf(stderr, "CS section end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }

    /* skip past the reloc-index area that write_reloc filled in */
    cs->cdw = cs->section_cdw;
    return 0;
}

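/*
 * Validate every referenced bo, apply its offset modifier to the GPU
 * address, then patch the IB: each reserved pair in the packet stream
 * becomes a PKT3 NOP header (0xC0001000) followed by the dword offset
 * of this relocation's entry in the reloc chunk.  Each reloc-chunk
 * entry occupies four dwords, the first of which holds the asic offset
 * (see r600_nomm_relocate in the drm).
 */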
static int r600_cs_process_relocs(struct radeon_cs *cs,
                                  uint32_t * reloc_chunk,
                                  uint32_t * length_dw_reloc_chunk)
{
    struct r600_cs_manager_legacy *csm = (struct r600_cs_manager_legacy*)cs->csm;
    struct r600_cs_reloc_legacy *relocs;
    int i, j, r;

    uint32_t offset_dw = 0;

    relocs = (struct r600_cs_reloc_legacy *)cs->relocs;
restart:
    for (i = 0; i < cs->crelocs; i++)
    {
        for (j = 0; j < relocs[i].cindices; j++)
        {
            uint32_t soffset, eoffset, asicoffset;

            r = radeon_bo_legacy_validate(relocs[i].base.bo,
                                          &soffset, &eoffset);
            if (r == -EAGAIN)
            {
                goto restart;
            }
            if (r)
            {
                fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
                        relocs[i].base.bo, soffset, eoffset);
                return r;
            }
            asicoffset = soffset;
            if (asicoffset >= eoffset)
            {
                /* radeon_bo_debug(relocs[i].base.bo, 12); */
                fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
                        relocs[i].base.bo, soffset, eoffset);
                fprintf(stderr, "above end: %p 0x%08X 0x%08X\n",
                        relocs[i].base.bo,
                        cs->packets[relocs[i].indices[j]],
                        eoffset);
                exit(0);
                return -EINVAL;
            }
            /* apply offset operator */
            switch (relocs[i].offset_mod.shift)
            {
            case NO_SHIFT:
                asicoffset = asicoffset & relocs[i].offset_mod.mask;
                break;
            case LEFT_SHIFT:
                asicoffset = (asicoffset << relocs[i].offset_mod.shiftbits) & relocs[i].offset_mod.mask;
                break;
            case RIGHT_SHIFT:
                asicoffset = (asicoffset >> relocs[i].offset_mod.shiftbits) & relocs[i].offset_mod.mask;
                break;
            default:
                break;
            }

            /* pkt3 nop header in ib chunk */
            cs->packets[relocs[i].reloc_indices[j]] = 0xC0001000;

            /* reloc index in ib chunk */
            cs->packets[relocs[i].reloc_indices[j] + 1] = offset_dw;

            /* asic offset in reloc chunk */ /* see alex drm r600_nomm_relocate */
            reloc_chunk[offset_dw] = asicoffset;
            reloc_chunk[offset_dw + 3] = 0;

            offset_dw += 4;
        }
    }

    *length_dw_reloc_chunk = offset_dw;

    return 0;
}

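/*
 * Stamp every bo referenced by this CS with the scratch-register age
 * that was just emitted, so the legacy bo code can tell when the
 * hardware is done with them, then drop the references taken in
 * r600_cs_write_reloc().
 */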
static int r600_cs_set_age(struct radeon_cs *cs)
{
    struct r600_cs_manager_legacy *csm = (struct r600_cs_manager_legacy*)cs->csm;
    struct r600_cs_reloc_legacy *relocs;
    int i;

    relocs = (struct r600_cs_reloc_legacy *)cs->relocs;
    for (i = 0; i < cs->crelocs; i++) {
        radeon_bo_legacy_pending(relocs[i].base.bo, csm->pending_age);
        radeon_bo_unref(relocs[i].base.bo);
    }
    return 0;
}

/* debug helper: print the raw command stream */
static void dump_cmdbuf(struct radeon_cs *cs)
{
    int i;
    fprintf(stderr,"--start--\n");
    for (i = 0; i < cs->cdw; i++){
        fprintf(stderr,"0x%08x\n", cs->packets[i]);
    }
    fprintf(stderr,"--end--\n");
}

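/*
 * Submit the command stream.  The CP is first asked to write an
 * incremented age into SCRATCH_REG2; the buffer objects are tagged
 * with that age in r600_cs_set_age() below.  The stream itself goes to
 * the kernel through the DRM_RADEON_CS ioctl as two chunks: the raw IB
 * and the relocation table built by r600_cs_process_relocs().
 */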
static int r600_cs_emit(struct radeon_cs *cs)
{
    struct r600_cs_manager_legacy *csm = (struct r600_cs_manager_legacy*)cs->csm;
    struct drm_radeon_cs cs_cmd;
    struct drm_radeon_cs_chunk cs_chunk[2];
    /* drm_r300_cmd_header_t age; */
    uint32_t length_dw_reloc_chunk;
    uint64_t chunk_ptrs[2];
    uint32_t reloc_chunk[128];
    int r;
    int retry = 0;

    /* TODO: put chip level things here if needed */
    /* csm->ctx->vtbl.emit_cs_header(cs, csm->ctx); */

    BATCH_LOCALS(csm->ctx);
    drm_radeon_getparam_t gp;
    uint32_t current_scratchx_age;

    gp.param = RADEON_PARAM_LAST_CLEAR;
    gp.value = (int *)&current_scratchx_age;
    r = drmCommandWriteRead(cs->csm->fd,
                            DRM_RADEON_GETPARAM,
                            &gp,
                            sizeof(gp));
    if (r)
    {
        fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
        exit(1);
    }

    csm->pending_age = 0;
    csm->pending_count = 1;

    current_scratchx_age++;
    csm->pending_age = current_scratchx_age;

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH((SCRATCH_REG2 - R600_SET_CONFIG_REG_OFFSET) >> 2);
    R600_OUT_BATCH(current_scratchx_age);
    END_BATCH();
    COMMIT_BATCH();

    /* TODO: ioctl to get back the cs id assigned in the drm */
    /* csm->pending_age = cs_id_back; */

    r = r600_cs_process_relocs(cs, &(reloc_chunk[0]), &length_dw_reloc_chunk);
    if (r) {
        return r;
    }

    /* raw ib chunk */
    cs_chunk[0].chunk_id = RADEON_CHUNK_ID_IB;
    cs_chunk[0].length_dw = cs->cdw;
    cs_chunk[0].chunk_data = (unsigned long)(cs->packets);

    /* reloc chunk */
    cs_chunk[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    cs_chunk[1].length_dw = length_dw_reloc_chunk;
    cs_chunk[1].chunk_data = (unsigned long)&(reloc_chunk[0]);

    chunk_ptrs[0] = (uint64_t)(unsigned long)&(cs_chunk[0]);
    chunk_ptrs[1] = (uint64_t)(unsigned long)&(cs_chunk[1]);

    cs_cmd.num_chunks = 2;
    /* cs_cmd.cs_id = 0; */
    cs_cmd.chunks = (uint64_t)(unsigned long)chunk_ptrs;

    /* dump_cmdbuf(cs); */

    do
    {
        r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS, &cs_cmd, sizeof(cs_cmd));
        retry++;
    } while (r == -EAGAIN && retry < 1000);

    if (r) {
        return r;
    }

    r600_cs_set_age(cs);

    cs->csm->read_used = 0;
    cs->csm->vram_write_used = 0;
    cs->csm->gart_write_used = 0;

    return 0;
}

static inline void r600_cs_free_reloc(void *relocs_p, int crelocs)
{
    struct r600_cs_reloc_legacy *relocs = relocs_p;
    int i;
    if (!relocs_p)
        return;
    for (i = 0; i < crelocs; i++)
    {
        free(relocs[i].indices);
        free(relocs[i].reloc_indices);
    }
}

static int r600_cs_destroy(struct radeon_cs *cs)
{
    r600_cs_free_reloc(cs->relocs, cs->crelocs);
    free(cs->relocs);
    free(cs->packets);
    free(cs);
    return 0;
}

static int r600_cs_erase(struct radeon_cs *cs)
{
    r600_cs_free_reloc(cs->relocs, cs->crelocs);
    free(cs->relocs);
    cs->relocs_total_size = 0;
    cs->relocs = NULL;
    cs->crelocs = 0;
    cs->cdw = 0;
    cs->section = 0;
    return 0;
}

static int r600_cs_need_flush(struct radeon_cs *cs)
{
    /* this function used to flush when the BO usage reached a certain
     * size; the higher levels handle this better now */
    return 0;
}

static void r600_cs_print(struct radeon_cs *cs, FILE *file)
{
}

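/*
 * Space accounting for a set of buffer objects about to be emitted.
 * bo->space_accounted packs the read domains in the upper 16 bits and
 * the write domain in the lower 16.  Returns RADEON_CS_SPACE_OK when
 * the buffers fit in the current limits, RADEON_CS_SPACE_FLUSH when
 * the CS must be flushed first, and RADEON_CS_SPACE_OP_TO_BIG when the
 * operation can never fit.
 */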
static int r600_cs_check_space(struct radeon_cs *cs, struct radeon_cs_space_check *bos, int num_bo)
{
    struct radeon_cs_manager *csm = cs->csm;
    int this_op_read = 0, this_op_gart_write = 0, this_op_vram_write = 0;
    uint32_t read_domains, write_domain;
    int i;
    struct radeon_bo *bo;

    /* check the totals for this operation */

    if (num_bo == 0)
        return 0;

    /* prepare */
    for (i = 0; i < num_bo; i++)
    {
        bo = bos[i].bo;

        bos[i].new_accounted = 0;
        read_domains = bos[i].read_domains;
        write_domain = bos[i].write_domain;

        /* pinned bos don't count */
        if (radeon_bo_is_static(bo))
            continue;

        /* already accounted this bo */
        if (write_domain && (write_domain == bo->space_accounted))
            continue;

        if (read_domains && ((read_domains << 16) == bo->space_accounted))
            continue;

        if (bo->space_accounted == 0)
        {
            if (write_domain == RADEON_GEM_DOMAIN_VRAM)
                this_op_vram_write += bo->size;
            else if (write_domain == RADEON_GEM_DOMAIN_GTT)
                this_op_gart_write += bo->size;
            else
                this_op_read += bo->size;
            bos[i].new_accounted = (read_domains << 16) | write_domain;
        }
        else
        {
            uint16_t old_read, old_write;

            old_read = bo->space_accounted >> 16;
            old_write = bo->space_accounted & 0xffff;

            if (write_domain && (old_read & write_domain))
            {
                bos[i].new_accounted = write_domain;
                /* moving from a read to a write domain */
                if (write_domain == RADEON_GEM_DOMAIN_VRAM)
                {
                    this_op_read -= bo->size;
                    this_op_vram_write += bo->size;
                }
                else if (write_domain == RADEON_GEM_DOMAIN_GTT)
                {
                    this_op_read -= bo->size;
                    this_op_gart_write += bo->size;
                }
            }
            else if (read_domains & old_write)
            {
                bos[i].new_accounted = bo->space_accounted & 0xffff;
            }
            else
            {
                /* rewrite the domains */
                if (write_domain != old_write)
                    fprintf(stderr,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, write_domain, old_write);
                if (read_domains != old_read)
                    fprintf(stderr,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, read_domains, old_read);
                return RADEON_CS_SPACE_FLUSH;
            }
        }
    }

    if (this_op_read < 0)
        this_op_read = 0;

    /* check sizes - operation first */
    if ((this_op_read + this_op_gart_write > csm->gart_limit) ||
        (this_op_vram_write > csm->vram_limit)) {
        return RADEON_CS_SPACE_OP_TO_BIG;
    }

    if (((csm->vram_write_used + this_op_vram_write) > csm->vram_limit) ||
        ((csm->read_used + csm->gart_write_used + this_op_gart_write + this_op_read) > csm->gart_limit)) {
        return RADEON_CS_SPACE_FLUSH;
    }

    csm->gart_write_used += this_op_gart_write;
    csm->vram_write_used += this_op_vram_write;
    csm->read_used += this_op_read;
    /* commit */
    for (i = 0; i < num_bo; i++) {
        bo = bos[i].bo;
        bo->space_accounted = bos[i].new_accounted;
    }

    return RADEON_CS_SPACE_OK;
}

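/* Function table plugged into the generic radeon_cs layer; the entry
 * order follows struct radeon_cs_funcs. */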
static struct radeon_cs_funcs r600_cs_funcs = {
    r600_cs_create,
    r600_cs_write_reloc,
    r600_cs_begin,
    r600_cs_end,
    r600_cs_emit,
    r600_cs_destroy,
    r600_cs_erase,
    r600_cs_need_flush,
    r600_cs_print,
    r600_cs_check_space
};

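/*
 * Create a CS manager for the legacy (non-kernel-memory-manager) path;
 * the GEM path uses radeon_cs_manager_gem_ctor() instead (see
 * r600InitCmdBuf below).
 */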
struct radeon_cs_manager * r600_radeon_cs_manager_legacy_ctor(struct radeon_context *ctx)
{
    struct r600_cs_manager_legacy *csm;

    csm = (struct r600_cs_manager_legacy*)
          calloc(1, sizeof(struct r600_cs_manager_legacy));
    if (csm == NULL) {
        return NULL;
    }
    csm->base.funcs = &r600_cs_funcs;
    csm->base.fd = ctx->dri.fd;
    csm->ctx = ctx;
    csm->pending_age = 1;
    return (struct radeon_cs_manager*)csm;
}

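/*
 * Create the context's command buffer.  The size comes from the
 * command_buffer_size driconf option, clamped so that two full sets of
 * state atoms always fit, and the VRAM/GTT space-checking limits are
 * initialized from either the legacy texture heaps or the kernel
 * memory-manager info, depending on which path is active.
 */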
void r600InitCmdBuf(context_t *r600) /* from rcommonInitCmdBuf */
{
    radeonContextPtr rmesa = &r600->radeon;

    GLuint size;
    /* Initialize command buffer */
    size = 256 * driQueryOptioni(&rmesa->optionCache,
                                 "command_buffer_size");
    if (size < 2 * rmesa->hw.max_state_size) {
        size = 2 * rmesa->hw.max_state_size + 65535;
    }
    if (size > 64 * 256)
        size = 64 * 256;

    if (rmesa->radeonScreen->kernel_mm) {
        int fd = rmesa->radeonScreen->driScreen->fd;
        rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
    } else {
        rmesa->cmdbuf.csm = r600_radeon_cs_manager_legacy_ctor(rmesa);
    }
    if (rmesa->cmdbuf.csm == NULL) {
        /* FIXME: fatal error */
        return;
    }
    rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
    assert(rmesa->cmdbuf.cs != NULL);
    rmesa->cmdbuf.size = size;

    if (!rmesa->radeonScreen->kernel_mm) {
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
        radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
    } else {
        struct drm_radeon_gem_info mminfo;

        if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
        {
            radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible);
            radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
        }
    }
}
678