2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Jérôme Glisse
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
28 * Aapo Tahkola <aet@rasterburn.org>
29 * Nicolai Haehnle <prefect_@gmx.net>
30 * Jérôme Glisse <glisse@freedesktop.org>
35 #include "common_context.h"
36 #include "radeon_cs.h"
37 #include "radeon_cs_legacy.h"
38 #include "radeon_bo_legacy.h"
41 struct cs_manager_legacy
{
42 struct radeon_cs_manager base
;
43 struct radeon_context
*ctx
;
44 /* hack for scratch stuff */
46 uint32_t pending_count
;
51 struct cs_reloc_legacy
{
52 struct radeon_cs_reloc base
;
58 static struct radeon_cs
*cs_create(struct radeon_cs_manager
*csm
,
63 cs
= (struct radeon_cs
*)calloc(1, sizeof(struct radeon_cs
));
68 cs
->ndw
= (ndw
+ 0x3FF) & (~0x3FF);
69 cs
->packets
= (uint32_t*)malloc(4*cs
->ndw
);
70 if (cs
->packets
== NULL
) {
74 cs
->relocs_total_size
= 0;
78 int cs_write_dword(struct radeon_cs
*cs
, uint32_t dword
)
80 if (cs
->cdw
>= cs
->ndw
) {
82 tmp
= (cs
->cdw
+ 1 + 0x3FF) & (~0x3FF);
83 ptr
= (uint32_t*)realloc(cs
->packets
, 4 * tmp
);
90 cs
->packets
[cs
->cdw
++] = dword
;
97 static int cs_write_reloc(struct radeon_cs
*cs
,
100 uint32_t write_domain
,
103 struct cs_reloc_legacy
*relocs
;
106 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
108 if ((read_domain
&& write_domain
) || (!read_domain
&& !write_domain
)) {
109 /* in one CS a bo can only be in read or write domain but not
110 * in read & write domain at the same sime
114 if (read_domain
== RADEON_GEM_DOMAIN_CPU
) {
117 if (write_domain
== RADEON_GEM_DOMAIN_CPU
) {
120 /* check if bo is already referenced */
121 for(i
= 0; i
< cs
->crelocs
; i
++) {
124 if (relocs
[i
].base
.bo
->handle
== bo
->handle
) {
125 /* Check domains must be in read or write. As we check already
126 * checked that in argument one of the read or write domain was
127 * set we only need to check that if previous reloc as the read
128 * domain set then the read_domain should also be set for this
131 if (relocs
[i
].base
.read_domain
&& !read_domain
) {
134 if (relocs
[i
].base
.write_domain
&& !write_domain
) {
137 relocs
[i
].base
.read_domain
|= read_domain
;
138 relocs
[i
].base
.write_domain
|= write_domain
;
140 relocs
[i
].cindices
++;
141 indices
= (uint32_t*)realloc(relocs
[i
].indices
,
142 relocs
[i
].cindices
* 4);
143 if (indices
== NULL
) {
144 relocs
[i
].cindices
-= 1;
147 relocs
[i
].indices
= indices
;
148 relocs
[i
].indices
[relocs
[i
].cindices
- 1] = cs
->cdw
- 1;
152 /* add bo to reloc */
153 relocs
= (struct cs_reloc_legacy
*)
155 sizeof(struct cs_reloc_legacy
) * (cs
->crelocs
+ 1));
156 if (relocs
== NULL
) {
160 relocs
[cs
->crelocs
].base
.bo
= bo
;
161 relocs
[cs
->crelocs
].base
.read_domain
= read_domain
;
162 relocs
[cs
->crelocs
].base
.write_domain
= write_domain
;
163 relocs
[cs
->crelocs
].base
.flags
= flags
;
164 relocs
[cs
->crelocs
].indices
= (uint32_t*)malloc(4);
165 if (relocs
[cs
->crelocs
].indices
== NULL
) {
168 relocs
[cs
->crelocs
].indices
[0] = cs
->cdw
- 1;
169 relocs
[cs
->crelocs
].cindices
= 1;
170 cs
->relocs_total_size
+= radeon_bo_legacy_relocs_size(bo
);
176 static int cs_begin(struct radeon_cs
*cs
,
183 fprintf(stderr
, "CS already in a section(%s,%s,%d)\n",
184 cs
->section_file
, cs
->section_func
, cs
->section_line
);
185 fprintf(stderr
, "CS can't start section(%s,%s,%d)\n",
190 cs
->section_ndw
= ndw
;
192 cs
->section_file
= file
;
193 cs
->section_func
= func
;
194 cs
->section_line
= line
;
198 static int cs_end(struct radeon_cs
*cs
,
205 fprintf(stderr
, "CS no section to end at (%s,%s,%d)\n",
210 if (cs
->section_ndw
!= cs
->section_cdw
) {
211 fprintf(stderr
, "CS section size missmatch start at (%s,%s,%d) %d vs %d\n",
212 cs
->section_file
, cs
->section_func
, cs
->section_line
, cs
->section_ndw
, cs
->section_cdw
);
213 fprintf(stderr
, "CS section end at (%s,%s,%d)\n",
220 static int cs_process_relocs(struct radeon_cs
*cs
)
222 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
223 struct cs_reloc_legacy
*relocs
;
226 csm
= (struct cs_manager_legacy
*)cs
->csm
;
227 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
228 for (i
= 0; i
< cs
->crelocs
; i
++) {
229 for (j
= 0; j
< relocs
[i
].cindices
; j
++) {
230 uint32_t soffset
, eoffset
;
232 r
= radeon_bo_legacy_validate(relocs
[i
].base
.bo
,
235 fprintf(stderr
, "validated %p [0x%08X, 0x%08X]\n",
236 relocs
[i
].base
.bo
, soffset
, eoffset
);
239 cs
->packets
[relocs
[i
].indices
[j
]] += soffset
;
240 if (cs
->packets
[relocs
[i
].indices
[j
]] >= eoffset
) {
241 radeon_bo_debug(relocs
[i
].base
.bo
, 12);
242 fprintf(stderr
, "validated %p [0x%08X, 0x%08X]\n",
243 relocs
[i
].base
.bo
, soffset
, eoffset
);
244 fprintf(stderr
, "above end: %p 0x%08X 0x%08X\n",
246 cs
->packets
[relocs
[i
].indices
[j
]],
256 static int cs_set_age(struct radeon_cs
*cs
)
258 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
259 struct cs_reloc_legacy
*relocs
;
262 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
263 for (i
= 0; i
< cs
->crelocs
; i
++) {
264 radeon_bo_legacy_pending(relocs
[i
].base
.bo
, csm
->pending_age
);
265 radeon_bo_unref(relocs
[i
].base
.bo
);
270 static void dump_cmdbuf(struct radeon_cs
*cs
)
273 for (i
= 0; i
< cs
->cdw
; i
++){
274 fprintf(stderr
,"%x: %08x\n", i
, cs
->packets
[i
]);
278 static int cs_emit(struct radeon_cs
*cs
)
280 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
281 drm_radeon_cmd_buffer_t cmd
;
282 drm_r300_cmd_header_t age
;
286 csm
->ctx
->vtbl
.emit_cs_header(cs
, csm
->ctx
);
289 /* append buffer age */
290 if (IS_R300_CLASS(csm
->ctx
->radeonScreen
)) {
291 age
.scratch
.cmd_type
= R300_CMD_SCRATCH
;
292 /* Scratch register 2 corresponds to what radeonGetAge polls */
293 csm
->pending_age
= 0;
294 csm
->pending_count
= 1;
295 ull
= (uint64_t) (intptr_t) &csm
->pending_age
;
297 age
.scratch
.n_bufs
= 1;
298 age
.scratch
.flags
= 0;
299 radeon_cs_write_dword(cs
, age
.u
);
300 radeon_cs_write_dword(cs
, ull
& 0xffffffff);
301 radeon_cs_write_dword(cs
, ull
>> 32);
302 radeon_cs_write_dword(cs
, 0);
305 r
= cs_process_relocs(cs
);
310 cmd
.buf
= (char *)cs
->packets
;
311 cmd
.bufsz
= cs
->cdw
* 4;
312 if (csm
->ctx
->state
.scissor
.enabled
) {
313 cmd
.nbox
= csm
->ctx
->state
.scissor
.numClipRects
;
314 cmd
.boxes
= (drm_clip_rect_t
*) csm
->ctx
->state
.scissor
.pClipRects
;
316 cmd
.nbox
= csm
->ctx
->numClipRects
;
317 cmd
.boxes
= (drm_clip_rect_t
*) csm
->ctx
->pClipRects
;
322 r
= drmCommandWrite(cs
->csm
->fd
, DRM_RADEON_CMDBUF
, &cmd
, sizeof(cmd
));
326 if (!IS_R300_CLASS(csm
->ctx
->radeonScreen
)) {
327 drm_radeon_irq_emit_t emit_cmd
;
328 emit_cmd
.irq_seq
= &csm
->pending_age
;
329 r
= drmCommandWrite(cs
->csm
->fd
, DRM_RADEON_IRQ_EMIT
, &emit_cmd
, sizeof(emit_cmd
));
336 cs
->csm
->read_used
= 0;
337 cs
->csm
->vram_write_used
= 0;
338 cs
->csm
->gart_write_used
= 0;
342 static void inline cs_free_reloc(void *relocs_p
, int crelocs
)
344 struct cs_reloc_legacy
*relocs
= relocs_p
;
348 for (i
= 0; i
< crelocs
; i
++)
349 free(relocs
[i
].indices
);
352 static int cs_destroy(struct radeon_cs
*cs
)
354 cs_free_reloc(cs
->relocs
, cs
->crelocs
);
361 static int cs_erase(struct radeon_cs
*cs
)
363 cs_free_reloc(cs
->relocs
, cs
->crelocs
);
365 cs
->relocs_total_size
= 0;
373 static int cs_need_flush(struct radeon_cs
*cs
)
375 /* FIXME: we should get the texture heap size */
376 return (cs
->relocs_total_size
> (7*1024*1024));
/* Print hook required by the radeon_cs_funcs vtable; intentionally a
 * no-op for the legacy path (see dump_cmdbuf for debug output). */
static void cs_print(struct radeon_cs *cs, FILE *file)
{
    (void)cs;
    (void)file;
}
383 static int cs_check_space(struct radeon_cs
*cs
, struct radeon_cs_space_check
*bos
, int num_bo
)
385 struct radeon_cs_manager
*csm
= cs
->csm
;
386 int this_op_read
= 0, this_op_gart_write
= 0, this_op_vram_write
= 0;
387 uint32_t read_domains
, write_domain
;
389 struct radeon_bo
*bo
;
391 /* check the totals for this operation */
397 for (i
= 0; i
< num_bo
; i
++) {
400 bos
[i
].new_accounted
= 0;
401 read_domains
= bos
[i
].read_domains
;
402 write_domain
= bos
[i
].write_domain
;
404 /* pinned bos don't count */
405 if (radeon_legacy_bo_is_static(bo
))
408 /* already accounted this bo */
409 if (write_domain
&& (write_domain
== bo
->space_accounted
))
412 if (read_domains
&& ((read_domains
<< 16) == bo
->space_accounted
))
415 if (bo
->space_accounted
== 0) {
416 if (write_domain
== RADEON_GEM_DOMAIN_VRAM
)
417 this_op_vram_write
+= bo
->size
;
418 else if (write_domain
== RADEON_GEM_DOMAIN_GTT
)
419 this_op_gart_write
+= bo
->size
;
421 this_op_read
+= bo
->size
;
422 bos
[i
].new_accounted
= (read_domains
<< 16) | write_domain
;
424 uint16_t old_read
, old_write
;
426 old_read
= bo
->space_accounted
>> 16;
427 old_write
= bo
->space_accounted
& 0xffff;
429 if (write_domain
&& (old_read
& write_domain
)) {
430 bos
[i
].new_accounted
= write_domain
;
431 /* moving from read to a write domain */
432 if (write_domain
== RADEON_GEM_DOMAIN_VRAM
) {
433 this_op_read
-= bo
->size
;
434 this_op_vram_write
+= bo
->size
;
435 } else if (write_domain
== RADEON_GEM_DOMAIN_VRAM
) {
436 this_op_read
-= bo
->size
;
437 this_op_gart_write
+= bo
->size
;
439 } else if (read_domains
& old_write
) {
440 bos
[i
].new_accounted
= bo
->space_accounted
& 0xffff;
442 /* rewrite the domains */
443 if (write_domain
!= old_write
)
444 fprintf(stderr
,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo
->handle
, write_domain
, old_write
);
445 if (read_domains
!= old_read
)
446 fprintf(stderr
,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo
->handle
, read_domains
, old_read
);
447 return RADEON_CS_SPACE_FLUSH
;
452 if (this_op_read
< 0)
455 /* check sizes - operation first */
456 if ((this_op_read
+ this_op_gart_write
> csm
->gart_limit
) ||
457 (this_op_vram_write
> csm
->vram_limit
)) {
458 return RADEON_CS_SPACE_OP_TO_BIG
;
461 if (((csm
->vram_write_used
+ this_op_vram_write
) > csm
->vram_limit
) ||
462 ((csm
->read_used
+ csm
->gart_write_used
+ this_op_gart_write
+ this_op_read
) > csm
->gart_limit
)) {
463 return RADEON_CS_SPACE_FLUSH
;
466 csm
->gart_write_used
+= this_op_gart_write
;
467 csm
->vram_write_used
+= this_op_vram_write
;
468 csm
->read_used
+= this_op_read
;
470 for (i
= 0; i
< num_bo
; i
++) {
472 bo
->space_accounted
= bos
[i
].new_accounted
;
475 return RADEON_CS_SPACE_OK
;
478 static struct radeon_cs_funcs radeon_cs_legacy_funcs
= {
492 struct radeon_cs_manager
*radeon_cs_manager_legacy_ctor(struct radeon_context
*ctx
)
494 struct cs_manager_legacy
*csm
;
496 csm
= (struct cs_manager_legacy
*)
497 calloc(1, sizeof(struct cs_manager_legacy
));
501 csm
->base
.funcs
= &radeon_cs_legacy_funcs
;
502 csm
->base
.fd
= ctx
->dri
.fd
;
504 csm
->pending_age
= 1;
505 return (struct radeon_cs_manager
*)csm
;
508 void radeon_cs_manager_legacy_dtor(struct radeon_cs_manager
*csm
)