2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Jérôme Glisse
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
28 * Aapo Tahkola <aet@rasterburn.org>
29 * Nicolai Haehnle <prefect_@gmx.net>
30 * Jérôme Glisse <glisse@freedesktop.org>
34 #include "radeon_bocs_wrapper.h"
36 struct cs_manager_legacy
{
37 struct radeon_cs_manager base
;
38 struct radeon_context
*ctx
;
39 /* hack for scratch stuff */
41 uint32_t pending_count
;
46 struct cs_reloc_legacy
{
47 struct radeon_cs_reloc base
;
53 static struct radeon_cs
*cs_create(struct radeon_cs_manager
*csm
,
58 cs
= (struct radeon_cs
*)calloc(1, sizeof(struct radeon_cs
));
63 cs
->ndw
= (ndw
+ 0x3FF) & (~0x3FF);
64 cs
->packets
= (uint32_t*)malloc(4*cs
->ndw
);
65 if (cs
->packets
== NULL
) {
69 cs
->relocs_total_size
= 0;
73 static int cs_write_reloc(struct radeon_cs
*cs
,
76 uint32_t write_domain
,
79 struct cs_reloc_legacy
*relocs
;
82 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
84 if ((read_domain
&& write_domain
) || (!read_domain
&& !write_domain
)) {
85 /* in one CS a bo can only be in read or write domain but not
86 * in read & write domain at the same sime
90 if (read_domain
== RADEON_GEM_DOMAIN_CPU
) {
93 if (write_domain
== RADEON_GEM_DOMAIN_CPU
) {
96 /* check if bo is already referenced */
97 for(i
= 0; i
< cs
->crelocs
; i
++) {
100 if (relocs
[i
].base
.bo
->handle
== bo
->handle
) {
101 /* Check domains must be in read or write. As we check already
102 * checked that in argument one of the read or write domain was
103 * set we only need to check that if previous reloc as the read
104 * domain set then the read_domain should also be set for this
107 if (relocs
[i
].base
.read_domain
&& !read_domain
) {
110 if (relocs
[i
].base
.write_domain
&& !write_domain
) {
113 relocs
[i
].base
.read_domain
|= read_domain
;
114 relocs
[i
].base
.write_domain
|= write_domain
;
116 relocs
[i
].cindices
++;
117 indices
= (uint32_t*)realloc(relocs
[i
].indices
,
118 relocs
[i
].cindices
* 4);
119 if (indices
== NULL
) {
120 relocs
[i
].cindices
-= 1;
123 relocs
[i
].indices
= indices
;
124 relocs
[i
].indices
[relocs
[i
].cindices
- 1] = cs
->cdw
- 1;
128 /* add bo to reloc */
129 relocs
= (struct cs_reloc_legacy
*)
131 sizeof(struct cs_reloc_legacy
) * (cs
->crelocs
+ 1));
132 if (relocs
== NULL
) {
136 relocs
[cs
->crelocs
].base
.bo
= bo
;
137 relocs
[cs
->crelocs
].base
.read_domain
= read_domain
;
138 relocs
[cs
->crelocs
].base
.write_domain
= write_domain
;
139 relocs
[cs
->crelocs
].base
.flags
= flags
;
140 relocs
[cs
->crelocs
].indices
= (uint32_t*)malloc(4);
141 if (relocs
[cs
->crelocs
].indices
== NULL
) {
144 relocs
[cs
->crelocs
].indices
[0] = cs
->cdw
- 1;
145 relocs
[cs
->crelocs
].cindices
= 1;
146 cs
->relocs_total_size
+= radeon_bo_legacy_relocs_size(bo
);
152 static int cs_begin(struct radeon_cs
*cs
,
159 fprintf(stderr
, "CS already in a section(%s,%s,%d)\n",
160 cs
->section_file
, cs
->section_func
, cs
->section_line
);
161 fprintf(stderr
, "CS can't start section(%s,%s,%d)\n",
166 cs
->section_ndw
= ndw
;
168 cs
->section_file
= file
;
169 cs
->section_func
= func
;
170 cs
->section_line
= line
;
173 if (cs
->cdw
+ ndw
> cs
->ndw
) {
175 int num
= (ndw
> 0x3FF) ? ndw
: 0x3FF;
177 tmp
= (cs
->cdw
+ 1 + num
) & (~num
);
178 ptr
= (uint32_t*)realloc(cs
->packets
, 4 * tmp
);
189 static int cs_end(struct radeon_cs
*cs
,
196 fprintf(stderr
, "CS no section to end at (%s,%s,%d)\n",
201 if (cs
->section_ndw
!= cs
->section_cdw
) {
202 fprintf(stderr
, "CS section size missmatch start at (%s,%s,%d) %d vs %d\n",
203 cs
->section_file
, cs
->section_func
, cs
->section_line
, cs
->section_ndw
, cs
->section_cdw
);
204 fprintf(stderr
, "CS section end at (%s,%s,%d)\n",
211 static int cs_process_relocs(struct radeon_cs
*cs
)
213 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
214 struct cs_reloc_legacy
*relocs
;
217 csm
= (struct cs_manager_legacy
*)cs
->csm
;
218 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
220 for (i
= 0; i
< cs
->crelocs
; i
++) {
221 for (j
= 0; j
< relocs
[i
].cindices
; j
++) {
222 uint32_t soffset
, eoffset
;
224 r
= radeon_bo_legacy_validate(relocs
[i
].base
.bo
,
229 fprintf(stderr
, "validated %p [0x%08X, 0x%08X]\n",
230 relocs
[i
].base
.bo
, soffset
, eoffset
);
233 cs
->packets
[relocs
[i
].indices
[j
]] += soffset
;
234 if (cs
->packets
[relocs
[i
].indices
[j
]] >= eoffset
) {
235 /* radeon_bo_debug(relocs[i].base.bo, 12); */
236 fprintf(stderr
, "validated %p [0x%08X, 0x%08X]\n",
237 relocs
[i
].base
.bo
, soffset
, eoffset
);
238 fprintf(stderr
, "above end: %p 0x%08X 0x%08X\n",
240 cs
->packets
[relocs
[i
].indices
[j
]],
250 static int cs_set_age(struct radeon_cs
*cs
)
252 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
253 struct cs_reloc_legacy
*relocs
;
256 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
257 for (i
= 0; i
< cs
->crelocs
; i
++) {
258 radeon_bo_legacy_pending(relocs
[i
].base
.bo
, csm
->pending_age
);
259 radeon_bo_unref(relocs
[i
].base
.bo
);
264 static void dump_cmdbuf(struct radeon_cs
*cs
)
267 for (i
= 0; i
< cs
->cdw
; i
++){
268 fprintf(stderr
,"%x: %08x\n", i
, cs
->packets
[i
]);
272 static int cs_emit(struct radeon_cs
*cs
)
274 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
275 drm_radeon_cmd_buffer_t cmd
;
276 drm_r300_cmd_header_t age
;
280 csm
->ctx
->vtbl
.emit_cs_header(cs
, csm
->ctx
);
282 /* append buffer age */
283 if (IS_R300_CLASS(csm
->ctx
->radeonScreen
)) {
284 age
.scratch
.cmd_type
= R300_CMD_SCRATCH
;
285 /* Scratch register 2 corresponds to what radeonGetAge polls */
286 csm
->pending_age
= 0;
287 csm
->pending_count
= 1;
288 ull
= (uint64_t) (intptr_t) &csm
->pending_age
;
290 age
.scratch
.n_bufs
= 1;
291 age
.scratch
.flags
= 0;
292 radeon_cs_write_dword(cs
, age
.u
);
293 radeon_cs_write_qword(cs
, ull
);
294 radeon_cs_write_dword(cs
, 0);
297 r
= cs_process_relocs(cs
);
302 cmd
.buf
= (char *)cs
->packets
;
303 cmd
.bufsz
= cs
->cdw
* 4;
304 if (csm
->ctx
->state
.scissor
.enabled
) {
305 cmd
.nbox
= csm
->ctx
->state
.scissor
.numClipRects
;
306 cmd
.boxes
= (drm_clip_rect_t
*) csm
->ctx
->state
.scissor
.pClipRects
;
308 cmd
.nbox
= csm
->ctx
->numClipRects
;
309 cmd
.boxes
= (drm_clip_rect_t
*) csm
->ctx
->pClipRects
;
314 r
= drmCommandWrite(cs
->csm
->fd
, DRM_RADEON_CMDBUF
, &cmd
, sizeof(cmd
));
318 if (!IS_R300_CLASS(csm
->ctx
->radeonScreen
)) {
319 drm_radeon_irq_emit_t emit_cmd
;
320 emit_cmd
.irq_seq
= &csm
->pending_age
;
321 r
= drmCommandWrite(cs
->csm
->fd
, DRM_RADEON_IRQ_EMIT
, &emit_cmd
, sizeof(emit_cmd
));
328 cs
->csm
->read_used
= 0;
329 cs
->csm
->vram_write_used
= 0;
330 cs
->csm
->gart_write_used
= 0;
334 static void inline cs_free_reloc(void *relocs_p
, int crelocs
)
336 struct cs_reloc_legacy
*relocs
= relocs_p
;
340 for (i
= 0; i
< crelocs
; i
++)
341 free(relocs
[i
].indices
);
344 static int cs_destroy(struct radeon_cs
*cs
)
346 cs_free_reloc(cs
->relocs
, cs
->crelocs
);
353 static int cs_erase(struct radeon_cs
*cs
)
355 cs_free_reloc(cs
->relocs
, cs
->crelocs
);
357 cs
->relocs_total_size
= 0;
/* Always reports "no flush needed".
 * This function used to flush when the BO usage got to a certain size;
 * now the higher levels handle this better. */
static int cs_need_flush(struct radeon_cs *cs)
{
    return 0;
}
/* Required vtable entry; legacy path has no CS pretty-printer. */
static void cs_print(struct radeon_cs *cs, FILE *file)
{
}
376 static int cs_check_space(struct radeon_cs
*cs
, struct radeon_cs_space_check
*bos
, int num_bo
)
378 struct radeon_cs_manager
*csm
= cs
->csm
;
379 int this_op_read
= 0, this_op_gart_write
= 0, this_op_vram_write
= 0;
380 uint32_t read_domains
, write_domain
;
382 struct radeon_bo
*bo
;
384 /* check the totals for this operation */
390 for (i
= 0; i
< num_bo
; i
++) {
393 bos
[i
].new_accounted
= 0;
394 read_domains
= bos
[i
].read_domains
;
395 write_domain
= bos
[i
].write_domain
;
397 /* pinned bos don't count */
398 if (radeon_legacy_bo_is_static(bo
))
401 /* already accounted this bo */
402 if (write_domain
&& (write_domain
== bo
->space_accounted
))
405 if (read_domains
&& ((read_domains
<< 16) == bo
->space_accounted
))
408 if (bo
->space_accounted
== 0) {
409 if (write_domain
== RADEON_GEM_DOMAIN_VRAM
)
410 this_op_vram_write
+= bo
->size
;
411 else if (write_domain
== RADEON_GEM_DOMAIN_GTT
)
412 this_op_gart_write
+= bo
->size
;
414 this_op_read
+= bo
->size
;
415 bos
[i
].new_accounted
= (read_domains
<< 16) | write_domain
;
417 uint16_t old_read
, old_write
;
419 old_read
= bo
->space_accounted
>> 16;
420 old_write
= bo
->space_accounted
& 0xffff;
422 if (write_domain
&& (old_read
& write_domain
)) {
423 bos
[i
].new_accounted
= write_domain
;
424 /* moving from read to a write domain */
425 if (write_domain
== RADEON_GEM_DOMAIN_VRAM
) {
426 this_op_read
-= bo
->size
;
427 this_op_vram_write
+= bo
->size
;
428 } else if (write_domain
== RADEON_GEM_DOMAIN_VRAM
) {
429 this_op_read
-= bo
->size
;
430 this_op_gart_write
+= bo
->size
;
432 } else if (read_domains
& old_write
) {
433 bos
[i
].new_accounted
= bo
->space_accounted
& 0xffff;
435 /* rewrite the domains */
436 if (write_domain
!= old_write
)
437 fprintf(stderr
,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo
->handle
, write_domain
, old_write
);
438 if (read_domains
!= old_read
)
439 fprintf(stderr
,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo
->handle
, read_domains
, old_read
);
440 return RADEON_CS_SPACE_FLUSH
;
445 if (this_op_read
< 0)
448 /* check sizes - operation first */
449 if ((this_op_read
+ this_op_gart_write
> csm
->gart_limit
) ||
450 (this_op_vram_write
> csm
->vram_limit
)) {
451 return RADEON_CS_SPACE_OP_TO_BIG
;
454 if (((csm
->vram_write_used
+ this_op_vram_write
) > csm
->vram_limit
) ||
455 ((csm
->read_used
+ csm
->gart_write_used
+ this_op_gart_write
+ this_op_read
) > csm
->gart_limit
)) {
456 return RADEON_CS_SPACE_FLUSH
;
459 csm
->gart_write_used
+= this_op_gart_write
;
460 csm
->vram_write_used
+= this_op_vram_write
;
461 csm
->read_used
+= this_op_read
;
463 for (i
= 0; i
< num_bo
; i
++) {
465 bo
->space_accounted
= bos
[i
].new_accounted
;
468 return RADEON_CS_SPACE_OK
;
471 static struct radeon_cs_funcs radeon_cs_legacy_funcs
= {
484 struct radeon_cs_manager
*radeon_cs_manager_legacy_ctor(struct radeon_context
*ctx
)
486 struct cs_manager_legacy
*csm
;
488 csm
= (struct cs_manager_legacy
*)
489 calloc(1, sizeof(struct cs_manager_legacy
));
493 csm
->base
.funcs
= &radeon_cs_legacy_funcs
;
494 csm
->base
.fd
= ctx
->dri
.fd
;
496 csm
->pending_age
= 1;
497 return (struct radeon_cs_manager
*)csm
;
500 void radeon_cs_manager_legacy_dtor(struct radeon_cs_manager
*csm
)