2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Jérôme Glisse
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
28 * Aapo Tahkola <aet@rasterburn.org>
29 * Nicolai Haehnle <prefect_@gmx.net>
30 * Jérôme Glisse <glisse@freedesktop.org>
34 #include "radeon_bocs_wrapper.h"
36 struct cs_manager_legacy
{
37 struct radeon_cs_manager base
;
38 struct radeon_context
*ctx
;
39 /* hack for scratch stuff */
41 uint32_t pending_count
;
46 struct cs_reloc_legacy
{
47 struct radeon_cs_reloc base
;
53 static struct radeon_cs
*cs_create(struct radeon_cs_manager
*csm
,
58 cs
= (struct radeon_cs
*)calloc(1, sizeof(struct radeon_cs
));
63 cs
->ndw
= (ndw
+ 0x3FF) & (~0x3FF);
64 cs
->packets
= (uint32_t*)malloc(4*cs
->ndw
);
65 if (cs
->packets
== NULL
) {
69 cs
->relocs_total_size
= 0;
73 static int cs_write_reloc(struct radeon_cs
*cs
,
76 uint32_t write_domain
,
79 struct cs_reloc_legacy
*relocs
;
82 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
84 if ((read_domain
&& write_domain
) || (!read_domain
&& !write_domain
)) {
85 /* in one CS a bo can only be in read or write domain but not
86 * in read & write domain at the same sime
90 if (read_domain
== RADEON_GEM_DOMAIN_CPU
) {
93 if (write_domain
== RADEON_GEM_DOMAIN_CPU
) {
96 /* check if bo is already referenced */
97 for(i
= 0; i
< cs
->crelocs
; i
++) {
100 if (relocs
[i
].base
.bo
->handle
== bo
->handle
) {
101 /* Check domains must be in read or write. As we check already
102 * checked that in argument one of the read or write domain was
103 * set we only need to check that if previous reloc as the read
104 * domain set then the read_domain should also be set for this
107 if (relocs
[i
].base
.read_domain
&& !read_domain
) {
110 if (relocs
[i
].base
.write_domain
&& !write_domain
) {
113 relocs
[i
].base
.read_domain
|= read_domain
;
114 relocs
[i
].base
.write_domain
|= write_domain
;
116 relocs
[i
].cindices
++;
117 indices
= (uint32_t*)realloc(relocs
[i
].indices
,
118 relocs
[i
].cindices
* 4);
119 if (indices
== NULL
) {
120 relocs
[i
].cindices
-= 1;
123 relocs
[i
].indices
= indices
;
124 relocs
[i
].indices
[relocs
[i
].cindices
- 1] = cs
->cdw
- 1;
128 /* add bo to reloc */
129 relocs
= (struct cs_reloc_legacy
*)
131 sizeof(struct cs_reloc_legacy
) * (cs
->crelocs
+ 1));
132 if (relocs
== NULL
) {
136 relocs
[cs
->crelocs
].base
.bo
= bo
;
137 relocs
[cs
->crelocs
].base
.read_domain
= read_domain
;
138 relocs
[cs
->crelocs
].base
.write_domain
= write_domain
;
139 relocs
[cs
->crelocs
].base
.flags
= flags
;
140 relocs
[cs
->crelocs
].indices
= (uint32_t*)malloc(4);
141 if (relocs
[cs
->crelocs
].indices
== NULL
) {
144 relocs
[cs
->crelocs
].indices
[0] = cs
->cdw
- 1;
145 relocs
[cs
->crelocs
].cindices
= 1;
146 cs
->relocs_total_size
+= radeon_bo_legacy_relocs_size(bo
);
152 static int cs_begin(struct radeon_cs
*cs
,
159 fprintf(stderr
, "CS already in a section(%s,%s,%d)\n",
160 cs
->section_file
, cs
->section_func
, cs
->section_line
);
161 fprintf(stderr
, "CS can't start section(%s,%s,%d)\n",
166 cs
->section_ndw
= ndw
;
168 cs
->section_file
= file
;
169 cs
->section_func
= func
;
170 cs
->section_line
= line
;
173 if (cs
->cdw
+ ndw
> cs
->ndw
) {
175 int num
= (ndw
> 0x3FF) ? ndw
: 0x3FF;
177 tmp
= (cs
->cdw
+ 1 + num
) & (~num
);
178 ptr
= (uint32_t*)realloc(cs
->packets
, 4 * tmp
);
189 static int cs_end(struct radeon_cs
*cs
,
196 fprintf(stderr
, "CS no section to end at (%s,%s,%d)\n",
201 if (cs
->section_ndw
!= cs
->section_cdw
) {
202 fprintf(stderr
, "CS section size missmatch start at (%s,%s,%d) %d vs %d\n",
203 cs
->section_file
, cs
->section_func
, cs
->section_line
, cs
->section_ndw
, cs
->section_cdw
);
204 fprintf(stderr
, "CS section end at (%s,%s,%d)\n",
211 static int cs_process_relocs(struct radeon_cs
*cs
)
213 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
214 struct cs_reloc_legacy
*relocs
;
217 csm
= (struct cs_manager_legacy
*)cs
->csm
;
218 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
220 for (i
= 0; i
< cs
->crelocs
; i
++) {
221 for (j
= 0; j
< relocs
[i
].cindices
; j
++) {
222 uint32_t soffset
, eoffset
;
224 r
= radeon_bo_legacy_validate(relocs
[i
].base
.bo
,
229 fprintf(stderr
, "validated %p [0x%08X, 0x%08X]\n",
230 relocs
[i
].base
.bo
, soffset
, eoffset
);
233 cs
->packets
[relocs
[i
].indices
[j
]] += soffset
;
234 if (cs
->packets
[relocs
[i
].indices
[j
]] >= eoffset
) {
235 /* radeon_bo_debug(relocs[i].base.bo, 12); */
236 fprintf(stderr
, "validated %p [0x%08X, 0x%08X]\n",
237 relocs
[i
].base
.bo
, soffset
, eoffset
);
238 fprintf(stderr
, "above end: %p 0x%08X 0x%08X\n",
240 cs
->packets
[relocs
[i
].indices
[j
]],
250 static int cs_set_age(struct radeon_cs
*cs
)
252 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
253 struct cs_reloc_legacy
*relocs
;
256 relocs
= (struct cs_reloc_legacy
*)cs
->relocs
;
257 for (i
= 0; i
< cs
->crelocs
; i
++) {
258 radeon_bo_legacy_pending(relocs
[i
].base
.bo
, csm
->pending_age
);
259 radeon_bo_unref(relocs
[i
].base
.bo
);
264 static int cs_emit(struct radeon_cs
*cs
)
266 struct cs_manager_legacy
*csm
= (struct cs_manager_legacy
*)cs
->csm
;
267 drm_radeon_cmd_buffer_t cmd
;
268 drm_r300_cmd_header_t age
;
272 csm
->ctx
->vtbl
.emit_cs_header(cs
, csm
->ctx
);
274 /* append buffer age */
275 if (IS_R300_CLASS(csm
->ctx
->radeonScreen
)) {
276 age
.scratch
.cmd_type
= R300_CMD_SCRATCH
;
277 /* Scratch register 2 corresponds to what radeonGetAge polls */
278 csm
->pending_age
= 0;
279 csm
->pending_count
= 1;
280 ull
= (uint64_t) (intptr_t) &csm
->pending_age
;
282 age
.scratch
.n_bufs
= 1;
283 age
.scratch
.flags
= 0;
284 radeon_cs_write_dword(cs
, age
.u
);
285 radeon_cs_write_qword(cs
, ull
);
286 radeon_cs_write_dword(cs
, 0);
289 r
= cs_process_relocs(cs
);
294 cmd
.buf
= (char *)cs
->packets
;
295 cmd
.bufsz
= cs
->cdw
* 4;
296 if (csm
->ctx
->state
.scissor
.enabled
) {
297 cmd
.nbox
= csm
->ctx
->state
.scissor
.numClipRects
;
298 cmd
.boxes
= (drm_clip_rect_t
*) csm
->ctx
->state
.scissor
.pClipRects
;
300 cmd
.nbox
= csm
->ctx
->numClipRects
;
301 cmd
.boxes
= (drm_clip_rect_t
*) csm
->ctx
->pClipRects
;
306 r
= drmCommandWrite(cs
->csm
->fd
, DRM_RADEON_CMDBUF
, &cmd
, sizeof(cmd
));
310 if (!IS_R300_CLASS(csm
->ctx
->radeonScreen
)) {
311 drm_radeon_irq_emit_t emit_cmd
;
312 emit_cmd
.irq_seq
= &csm
->pending_age
;
313 r
= drmCommandWrite(cs
->csm
->fd
, DRM_RADEON_IRQ_EMIT
, &emit_cmd
, sizeof(emit_cmd
));
320 cs
->csm
->read_used
= 0;
321 cs
->csm
->vram_write_used
= 0;
322 cs
->csm
->gart_write_used
= 0;
326 static void inline cs_free_reloc(void *relocs_p
, int crelocs
)
328 struct cs_reloc_legacy
*relocs
= relocs_p
;
332 for (i
= 0; i
< crelocs
; i
++)
333 free(relocs
[i
].indices
);
336 static int cs_destroy(struct radeon_cs
*cs
)
338 cs_free_reloc(cs
->relocs
, cs
->crelocs
);
345 static int cs_erase(struct radeon_cs
*cs
)
347 cs_free_reloc(cs
->relocs
, cs
->crelocs
);
349 cs
->relocs_total_size
= 0;
/* Always report "no flush needed". */
static int cs_need_flush(struct radeon_cs *cs)
{
    /* this function used to flush when the BO usage got to
     * a certain size, now the higher levels handle this better */
    return 0;
}
/* Debug dump hook; intentionally a no-op in the legacy path. */
static void cs_print(struct radeon_cs *cs, FILE *file)
{
}
368 static int cs_check_space(struct radeon_cs
*cs
, struct radeon_cs_space_check
*bos
, int num_bo
)
370 struct radeon_cs_manager
*csm
= cs
->csm
;
371 int this_op_read
= 0, this_op_gart_write
= 0, this_op_vram_write
= 0;
372 uint32_t read_domains
, write_domain
;
374 struct radeon_bo
*bo
;
376 /* check the totals for this operation */
382 for (i
= 0; i
< num_bo
; i
++) {
385 bos
[i
].new_accounted
= 0;
386 read_domains
= bos
[i
].read_domains
;
387 write_domain
= bos
[i
].write_domain
;
389 /* pinned bos don't count */
390 if (radeon_legacy_bo_is_static(bo
))
393 /* already accounted this bo */
394 if (write_domain
&& (write_domain
== bo
->space_accounted
)) {
395 bos
[i
].new_accounted
= bo
->space_accounted
;
399 if (read_domains
&& ((read_domains
<< 16) == bo
->space_accounted
)) {
400 bos
[i
].new_accounted
= bo
->space_accounted
;
404 if (bo
->space_accounted
== 0) {
405 if (write_domain
== RADEON_GEM_DOMAIN_VRAM
)
406 this_op_vram_write
+= bo
->size
;
407 else if (write_domain
== RADEON_GEM_DOMAIN_GTT
)
408 this_op_gart_write
+= bo
->size
;
410 this_op_read
+= bo
->size
;
411 bos
[i
].new_accounted
= (read_domains
<< 16) | write_domain
;
413 uint16_t old_read
, old_write
;
415 old_read
= bo
->space_accounted
>> 16;
416 old_write
= bo
->space_accounted
& 0xffff;
418 if (write_domain
&& (old_read
& write_domain
)) {
419 bos
[i
].new_accounted
= write_domain
;
420 /* moving from read to a write domain */
421 if (write_domain
== RADEON_GEM_DOMAIN_VRAM
) {
422 this_op_read
-= bo
->size
;
423 this_op_vram_write
+= bo
->size
;
424 } else if (write_domain
== RADEON_GEM_DOMAIN_VRAM
) {
425 this_op_read
-= bo
->size
;
426 this_op_gart_write
+= bo
->size
;
428 } else if (read_domains
& old_write
) {
429 bos
[i
].new_accounted
= bo
->space_accounted
& 0xffff;
431 /* rewrite the domains */
432 if (write_domain
!= old_write
)
433 fprintf(stderr
,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo
->handle
, write_domain
, old_write
);
434 if (read_domains
!= old_read
)
435 fprintf(stderr
,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo
->handle
, read_domains
, old_read
);
436 return RADEON_CS_SPACE_FLUSH
;
441 if (this_op_read
< 0)
444 /* check sizes - operation first */
445 if ((this_op_read
+ this_op_gart_write
> csm
->gart_limit
) ||
446 (this_op_vram_write
> csm
->vram_limit
)) {
447 return RADEON_CS_SPACE_OP_TO_BIG
;
450 if (((csm
->vram_write_used
+ this_op_vram_write
) > csm
->vram_limit
) ||
451 ((csm
->read_used
+ csm
->gart_write_used
+ this_op_gart_write
+ this_op_read
) > csm
->gart_limit
)) {
452 return RADEON_CS_SPACE_FLUSH
;
455 csm
->gart_write_used
+= this_op_gart_write
;
456 csm
->vram_write_used
+= this_op_vram_write
;
457 csm
->read_used
+= this_op_read
;
459 for (i
= 0; i
< num_bo
; i
++) {
461 bo
->space_accounted
= bos
[i
].new_accounted
;
464 return RADEON_CS_SPACE_OK
;
467 static struct radeon_cs_funcs radeon_cs_legacy_funcs
= {
480 struct radeon_cs_manager
*radeon_cs_manager_legacy_ctor(struct radeon_context
*ctx
)
482 struct cs_manager_legacy
*csm
;
484 csm
= (struct cs_manager_legacy
*)
485 calloc(1, sizeof(struct cs_manager_legacy
));
489 csm
->base
.funcs
= &radeon_cs_legacy_funcs
;
490 csm
->base
.fd
= ctx
->dri
.fd
;
492 csm
->pending_age
= 1;
493 return (struct radeon_cs_manager
*)csm
;
496 void radeon_cs_manager_legacy_dtor(struct radeon_cs_manager
*csm
)