/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "../radeon/r600_cs.h"
#include "radeonsi_pm4.h"
#include "radeonsi_pipe.h"
#include "util/u_memory.h"

#define GROUP_FORCE_NEW_BLOCK	0
/* Get backends mask */
void si_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->b.info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->b.info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->b.info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->b.info.r600_backend_map;
		unsigned item_width = 4, item_mask = 0x7;

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data; the arguments were lost in extraction,
	 * the size (4 dwords per DB) is inferred from the memset below */
	buffer = r600_resource_create_custom(&ctx->screen->b.b,
					     PIPE_USAGE_STAGING,
					     ctx->max_db * 16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = ctx->b.ws->buffer_map(buffer->cs_buf, ctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
	if (results) {
		uint64_t va;

		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->b.ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		va = r600_resource_va(&ctx->screen->b.b, (void *)buffer);
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = va >> 32;

		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, buffer, RADEON_USAGE_WRITE);

		/* analyze results */
		results = ctx->b.ws->buffer_map(buffer->cs_buf, ctx->b.rings.gfx.cs, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i * 4 + 1])
					mask |= (1 << i);
			}
			ctx->b.ws->buffer_unmap(buffer->cs_buf);
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0)) >> (32 - num_backends);
}
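
/* Timer queries (TIME_ELAPSED, TIMESTAMP) are never suspended around a CS
 * flush; only the nontimer queries tracked through
 * num_cs_dw_nontimer_queries_suspend are. TIMESTAMP is also the one query
 * type without a begin operation. The two helpers below encode exactly
 * those distinctions. */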
bool si_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP ||
	       type == PIPE_QUERY_TIMESTAMP_DISJOINT;
}
bool si_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_TIMESTAMP;
}
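
/* Make sure num_dw more dwords fit in the current CS, counting everything
 * that must still be emitted at the end of it (query suspends, streamout
 * end, cache flushes); flush asynchronously if the total would overflow. */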
void si_need_cs_space(struct r600_context *ctx, unsigned num_dw,
		      boolean count_draw_in)
{
	int i;

	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->b.rings.gfx.cs->cdw;

	/* count_draw_in gates the draw-related estimates (structure restored,
	 * the parameter is otherwise unused) */
	if (count_draw_in) {
		for (i = 0; i < SI_NUM_ATOMS(ctx); i++) {
			if (ctx->atoms.array[i]->dirty) {
				num_dw += ctx->atoms.array[i]->num_dw;
			}
		}

		/* The number of dwords all the dirty states would take. */
		num_dw += ctx->pm4_dirty_cdwords;

		/* The upper-bound of how much a draw command would take. */
		num_dw += SI_MAX_DRAW_CS_DWORDS;
	}

	/* Count in queries_suspend. */
	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	if (ctx->b.streamout.begin_emitted) {
		num_dw += ctx->b.streamout.num_dw_for_end;
	}

	/* Count in render_condition(NULL) at the end of CS; 3 dwords matches
	 * the PREDICATION_OP_CLEAR packet emitted in r600_query_predication(). */
	if (ctx->predicate_drawing) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += ctx->atoms.cache_flush->num_dw;

	if (ctx->screen->trace_bo) {
		num_dw += R600_TRACE_CS_DWORDS;
	}

	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		radeonsi_flush(&ctx->b.b, NULL, RADEON_FLUSH_ASYNC);
	}
}
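
/* Submit the current CS: suspend queries and streamout, flush caches,
 * optionally dump the CS for tracing, hand it to the winsys, and start a
 * fresh CS. */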
void si_context_flush(struct r600_context *ctx, unsigned flags)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;

	/* suspend queries */
	ctx->nontimer_queries_suspended = false;
	if (ctx->num_cs_dw_nontimer_queries_suspend) {
		r600_context_queries_suspend(ctx);
		ctx->nontimer_queries_suspended = true;
	}

	ctx->b.streamout.suspended = false;
	if (ctx->b.streamout.begin_emitted) {
		r600_emit_streamout_end(&ctx->b);
		ctx->b.streamout.suspended = true;
	}

	ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
			R600_CONTEXT_FLUSH_AND_INV_CB_META |
			R600_CONTEXT_FLUSH_AND_INV_DB |
			R600_CONTEXT_INV_TEX_CACHE;
	si_emit_cache_flush(&ctx->b, NULL);

	/* this is probably not needed anymore */
	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);

	/* force to keep tiling flags */
	flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

	if (ctx->screen->trace_bo) {
		struct r600_screen *rscreen = ctx->screen;
		unsigned i;

		for (i = 0; i < cs->cdw; i++) {
			fprintf(stderr, "[%4d] [%5d] 0x%08x\n", rscreen->cs_count, i, cs->buf[i]);
		}
		rscreen->cs_count++;
	}

	/* Flush the CS. */
	ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, 0);

	if (ctx->screen->trace_bo) {
		struct r600_screen *rscreen = ctx->screen;
		unsigned i;

		/* Poll until the CS is idle; the exact delay was lost in
		 * extraction, 5 ms per step is inferred from the "%dms"
		 * message below. */
		for (i = 0; i < 10; i++) {
			usleep(5000);
			if (!ctx->ws->buffer_is_busy(rscreen->trace_bo->buf, RADEON_USAGE_READWRITE)) {
				break;
			}
		}
		if (i == 10) {
			fprintf(stderr, "timeout on cs lockup likely happen at cs %d dw %d\n",
				rscreen->trace_ptr[1], rscreen->trace_ptr[0]);
		} else {
			fprintf(stderr, "cs %d executed in %dms\n", rscreen->trace_ptr[1], i * 5);
		}
	}

	si_begin_new_cs(ctx);
}
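
/* Start a fresh CS: invalidate read caches, mark all state groups dirty so
 * they are re-emitted, emit the init state first, then restore whatever
 * streamout and queries the flush suspended. */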
void si_begin_new_cs(struct r600_context *ctx)
{
	ctx->pm4_dirty_cdwords = 0;

	/* Flush read caches at the beginning of CS. */
	ctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			R600_CONTEXT_INV_CONST_CACHE |
			R600_CONTEXT_INV_SHADER_CACHE;

	/* set all valid group as dirty so they get reemitted on
	 * next draw command
	 */
	si_pm4_reset_emitted(ctx);

	/* The CS initialization should be emitted before everything else. */
	si_pm4_emit(ctx, ctx->queued.named.init);
	ctx->emitted.named.init = ctx->queued.named.init;

	if (ctx->b.streamout.suspended) {
		ctx->b.streamout.append_bitmask = ctx->b.streamout.enabled_mask;
		r600_streamout_buffers_dirty(&ctx->b);
	}

	/* resume queries */
	if (ctx->nontimer_queries_suspended) {
		r600_context_queries_resume(ctx);
	}

	si_all_descriptors_begin_new_cs(ctx);
}
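
/* Each counter in a result block is written by the GPU as a (begin, end)
 * pair of 64-bit values. Bit 63 appears to act as a "write landed" flag, so
 * with test_status_bit a pair only contributes once both halves are valid;
 * the result is the end - begin delta. */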
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
static boolean r600_query_result(struct r600_context *ctx, struct r600_query *query, boolean wait)
{
	unsigned results_base = query->results_start;
	char *map;

	map = ctx->b.ws->buffer_map(query->buffer->cs_buf, ctx->b.rings.gfx.cs,
				    PIPE_TRANSFER_READ |
				    (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != query->results_end) {
			query->result.u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base = (results_base + 16) % query->buffer->b.b.width0;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != query->results_end) {
			query->result.b = query->result.b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base = (results_base + 16) % query->buffer->b.b.width0;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		query->result.u64 = (uint64_t)current_result[0] | (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != query->results_end) {
			query->result.u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != query->results_end) {
			query->result.u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != query->results_end) {
			query->result.u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != query->results_end) {
			query->result.so.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			query->result.so.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != query->results_end) {
			query->result.b = query->result.b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
		}
		break;
	default:
		assert(0);
	}

	query->results_start = query->results_end;
	ctx->b.ws->buffer_unmap(query->buffer->cs_buf);
	return TRUE;
}
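
/* A note on the PKT3_NOP packets emitted below: the NOP payload carries the
 * relocation index returned by r600_context_bo_reloc(), which the kernel CS
 * parser uses to validate the buffer and patch its GPU address at submit
 * time. */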
void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
	unsigned new_results_end, i;
	uint32_t *results;
	uint64_t va;

	si_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);

	new_results_end = (query->results_end + query->result_size) % query->buffer->b.b.width0;

	/* collect current results if query buffer is full */
	if (new_results_end == query->results_start) {
		r600_query_result(ctx, query, TRUE);
	}

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = ctx->b.ws->buffer_map(query->buffer->cs_buf, ctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
		results = (uint32_t*)((char*)results + query->results_end);
		memset(results, 0, query->result_size);

		/* Set top bits for unused backends */
		for (i = 0; i < ctx->max_db; i++) {
			if (!(ctx->backend_mask & (1<<i))) {
				results[(i * 4)+1] = 0x80000000;
				results[(i * 4)+3] = 0x80000000;
			}
		}
		ctx->b.ws->buffer_unmap(query->buffer->cs_buf);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		results = ctx->b.ws->buffer_map(query->buffer->cs_buf, ctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
		results = (uint32_t*)((char*)results + query->results_end);
		memset(results, 0, query->result_size);
		ctx->b.ws->buffer_unmap(query->buffer->cs_buf);
		break;
	default:
		assert(0);
	}

	/* emit begin query */
	va = r600_resource_va(&ctx->screen->b.b, (void*)query->buffer);
	va += query->results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}

	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, query->buffer, RADEON_USAGE_WRITE);

	if (!si_is_timer_query(query->type)) {
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
	}
}
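
/* Emit the "end" half of a query. For TIMESTAMP, which has no begin
 * operation, this is the only packet the query ever emits. */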
void r600_query_end(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
	uint64_t va;
	unsigned new_results_end;

	/* The queries which need begin already called this in begin_query. */
	if (!si_query_needs_begin(query->type)) {
		si_need_cs_space(ctx, query->num_cs_dw, TRUE);

		new_results_end = (query->results_end + query->result_size) % query->buffer->b.b.width0;

		/* collect current results if query buffer is full */
		if (new_results_end == query->results_start) {
			r600_query_result(ctx, query, TRUE);
		}
	}

	va = r600_resource_va(&ctx->screen->b.b, (void*)query->buffer);

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->results_end + 8;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->results_end + query->result_size/2;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}

	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, query->buffer, RADEON_USAGE_WRITE);

	query->results_end = (query->results_end + query->result_size) % query->buffer->b.b.width0;

	if (si_query_needs_begin(query->type) && !si_is_timer_query(query->type)) {
		ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}
}
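
/* Emit one SET_PREDICATION packet per result block of the query. Setting
 * PREDICATION_CONTINUE on every packet after the first makes the hardware
 * accumulate across blocks instead of restarting the predicate. */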
void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
			    int flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;

	if (operation == PREDICATION_OP_CLEAR) {
		si_need_cs_space(ctx, 3, FALSE);

		cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR);
	} else {
		unsigned results_base = query->results_start;
		unsigned count;
		uint32_t op;
		uint64_t va;

		/* find count of the query data blocks */
		count = (query->buffer->b.b.width0 + query->results_end - query->results_start) % query->buffer->b.b.width0;
		count /= query->result_size;

		si_need_cs_space(ctx, 5 * count, TRUE);

		op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
		     (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);
		va = r600_resource_va(&ctx->screen->b.b, (void*)query->buffer);

		/* emit predicate packets for all data blocks */
		while (results_base != query->results_end) {
			cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
			cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL;
			cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF);
			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx,
								   query->buffer, RADEON_USAGE_READ);
			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}
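
/* result_size below is the size of one result block: occlusion queries
 * store a (begin, end) pair of 64-bit counters per DB, and the streamout
 * related queries store two such pairs (NumPrimitivesWritten and
 * PrimitiveStorageNeeded). */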
struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned query_type)
{
	struct r600_query *query;
	unsigned buffer_size = 4096;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * ctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		break;
	default:
		/* unsupported query type (handling restored by inference) */
		assert(0);
		FREE(query);
		return NULL;
	}

	/* adjust buffer size to simplify offsets wrapping math */
	buffer_size -= buffer_size % query->result_size;

	/* Queries are normally read by the CPU after
	 * being written by the gpu, hence staging is probably a good
	 * usage pattern.
	 */
	query->buffer = r600_resource_create_custom(&ctx->screen->b.b,
						    PIPE_USAGE_STAGING,
						    buffer_size);
	if (!query->buffer) {
		FREE(query);
		return NULL;
	}

	return query;
}
void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query)
{
	r600_resource_reference(&query->buffer, NULL);
	FREE(query);
}
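
/* Conversion note: r600_clock_crystal_freq is in kHz, so
 * ticks * 1000000 / freq below yields nanoseconds, the unit pipe timestamp
 * queries are expected to return. */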
boolean r600_context_query_result(struct r600_context *ctx,
				  struct r600_query *query,
				  boolean wait, void *vresult)
{
	boolean *result_b = (boolean*)vresult;
	uint64_t *result_u64 = (uint64_t*)vresult;
	struct pipe_query_data_so_statistics *result_so =
		(struct pipe_query_data_so_statistics*)vresult;

	if (!r600_query_result(ctx, query, wait))
		return FALSE;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		*result_u64 = query->result.u64;
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		*result_b = query->result.b;
		break;
	case PIPE_QUERY_TIMESTAMP:
	case PIPE_QUERY_TIME_ELAPSED:
		*result_u64 = (1000000 * query->result.u64) / ctx->screen->b.info.r600_clock_crystal_freq;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		*result_so = query->result.so;
		break;
	default:
		assert(0);
	}

	return TRUE;
}
void r600_context_queries_suspend(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_query_list, list) {
		r600_query_end(ctx, query);
	}
	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}
void r600_context_queries_resume(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_query_list, list) {
		r600_query_begin(ctx, query);
	}
}
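
/* Write the current CS offset and CS counter into the trace buffer via
 * WRITE_DATA; after a lockup, si_context_flush() reads these back through
 * trace_ptr to report roughly which dword of which CS the GPU last
 * reached. */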
void r600_trace_emit(struct r600_context *rctx)
{
	struct r600_screen *rscreen = rctx->screen;
	struct radeon_winsys_cs *cs = rctx->cs;
	uint64_t va;

	va = r600_resource_va(&rscreen->screen, (void*)rscreen->trace_bo);
	r600_context_bo_reloc(rctx, rscreen->trace_bo, RADEON_USAGE_READWRITE);
	cs->buf[cs->cdw++] = PKT3(PKT3_WRITE_DATA, 4, 0);
	cs->buf[cs->cdw++] = PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
			     PKT3_WRITE_DATA_WR_CONFIRM |
			     PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME);
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;
	cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFFFFFFFUL;
	cs->buf[cs->cdw++] = cs->cdw;
	cs->buf[cs->cdw++] = rscreen->cs_count;
}