/*
 * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include <assert.h>
#include <stdio.h>

#include "util/u_memory.h"
#include "util/u_simple_list.h"

#include "r300_context.h"
#include "r300_screen.h"
#include "r300_emit.h"
32 static struct pipe_query
*r300_create_query(struct pipe_context
*pipe
,
35 struct r300_context
*r300
= r300_context(pipe
);
36 struct r300_screen
*r300screen
= r300
->screen
;
39 if (query_type
!= PIPE_QUERY_OCCLUSION_COUNTER
&&
40 query_type
!= PIPE_QUERY_OCCLUSION_PREDICATE
&&
41 query_type
!= PIPE_QUERY_GPU_FINISHED
) {
45 q
= CALLOC_STRUCT(r300_query
);
51 if (query_type
== PIPE_QUERY_GPU_FINISHED
) {
52 return (struct pipe_query
*)q
;
55 if (r300screen
->caps
.family
== CHIP_FAMILY_RV530
)
56 q
->num_pipes
= r300screen
->info
.r300_num_z_pipes
;
58 q
->num_pipes
= r300screen
->info
.r300_num_gb_pipes
;
60 q
->buf
= r300
->rws
->buffer_create(r300
->rws
, 4096, 4096,
61 PIPE_BIND_CUSTOM
, RADEON_DOMAIN_GTT
);
66 q
->cs_buf
= r300
->rws
->buffer_get_cs_handle(q
->buf
);
68 return (struct pipe_query
*)q
;
71 static void r300_destroy_query(struct pipe_context
* pipe
,
72 struct pipe_query
* query
)
74 struct r300_query
* q
= r300_query(query
);
76 pb_reference(&q
->buf
, NULL
);
80 void r300_resume_query(struct r300_context
*r300
,
81 struct r300_query
*query
)
83 r300
->query_current
= query
;
84 r300_mark_atom_dirty(r300
, &r300
->query_start
);
87 static void r300_begin_query(struct pipe_context
* pipe
,
88 struct pipe_query
* query
)
90 struct r300_context
* r300
= r300_context(pipe
);
91 struct r300_query
* q
= r300_query(query
);
93 if (q
->type
== PIPE_QUERY_GPU_FINISHED
)
96 if (r300
->query_current
!= NULL
) {
97 fprintf(stderr
, "r300: begin_query: "
98 "Some other query has already been started.\n");
104 r300_resume_query(r300
, q
);
107 void r300_stop_query(struct r300_context
*r300
)
109 r300_emit_query_end(r300
);
110 r300
->query_current
= NULL
;
113 static void r300_end_query(struct pipe_context
* pipe
,
114 struct pipe_query
* query
)
116 struct r300_context
* r300
= r300_context(pipe
);
117 struct r300_query
*q
= r300_query(query
);
119 if (q
->type
== PIPE_QUERY_GPU_FINISHED
) {
120 pb_reference(&q
->buf
, NULL
);
121 r300_flush(pipe
, RADEON_FLUSH_ASYNC
,
122 (struct pipe_fence_handle
**)&q
->buf
);
126 if (q
!= r300
->query_current
) {
127 fprintf(stderr
, "r300: end_query: Got invalid query.\n");
132 r300_stop_query(r300
);
135 static boolean
r300_get_query_result(struct pipe_context
* pipe
,
136 struct pipe_query
* query
,
138 union pipe_query_result
*vresult
)
140 struct r300_context
* r300
= r300_context(pipe
);
141 struct r300_query
*q
= r300_query(query
);
145 if (q
->type
== PIPE_QUERY_GPU_FINISHED
) {
147 r300
->rws
->buffer_wait(q
->buf
, RADEON_USAGE_READWRITE
);
150 vresult
->b
= !r300
->rws
->buffer_is_busy(q
->buf
, RADEON_USAGE_READWRITE
);
155 map
= r300
->rws
->buffer_map(q
->cs_buf
, r300
->cs
,
157 (!wait
? PIPE_TRANSFER_DONTBLOCK
: 0));
161 /* Sum up the results. */
163 for (i
= 0; i
< q
->num_results
; i
++) {
164 /* Convert little endian values written by GPU to CPU byte order */
165 temp
+= util_le32_to_cpu(*map
);
169 r300
->rws
->buffer_unmap(q
->cs_buf
);
171 if (q
->type
== PIPE_QUERY_OCCLUSION_PREDICATE
) {
172 vresult
->b
= temp
!= 0;
179 static void r300_render_condition(struct pipe_context
*pipe
,
180 struct pipe_query
*query
,
183 struct r300_context
*r300
= r300_context(pipe
);
184 union pipe_query_result result
;
187 r300
->skip_rendering
= FALSE
;
190 wait
= mode
== PIPE_RENDER_COND_WAIT
||
191 mode
== PIPE_RENDER_COND_BY_REGION_WAIT
;
193 if (r300_get_query_result(pipe
, query
, wait
, &result
)) {
194 if (r300_query(query
)->type
== PIPE_QUERY_OCCLUSION_PREDICATE
) {
195 r300
->skip_rendering
= !result
.b
;
197 r300
->skip_rendering
= !result
.u64
;
203 void r300_init_query_functions(struct r300_context
* r300
)
205 r300
->context
.create_query
= r300_create_query
;
206 r300
->context
.destroy_query
= r300_destroy_query
;
207 r300
->context
.begin_query
= r300_begin_query
;
208 r300
->context
.end_query
= r300_end_query
;
209 r300
->context
.get_query_result
= r300_get_query_result
;
210 r300
->context
.render_condition
= r300_render_condition
;