/*
 * Copyright (c) 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file hsw_queryobj.c
 *
 * Support for query buffer objects (GL_ARB_query_buffer_object) on Haswell+.
 */
#include "main/imports.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
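
/*
 * GPR0 = 80 * GPR0;
 *
 * The MI_MATH ALU has no multiply operation, so the product is built from
 * repeated doublings: GPR1 is raised to 16 * GPR0, GPR2 to 64 * GPR0, and
 * their sum (80 * GPR0) is written back to GPR0.
 */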
static void
mult_gpr0_by_80(struct brw_context *brw)
{
   static const uint32_t maths[] = {
      MI_MATH_ALU2(LOAD, SRCA, R0),
      MI_MATH_ALU2(LOAD, SRCB, R0),
      MI_MATH_ALU0(ADD),
      MI_MATH_ALU2(STORE, R1, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R1),
      MI_MATH_ALU2(LOAD, SRCB, R1),
      MI_MATH_ALU0(ADD),
      MI_MATH_ALU2(STORE, R1, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R1),
      MI_MATH_ALU2(LOAD, SRCB, R1),
      MI_MATH_ALU0(ADD),
      MI_MATH_ALU2(STORE, R1, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R1),
      MI_MATH_ALU2(LOAD, SRCB, R1),
      MI_MATH_ALU0(ADD),
      /* GPR1 = 16 * GPR0 */
      MI_MATH_ALU2(STORE, R1, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R1),
      MI_MATH_ALU2(LOAD, SRCB, R1),
      MI_MATH_ALU0(ADD),
      MI_MATH_ALU2(STORE, R2, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R2),
      MI_MATH_ALU2(LOAD, SRCB, R2),
      MI_MATH_ALU0(ADD),
      /* GPR2 = 64 * GPR0 */
      MI_MATH_ALU2(STORE, R2, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R1),
      MI_MATH_ALU2(LOAD, SRCB, R2),
      MI_MATH_ALU0(ADD),
      /* GPR0 = 80 * GPR0 */
      MI_MATH_ALU2(STORE, R0, ACCU),
   };

   BEGIN_BATCH(1 + ARRAY_SIZE(maths));
   OUT_BATCH(HSW_MI_MATH | (1 + ARRAY_SIZE(maths) - 2));

   for (int m = 0; m < ARRAY_SIZE(maths); m++)
      OUT_BATCH(maths[m]);

   ADVANCE_BATCH();
}

/*
 * GPR0 = GPR0 & ((1ull << n) - 1);
 */
static void
keep_gpr0_lower_n_bits(struct brw_context *brw, uint32_t n)
{
   static const uint32_t maths[] = {
      MI_MATH_ALU2(LOAD, SRCA, R0),
      MI_MATH_ALU2(LOAD, SRCB, R1),
      MI_MATH_ALU0(AND),
      MI_MATH_ALU2(STORE, R0, ACCU),
   };

   brw_load_register_imm64(brw, HSW_CS_GPR(1), (1ull << n) - 1);

   BEGIN_BATCH(1 + ARRAY_SIZE(maths));
   OUT_BATCH(HSW_MI_MATH | (1 + ARRAY_SIZE(maths) - 2));

   for (int m = 0; m < ARRAY_SIZE(maths); m++)
      OUT_BATCH(maths[m]);

   ADVANCE_BATCH();
}
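
/*
 * GPR0 = GPR0 << 30;
 *
 * The MI_MATH ALU has no shift operation either, so the shift is performed
 * as 30 doublings (GPR0 += GPR0), emitted as 5 MI_MATH commands of 6
 * doublings each (see outer_count/inner_count below).
 */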
static void
shl_gpr0_by_30_bits(struct brw_context *brw)
{
   /* First we mask 34 bits of GPR0 to prevent overflow */
   keep_gpr0_lower_n_bits(brw, 34);

   static const uint32_t shl_maths[] = {
      MI_MATH_ALU2(LOAD, SRCA, R0),
      MI_MATH_ALU2(LOAD, SRCB, R0),
      MI_MATH_ALU0(ADD),
      MI_MATH_ALU2(STORE, R0, ACCU),
   };

   const uint32_t outer_count = 5;
   const uint32_t inner_count = 6;
   STATIC_ASSERT(outer_count * inner_count == 30);
   const uint32_t cmd_len = 1 + inner_count * ARRAY_SIZE(shl_maths);
   const uint32_t batch_len = cmd_len * outer_count;

   BEGIN_BATCH(batch_len);

   /* We'll emit 5 commands, each shifting GPR0 left by 6 bits, for a total of
    * 30 left shifts.
    */
   for (int o = 0; o < outer_count; o++) {
      /* Submit one MI_MATH to shift left by 6 bits */
      OUT_BATCH(HSW_MI_MATH | (cmd_len - 2));
      for (int i = 0; i < inner_count; i++)
         for (int m = 0; m < ARRAY_SIZE(shl_maths); m++)
            OUT_BATCH(shl_maths[m]);
   }

   ADVANCE_BATCH();
}

/*
 * GPR0 = GPR0 >> 2;
 *
 * Note that the upper 30 bits of GPR0 are lost!
 */
static void
shr_gpr0_by_2_bits(struct brw_context *brw)
{
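   /* The ALU cannot shift right, so shift left by 30 instead: the bits we
    * want end up in the upper dword of the 64-bit GPR0. Then move that upper
    * dword down into the lower dword and clear the upper dword.
    */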
   shl_gpr0_by_30_bits(brw);
   brw_load_register_reg(brw, HSW_CS_GPR(0) + 4, HSW_CS_GPR(0));
   brw_load_register_imm32(brw, HSW_CS_GPR(0) + 4, 0);
}

/*
 * GPR0 = (GPR0 == 0) ? 0 : 1;
 */
static void
gpr0_to_bool(struct brw_context *brw)
{
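   /* Add zero to GPR0 purely to set the ALU zero flag, store the inverted
    * flag into GPR0, then AND it with GPR1 (loaded with 1 below) to get a
    * clean 0/1 value.
    */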
   static const uint32_t maths[] = {
      MI_MATH_ALU2(LOAD, SRCA, R0),
      MI_MATH_ALU1(LOAD0, SRCB),
      MI_MATH_ALU0(ADD),
      MI_MATH_ALU2(STOREINV, R0, ZF),
      MI_MATH_ALU2(LOAD, SRCA, R0),
      MI_MATH_ALU2(LOAD, SRCB, R1),
      MI_MATH_ALU0(AND),
      MI_MATH_ALU2(STORE, R0, ACCU),
   };

   brw_load_register_imm64(brw, HSW_CS_GPR(1), 1ull);

   BEGIN_BATCH(1 + ARRAY_SIZE(maths));
   OUT_BATCH(HSW_MI_MATH | (1 + ARRAY_SIZE(maths) - 2));

   for (int m = 0; m < ARRAY_SIZE(maths); m++)
      OUT_BATCH(maths[m]);

   ADVANCE_BATCH();
}
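
/*
 * Load the four 64-bit counter snapshots for transform feedback stream `idx`
 * into GPR1-GPR4, so calc_overflow_for_stream() below can operate on them.
 */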
static void
load_overflow_data_to_cs_gprs(struct brw_context *brw,
                              struct brw_query_object *query,
                              int idx)
{
   int offset = idx * sizeof(uint64_t) * 4;

   brw_load_register_mem64(brw,
                           HSW_CS_GPR(1),
                           query->bo,
                           I915_GEM_DOMAIN_INSTRUCTION,
                           I915_GEM_DOMAIN_INSTRUCTION,
                           offset);

   offset += sizeof(uint64_t);
   brw_load_register_mem64(brw,
                           HSW_CS_GPR(2),
                           query->bo,
                           I915_GEM_DOMAIN_INSTRUCTION,
                           I915_GEM_DOMAIN_INSTRUCTION,
                           offset);

   offset += sizeof(uint64_t);
   brw_load_register_mem64(brw,
                           HSW_CS_GPR(3),
                           query->bo,
                           I915_GEM_DOMAIN_INSTRUCTION,
                           I915_GEM_DOMAIN_INSTRUCTION,
                           offset);

   offset += sizeof(uint64_t);
   brw_load_register_mem64(brw,
                           HSW_CS_GPR(4),
                           query->bo,
                           I915_GEM_DOMAIN_INSTRUCTION,
                           I915_GEM_DOMAIN_INSTRUCTION,
                           offset);
}
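
/*
 * R3 = R4 - R3;
 * R1 = R2 - R1;
 * R1 = R3 - R1;
 * R0 = R0 | R1;
 *
 * GPR1-GPR4 hold the begin/end snapshots of the two transform feedback
 * counters for one stream; the OR into GPR0 accumulates a non-zero value
 * whenever the two deltas differ, i.e. the stream overflowed.
 */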
static void
calc_overflow_for_stream(struct brw_context *brw)
{
   static const uint32_t maths[] = {
      MI_MATH_ALU2(LOAD, SRCA, R4),
      MI_MATH_ALU2(LOAD, SRCB, R3),
      MI_MATH_ALU0(SUB),
      MI_MATH_ALU2(STORE, R3, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R2),
      MI_MATH_ALU2(LOAD, SRCB, R1),
      MI_MATH_ALU0(SUB),
      MI_MATH_ALU2(STORE, R1, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R3),
      MI_MATH_ALU2(LOAD, SRCB, R1),
      MI_MATH_ALU0(SUB),
      MI_MATH_ALU2(STORE, R1, ACCU),
      MI_MATH_ALU2(LOAD, SRCA, R1),
      MI_MATH_ALU2(LOAD, SRCB, R0),
      MI_MATH_ALU0(OR),
      MI_MATH_ALU2(STORE, R0, ACCU),
   };

   BEGIN_BATCH(1 + ARRAY_SIZE(maths));
   OUT_BATCH(HSW_MI_MATH | (1 + ARRAY_SIZE(maths) - 2));

   for (int m = 0; m < ARRAY_SIZE(maths); m++)
      OUT_BATCH(maths[m]);

   ADVANCE_BATCH();
}

static void
calc_overflow_to_gpr0(struct brw_context *brw, struct brw_query_object *query,
                      int count)
{
   brw_load_register_imm64(brw, HSW_CS_GPR(0), 0ull);
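
   /* GPR0 accumulates the overflow result; handle one stream per iteration. */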
   for (int i = 0; i < count; i++) {
      load_overflow_data_to_cs_gprs(brw, query, i);
      calc_overflow_for_stream(brw);
   }
}

/*
 * Take a query and calculate whether there was overflow during transform
 * feedback. Store the result in the gpr0 register.
 */
void
hsw_overflow_result_to_gpr0(struct brw_context *brw,
                            struct brw_query_object *query,
                            int count)
{
   calc_overflow_to_gpr0(brw, query, count);
   gpr0_to_bool(brw);
}
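
/*
 * Load the query's result (or its availability) into GPR0 and normalize it
 * according to the query target, so it can then be written out to the query
 * buffer object.
 */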
static void
hsw_result_to_gpr0(struct gl_context *ctx, struct brw_query_object *query,
                   struct gl_buffer_object *buf, intptr_t offset,
                   GLenum pname, GLenum ptype)
{
   struct brw_context *brw = brw_context(ctx);

   assert(pname != GL_QUERY_TARGET);

   if (pname == GL_QUERY_RESULT_AVAILABLE) {
      /* The query result availability is stored at offset 2 * sizeof(uint64_t)
       * of the query bo, after the begin/end counter snapshots.
       */
      brw_load_register_mem64(brw,
                              HSW_CS_GPR(0),
                              query->bo,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              2 * sizeof(uint64_t));
      return;
   }

   if (pname == GL_QUERY_RESULT) {
      /* Since GL_QUERY_RESULT_NO_WAIT wasn't used, they want us to stall to
       * make sure the query is available.
       */
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_CS_STALL |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   if (query->Base.Target == GL_TIMESTAMP) {
      brw_load_register_mem64(brw,
                              HSW_CS_GPR(0),
                              query->bo,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              0 * sizeof(uint64_t));
   } else if (query->Base.Target == GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB
              || query->Base.Target == GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB) {
      /* Don't do anything in advance here, since the math for this is a little
       * more complex.
       */
   } else {
      brw_load_register_mem64(brw,
                              HSW_CS_GPR(1),
                              query->bo,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              0 * sizeof(uint64_t));
      brw_load_register_mem64(brw,
                              HSW_CS_GPR(2),
                              query->bo,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              1 * sizeof(uint64_t));
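
      /* GPR0 = GPR2 - GPR1: the query result is the difference between the
       * end and begin snapshots loaded above.
       */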
      BEGIN_BATCH(5);
      OUT_BATCH(HSW_MI_MATH | (5 - 2));

      OUT_BATCH(MI_MATH_ALU2(LOAD, SRCA, R2));
      OUT_BATCH(MI_MATH_ALU2(LOAD, SRCB, R1));
      OUT_BATCH(MI_MATH_ALU0(SUB));
      OUT_BATCH(MI_MATH_ALU2(STORE, R0, ACCU));

      ADVANCE_BATCH();
   }

   switch (query->Base.Target) {
   case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
      /* Implement the "WaDividePSInvocationCountBy4:HSW,BDW" workaround:
       * "Invocation counter is 4 times actual. WA: SW to divide HW reported
       *  PS Invocations value by 4."
       *
       * Prior to Haswell, invocation count was counted by the WM, and it
       * buggily counted invocations in units of subspans (2x2 unit). To get
       * the correct value, the CS multiplied this by 4. With HSW the logic
       * moved and the number of pixel shader invocations is emitted correctly,
       * but whoever moved it forgot to undo the multiply by 4.
       */
      if (brw->gen == 8 || brw->is_haswell)
         shr_gpr0_by_2_bits(brw);
      break;
   case GL_TIME_ELAPSED:
   case GL_TIMESTAMP:
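      /* Both counters tick once per 80 ns on these parts; multiply by 80 to
       * convert the raw value to nanoseconds.
       */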
      mult_gpr0_by_80(brw);
      if (query->Base.Target == GL_TIMESTAMP) {
         keep_gpr0_lower_n_bits(brw, 36);
      }
      break;
   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
      gpr0_to_bool(brw);
      break;
   case GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB:
      hsw_overflow_result_to_gpr0(brw, query, 1);
      break;
   case GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB:
      hsw_overflow_result_to_gpr0(brw, query, MAX_VERTEX_STREAMS);
      break;
   }
}

/*
 * Store immediate data into the user buffer using the requested size.
 */
static void
store_query_result_imm(struct brw_context *brw, drm_bacon_bo *bo,
                       uint32_t offset, GLenum ptype, uint64_t imm)
{
   switch (ptype) {
   case GL_INT:
   case GL_UNSIGNED_INT:
      brw_store_data_imm32(brw, bo, offset, imm);
      break;
   case GL_INT64_ARB:
   case GL_UNSIGNED_INT64_ARB:
      brw_store_data_imm64(brw, bo, offset, imm);
      break;
   default:
      unreachable("Unexpected result type");
   }
}

static void
set_predicate(struct brw_context *brw, drm_bacon_bo *query_bo)
{
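   /* Program MI_PREDICATE so that later predicated commands execute only when
    * the query availability word is non-zero: SRC1 is 0, SRC0 is the
    * availability value, and LOADINV inverts the (SRC0 == SRC1) comparison.
    */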
   brw_load_register_imm64(brw, MI_PREDICATE_SRC1, 0ull);

   /* Load query availability into SRC0 */
   brw_load_register_mem64(brw, MI_PREDICATE_SRC0, query_bo,
                           I915_GEM_DOMAIN_INSTRUCTION, 0,
                           2 * sizeof(uint64_t));

   /* predicate = !(query_availability == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOADINV |
             MI_PREDICATE_COMBINEOP_SET |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();
}

/*
 * Store data from the register into the user buffer using the requested size.
 * The write also enables the predication to prevent writing the result if the
 * query has not finished yet.
 */
static void
store_query_result_reg(struct brw_context *brw, drm_bacon_bo *bo,
                       uint32_t offset, GLenum ptype, uint32_t reg,
                       const bool pipelined)
{
   uint32_t cmd_size = brw->gen >= 8 ? 4 : 3;
   uint32_t dwords = (ptype == GL_INT || ptype == GL_UNSIGNED_INT) ? 1 : 2;
   assert(brw->gen >= 6);
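
   /* Each MI_STORE_REGISTER_MEM writes a single dword, so 64-bit result types
    * need two commands; the predicate bit makes the write conditional on the
    * MI_PREDICATE state set up by set_predicate().
    */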
   BEGIN_BATCH(dwords * cmd_size);
   for (int i = 0; i < dwords; i++) {
      OUT_BATCH(MI_STORE_REGISTER_MEM |
                (pipelined ? MI_STORE_REGISTER_MEM_PREDICATE : 0) |
                (cmd_size - 2));
      OUT_BATCH(reg + 4 * i);
      if (brw->gen >= 8) {
         OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION,
                     I915_GEM_DOMAIN_INSTRUCTION, offset + 4 * i);
      } else {
         OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION,
                   I915_GEM_DOMAIN_INSTRUCTION, offset + 4 * i);
      }
   }
   ADVANCE_BATCH();
}

static void
hsw_store_query_result(struct gl_context *ctx, struct gl_query_object *q,
                       struct gl_buffer_object *buf, intptr_t offset,
                       GLenum pname, GLenum ptype)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;
   struct intel_buffer_object *bo = intel_buffer_object(buf);
   const bool pipelined = brw_is_query_pipelined(query);

   if (pname == GL_QUERY_TARGET) {
      store_query_result_imm(brw, bo->buffer, offset, ptype,
                             query->Base.Target);
   } else if (pname == GL_QUERY_RESULT_AVAILABLE && !pipelined) {
      store_query_result_imm(brw, bo->buffer, offset, ptype, 1ull);
   } else if (query->bo) {
      /* The query bo is still around. Therefore, we:
       *
       *  1. Compute the current result in GPR0
       *  2. Set the command streamer predicate based on query availability
       *  3. (With predication) Write GPR0 to the requested buffer
       */
      hsw_result_to_gpr0(ctx, query, buf, offset, pname, ptype);
      if (pipelined)
         set_predicate(brw, query->bo);
      store_query_result_reg(brw, bo->buffer, offset, ptype, HSW_CS_GPR(0),
                             pipelined);
   } else {
      /* The query bo is gone, so the query must have been processed into
       * client memory. In this case we can fill the buffer location with the
       * requested data using MI_STORE_DATA_IMM.
       */
      switch (pname) {
      case GL_QUERY_RESULT_AVAILABLE:
         store_query_result_imm(brw, bo->buffer, offset, ptype, 1ull);
         break;
      case GL_QUERY_RESULT_NO_WAIT:
      case GL_QUERY_RESULT:
         store_query_result_imm(brw, bo->buffer, offset, ptype,
                                q->Result);
         break;
      default:
         unreachable("Unexpected result type");
      }
   }
}

/* Initialize hsw+-specific query object functions. */
void hsw_init_queryobj_functions(struct dd_function_table *functions)
{
   gen6_init_queryobj_functions(functions);
   functions->StoreQueryResult = hsw_store_query_result;
}