2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "util/ralloc.h"
25 #include "brw_context.h"
29 #include "brw_shader.h"
30 #include "intel_mipmap_tree.h"
31 #include "intel_batchbuffer.h"
32 #include "brw_state.h"
35 get_cs_thread_count(const struct brw_cs_prog_data
*cs_prog_data
)
37 const unsigned simd_size
= cs_prog_data
->simd_size
;
38 unsigned group_size
= cs_prog_data
->local_size
[0] *
39 cs_prog_data
->local_size
[1] * cs_prog_data
->local_size
[2];
41 return (group_size
+ simd_size
- 1) / simd_size
;
/* Emit all GPU state for the compute-shader stage: the interface
 * descriptor, binding table pointer, MEDIA_VFE_STATE, and the CURBE
 * (push constant) load.
 * NOTE(review): this extraction is elided — several original lines
 * (BEGIN_BATCH/ADVANCE_BATCH, else-branches, some descriptor dword
 * stores, the `dw` declaration) are missing from view; comments below
 * hedge wherever code is absent.
 */
46 brw_upload_cs_state(struct brw_context
*brw
)
/* No compiled CS program yet: nothing to upload (early return elided). */
48 if (!brw
->cs
.prog_data
)
/* Allocate batch space for the interface descriptor (size/align args
 * elided here; desc is memset to 8 dwords below). */
52 uint32_t *desc
= (uint32_t*) brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
54 struct gl_program
*prog
= (struct gl_program
*) brw
->compute_program
;
55 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
56 struct brw_cs_prog_data
*cs_prog_data
= brw
->cs
.prog_data
;
57 struct brw_stage_prog_data
*prog_data
= &cs_prog_data
->base
;
/* INTEL_DEBUG=shader_time: expose the shader_time BO as a RAW surface
 * at the binding-table slot reserved for shader-time writes. */
59 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
) {
60 brw
->vtbl
.emit_buffer_surface_state(
61 brw
, &stage_state
->surf_offset
[
62 prog_data
->binding_table
.shader_time_start
],
63 brw
->shader_time
.bo
, 0, BRW_SURFACEFORMAT_RAW
,
64 brw
->shader_time
.bo
->size
, 1, true);
/* Allocate the binding table; its contents are copied in below. */
67 uint32_t *bind
= (uint32_t*) brw_state_batch(brw
, AUB_TRACE_BINDING_TABLE
,
68 prog_data
->binding_table
.size_bytes
,
69 32, &stage_state
->bind_bo_offset
);
/* Extra per-thread push-constant dwords for gl_LocalInvocationID,
 * only when the shader actually reads that system value. */
71 unsigned local_id_dwords
= 0;
73 if (prog
->SystemValuesRead
& SYSTEM_BIT_LOCAL_INVOCATION_ID
) {
75 brw_cs_prog_local_id_payload_dwords(prog
, cs_prog_data
->simd_size
);
/* Push data is padded up to a whole 32-byte register. */
78 unsigned push_constant_data_size
=
79 (prog_data
->nr_params
+ local_id_dwords
) * sizeof(gl_constant_value
);
80 unsigned reg_aligned_constant_size
= ALIGN(push_constant_data_size
, 32);
81 unsigned push_constant_regs
= reg_aligned_constant_size
/ 32;
82 unsigned threads
= get_cs_thread_count(cs_prog_data
);
/* MEDIA_VFE_STATE is 8 dwords pre-gen8, 9 dwords on gen8+
 * (64-bit scratch address). BEGIN_BATCH call elided from this view. */
84 uint32_t dwords
= brw
->gen
< 8 ? 8 : 9;
86 OUT_BATCH(MEDIA_VFE_STATE
<< 16 | (dwords
- 2));
/* Scratch space: "ffs(total_scratch) - 11" encodes the power-of-two
 * per-thread scratch size field in the scratch base address dword.
 * The gen>=8 vs. older branch selection around these relocs is elided. */
88 if (prog_data
->total_scratch
) {
90 OUT_RELOC64(stage_state
->scratch_bo
,
91 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
92 ffs(prog_data
->total_scratch
) - 11);
94 OUT_RELOC(stage_state
->scratch_bo
,
95 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
96 ffs(prog_data
->total_scratch
) - 11);
/* VFE dword 2: max thread count, URB entries, GTW settings; gen7 also
 * needs the GPGPU-mode bit set here. */
103 const uint32_t vfe_num_urb_entries
= brw
->gen
>= 8 ? 2 : 0;
104 const uint32_t vfe_gpgpu_mode
=
105 brw
->gen
== 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE
) : 0;
106 OUT_BATCH(SET_FIELD(brw
->max_cs_threads
- 1, MEDIA_VFE_STATE_MAX_THREADS
) |
107 SET_FIELD(vfe_num_urb_entries
, MEDIA_VFE_STATE_URB_ENTRIES
) |
108 SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER
) |
109 SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW
) |
113 const uint32_t vfe_urb_allocation
= brw
->gen
>= 8 ? 2 : 0;
115 /* We are uploading duplicated copies of push constant uniforms for each
116 * thread. Although the local id data needs to vary per thread, it won't
117 * change for other uniform data. Unfortunately this duplication is
118 * required for gen7. As of Haswell, this duplication can be avoided, but
119 * this older mechanism with duplicated data continues to work.
121 * FINISHME: As of Haswell, we could make use of the
122 * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length" field
123 * to only store one copy of uniform data.
125 * FINISHME: Broadwell adds a new alternative "Indirect Payload Storage"
126 * which is described in the GPGPU_WALKER command and in the Broadwell PRM
127 * Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline => Mode of
128 * Operations => GPGPU Mode => Indirect Payload Storage.
130 * Note: The constant data is built in brw_upload_cs_push_constants below.
132 const uint32_t vfe_curbe_allocation
= push_constant_regs
* threads
;
133 OUT_BATCH(SET_FIELD(vfe_urb_allocation
, MEDIA_VFE_STATE_URB_ALLOC
) |
134 SET_FIELD(vfe_curbe_allocation
, MEDIA_VFE_STATE_CURBE_ALLOC
));
/* MEDIA_CURBE_LOAD: point the fixed function at the per-thread push
 * constant data built by brw_upload_cs_push_constants(). */
140 if (reg_aligned_constant_size
> 0) {
142 OUT_BATCH(MEDIA_CURBE_LOAD
<< 16 | (4 - 2));
144 OUT_BATCH(reg_aligned_constant_size
* threads
);
145 OUT_BATCH(stage_state
->push_const_offset
);
149 /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
150 memcpy(bind
, stage_state
->surf_offset
,
151 prog_data
->binding_table
.size_bytes
);
/* Build the 8-dword INTERFACE_DESCRIPTOR_DATA. NOTE(review): the
 * declaration/initialization of the `dw` index is elided from view. */
153 memset(desc
, 0, 8 * 4);
156 desc
[dw
++] = brw
->cs
.base
.prog_offset
;
158 desc
[dw
++] = 0; /* Kernel Start Pointer High */
/* Sampler state pointer plus sampler count in units of 4. */
160 desc
[dw
++] = stage_state
->sampler_offset
|
161 ((stage_state
->sampler_count
+ 3) / 4);
162 desc
[dw
++] = stage_state
->bind_bo_offset
;
163 desc
[dw
++] = SET_FIELD(push_constant_regs
, MEDIA_CURBE_READ_LENGTH
);
/* Thread-count field moved/widened on gen8 (condition line elided). */
164 const uint32_t media_threads
=
166 SET_FIELD(threads
, GEN8_MEDIA_GPGPU_THREAD_COUNT
) :
167 SET_FIELD(threads
, MEDIA_GPGPU_THREAD_COUNT
);
168 assert(threads
<= brw
->max_cs_threads
);
170 SET_FIELD(cs_prog_data
->uses_barrier
, MEDIA_BARRIER_ENABLE
) |
/* Finally point the GPU at the descriptor we just built. */
174 OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD
<< 16 | (4 - 2));
/* State-atom table entry: re-run brw_upload_cs_state() whenever program
 * constants, the batch, the CS program data, or the push-constant
 * allocation change. NOTE(review): the `.dirty = { ... }` wrapper and
 * any trailing .brw flags are elided from this extraction. */
181 const struct brw_tracked_state brw_cs_state
= {
183 .mesa
= _NEW_PROGRAM_CONSTANTS
,
184 .brw
= BRW_NEW_BATCH
|
185 BRW_NEW_CS_PROG_DATA
|
186 BRW_NEW_PUSH_CONSTANT_ALLOCATION
|
189 .emit
= brw_upload_cs_state
194 * We are building the local ID push constant data using the simplest possible
195 * method. We simply push the local IDs directly as they should appear in the
196 * registers for the uvec3 gl_LocalInvocationID variable.
198 * Therefore, for SIMD8, we use 3 full registers, and for SIMD16 we use 6
199 * registers worth of push constant space.
201 * Note: Any updates to brw_cs_prog_local_id_payload_dwords,
202 * fill_local_id_payload or fs_visitor::emit_cs_local_invocation_id_setup need
205 * FINISHME: There are a few easy optimizations to consider.
207 * 1. If gl_WorkGroupSize x, y or z is 1, we can just use zero, and there is
208 * no need for using push constant space for that dimension.
210 * 2. Since GL_MAX_COMPUTE_WORK_GROUP_SIZE is currently 1024 or less, we can
211 * easily use 16-bit words rather than 32-bit dwords in the push constant
214 * 3. If gl_WorkGroupSize x, y or z is small, then we can use bytes for
215 * conveying the data, and thereby reduce push constant usage.
/**
 * Size, in dwords, of the push-constant payload holding
 * gl_LocalInvocationID: one dword per SIMD channel for each of the
 * three components (x, y, z).  `prog` is accepted for interface
 * symmetry with the other payload helpers and is not consulted.
 */
unsigned
brw_cs_prog_local_id_payload_dwords(const struct gl_program *prog,
                                    unsigned dispatch_width)
{
   /* Three uvec3 components, each one dword wide per channel. */
   const unsigned id_components = 3;
   return id_components * dispatch_width;
}
/* Write one thread's gl_LocalInvocationID payload into `buffer`: for
 * each SIMD channel, store the current (x, y, z) invocation id into
 * three consecutive simd_size-long dword arrays, then advance the id
 * with x varying fastest, wrapping each component at its local_size
 * bound.
 * NOTE(review): the increment and reset statements that should sit
 * between the stores and the wrap tests below are elided from this
 * extraction — confirm against the original before modifying. */
227 fill_local_id_payload(const struct brw_cs_prog_data
*cs_prog_data
,
228 void *buffer
, unsigned *x
, unsigned *y
, unsigned *z
)
230 uint32_t *param
= (uint32_t *)buffer
;
231 for (unsigned i
= 0; i
< cs_prog_data
->simd_size
; i
++) {
/* Channel i of the x, y, and z component arrays. */
232 param
[0 * cs_prog_data
->simd_size
+ i
] = *x
;
233 param
[1 * cs_prog_data
->simd_size
+ i
] = *y
;
234 param
[2 * cs_prog_data
->simd_size
+ i
] = *z
;
/* Wrap x back to 0 and carry into y, then z (carry bodies elided). */
237 if (*x
== cs_prog_data
->local_size
[0]) {
240 if (*y
== cs_prog_data
->local_size
[1]) {
243 if (*z
== cs_prog_data
->local_size
[2])
252 * Creates a region containing the push constants for the CS on gen7+.
254 * Push constants are constant values (such as GLSL uniforms) that are
255 * pre-loaded into a shader stage's register space at thread spawn time.
257 * For other stages, see brw_curbe.c:brw_upload_constant_buffer for the
258 * equivalent gen4/5 code and gen6_vs_state.c:gen6_upload_push_constants for
/* Build the CS push-constant (CURBE) buffer for gen7+: every hardware
 * thread gets its own register-aligned copy of the uniform parameters,
 * preceded by that thread's gl_LocalInvocationID data when the shader
 * reads it.  Records the batch offset in stage_state->push_const_offset
 * and the size in push_const_size.
 * NOTE(review): this extraction is elided — closing braces, the `t` and
 * `i` loop-counter declarations, and an else around the sizing path are
 * missing from view. */
262 brw_upload_cs_push_constants(struct brw_context
*brw
,
263 const struct gl_program
*prog
,
264 const struct brw_cs_prog_data
*cs_prog_data
,
265 struct brw_stage_state
*stage_state
,
266 enum aub_state_struct_type type
)
268 struct gl_context
*ctx
= &brw
->ctx
;
269 const struct brw_stage_prog_data
*prog_data
=
270 (struct brw_stage_prog_data
*) cs_prog_data
;
/* Per-thread gl_LocalInvocationID dwords, 0 when unused. */
271 unsigned local_id_dwords
= 0;
273 if (prog
->SystemValuesRead
& SYSTEM_BIT_LOCAL_INVOCATION_ID
) {
275 brw_cs_prog_local_id_payload_dwords(prog
, cs_prog_data
->simd_size
);
278 /* Updates the ParamaterValues[i] pointers for all parameters of the
279 * basic type of PROGRAM_STATE_VAR.
281 /* XXX: Should this happen somewhere before to get our state flag set? */
282 _mesa_load_state_parameters(ctx
, prog
->Parameters
);
/* Nothing to push at all: record a zero size and skip the upload. */
284 if (prog_data
->nr_params
== 0 && local_id_dwords
== 0) {
285 stage_state
->push_const_size
= 0;
287 gl_constant_value
*param
;
/* Per-thread data padded to a whole 32-byte register. */
290 const unsigned push_constant_data_size
=
291 (local_id_dwords
+ prog_data
->nr_params
) * sizeof(gl_constant_value
);
292 const unsigned reg_aligned_constant_size
= ALIGN(push_constant_data_size
, 32);
293 const unsigned param_aligned_count
=
294 reg_aligned_constant_size
/ sizeof(*param
);
296 unsigned threads
= get_cs_thread_count(cs_prog_data
);
/* One reg-aligned copy per hardware thread, 32-byte aligned. */
298 param
= (gl_constant_value
*)
299 brw_state_batch(brw
, type
,
300 reg_aligned_constant_size
* threads
,
301 32, &stage_state
->push_const_offset
);
/* The dword stores below assume gl_constant_value is 4 bytes. */
304 STATIC_ASSERT(sizeof(gl_constant_value
) == sizeof(float));
306 /* _NEW_PROGRAM_CONSTANTS */
/* (x, y, z) walk the whole workgroup across the thread loop; the
 * fill helper advances them between threads. */
307 unsigned x
= 0, y
= 0, z
= 0;
308 for (t
= 0; t
< threads
; t
++) {
/* NOTE(review): "¶m" below is mojibake for "&param" —
 * fix the encoding when restoring this file. */
309 gl_constant_value
*next_param
= &param
[t
* param_aligned_count
];
310 if (local_id_dwords
> 0) {
311 fill_local_id_payload(cs_prog_data
, (void*)next_param
, &x
, &y
, &z
);
/* Uniforms follow the local-id block within this thread's copy. */
312 next_param
+= local_id_dwords
;
314 for (i
= 0; i
< prog_data
->nr_params
; i
++) {
315 next_param
[i
] = *prog_data
->param
[i
];
/* Size in 8-dword units. NOTE(review): local_id_dwords is not
 * included here — confirm against the original whether intended. */
319 stage_state
->push_const_size
= ALIGN(prog_data
->nr_params
, 8) / 8;
/* State-atom emit callback: gathers the current compute program and its
 * prog_data, then delegates to brw_upload_cs_push_constants() with the
 * WM-constants AUB annotation. */
325 gen7_upload_cs_push_constants(struct brw_context
*brw
)
327 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
329 /* BRW_NEW_COMPUTE_PROGRAM */
330 const struct brw_compute_program
*cp
=
331 (struct brw_compute_program
*) brw
->compute_program
;
334 /* CACHE_NEW_CS_PROG */
335 struct brw_cs_prog_data
*cs_prog_data
= brw
->cs
.prog_data
;
/* NOTE(review): a guard around cp/cs_prog_data being non-NULL appears
 * to be elided from this extraction. */
337 brw_upload_cs_push_constants(brw
, &cp
->program
.Base
, cs_prog_data
,
338 stage_state
, AUB_TRACE_WM_CONSTANTS
);
/* State-atom table entry: re-upload CS push constants when program
 * constants, the batch, the compute program, or the push-constant
 * allocation change. NOTE(review): the `.dirty = { ... }` wrapper is
 * elided from this extraction. */
342 const struct brw_tracked_state gen7_cs_push_constants
= {
344 .mesa
= _NEW_PROGRAM_CONSTANTS
,
345 .brw
= BRW_NEW_BATCH
|
346 BRW_NEW_COMPUTE_PROGRAM
|
347 BRW_NEW_PUSH_CONSTANT_ALLOCATION
,
349 .emit
= gen7_upload_cs_push_constants
,