/*
 * Copyright © 2015 Intel Corporation
 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
24 #include "util/ralloc.h"
25 #include "brw_context.h"
29 #include "brw_shader.h"
30 #include "intel_mipmap_tree.h"
31 #include "intel_batchbuffer.h"
32 #include "brw_state.h"
33 #include "program/prog_statevars.h"
34 #include "compiler/glsl/ir_uniform.h"
37 brw_upload_cs_state(struct brw_context
*brw
)
39 if (!brw
->cs
.prog_data
)
43 uint32_t *desc
= (uint32_t*) brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
45 struct gl_program
*prog
= (struct gl_program
*) brw
->compute_program
;
46 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
47 struct brw_cs_prog_data
*cs_prog_data
= brw
->cs
.prog_data
;
48 struct brw_stage_prog_data
*prog_data
= &cs_prog_data
->base
;
50 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
) {
51 brw
->vtbl
.emit_buffer_surface_state(
52 brw
, &stage_state
->surf_offset
[
53 prog_data
->binding_table
.shader_time_start
],
54 brw
->shader_time
.bo
, 0, BRW_SURFACEFORMAT_RAW
,
55 brw
->shader_time
.bo
->size
, 1, true);
58 uint32_t *bind
= (uint32_t*) brw_state_batch(brw
, AUB_TRACE_BINDING_TABLE
,
59 prog_data
->binding_table
.size_bytes
,
60 32, &stage_state
->bind_bo_offset
);
62 unsigned local_id_dwords
= 0;
64 if (prog
->SystemValuesRead
& SYSTEM_BIT_LOCAL_INVOCATION_ID
)
65 local_id_dwords
= cs_prog_data
->local_invocation_id_regs
* 8;
67 unsigned push_constant_data_size
=
68 (prog_data
->nr_params
+ local_id_dwords
) * sizeof(gl_constant_value
);
69 unsigned reg_aligned_constant_size
= ALIGN(push_constant_data_size
, 32);
70 unsigned push_constant_regs
= reg_aligned_constant_size
/ 32;
72 uint32_t dwords
= brw
->gen
< 8 ? 8 : 9;
74 OUT_BATCH(MEDIA_VFE_STATE
<< 16 | (dwords
- 2));
76 if (prog_data
->total_scratch
) {
78 OUT_RELOC64(stage_state
->scratch_bo
,
79 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
80 ffs(prog_data
->total_scratch
) - 11);
82 OUT_RELOC(stage_state
->scratch_bo
,
83 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
84 ffs(prog_data
->total_scratch
) - 11);
91 const uint32_t vfe_num_urb_entries
= brw
->gen
>= 8 ? 2 : 0;
92 const uint32_t vfe_gpgpu_mode
=
93 brw
->gen
== 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE
) : 0;
94 OUT_BATCH(SET_FIELD(brw
->max_cs_threads
- 1, MEDIA_VFE_STATE_MAX_THREADS
) |
95 SET_FIELD(vfe_num_urb_entries
, MEDIA_VFE_STATE_URB_ENTRIES
) |
96 SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER
) |
97 SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW
) |
101 const uint32_t vfe_urb_allocation
= brw
->gen
>= 8 ? 2 : 0;
103 /* We are uploading duplicated copies of push constant uniforms for each
104 * thread. Although the local id data needs to vary per thread, it won't
105 * change for other uniform data. Unfortunately this duplication is
106 * required for gen7. As of Haswell, this duplication can be avoided, but
107 * this older mechanism with duplicated data continues to work.
109 * FINISHME: As of Haswell, we could make use of the
110 * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length" field
111 * to only store one copy of uniform data.
113 * FINISHME: Broadwell adds a new alternative "Indirect Payload Storage"
114 * which is described in the GPGPU_WALKER command and in the Broadwell PRM
115 * Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline => Mode of
116 * Operations => GPGPU Mode => Indirect Payload Storage.
118 * Note: The constant data is built in brw_upload_cs_push_constants below.
120 const uint32_t vfe_curbe_allocation
=
121 push_constant_regs
* cs_prog_data
->threads
;
122 OUT_BATCH(SET_FIELD(vfe_urb_allocation
, MEDIA_VFE_STATE_URB_ALLOC
) |
123 SET_FIELD(vfe_curbe_allocation
, MEDIA_VFE_STATE_CURBE_ALLOC
));
129 if (reg_aligned_constant_size
> 0) {
131 OUT_BATCH(MEDIA_CURBE_LOAD
<< 16 | (4 - 2));
133 OUT_BATCH(ALIGN(reg_aligned_constant_size
* cs_prog_data
->threads
, 64));
134 OUT_BATCH(stage_state
->push_const_offset
);
138 /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
139 memcpy(bind
, stage_state
->surf_offset
,
140 prog_data
->binding_table
.size_bytes
);
142 memset(desc
, 0, 8 * 4);
145 desc
[dw
++] = brw
->cs
.base
.prog_offset
;
147 desc
[dw
++] = 0; /* Kernel Start Pointer High */
149 desc
[dw
++] = stage_state
->sampler_offset
|
150 ((stage_state
->sampler_count
+ 3) / 4);
151 desc
[dw
++] = stage_state
->bind_bo_offset
;
152 desc
[dw
++] = SET_FIELD(push_constant_regs
, MEDIA_CURBE_READ_LENGTH
);
153 const uint32_t media_threads
=
155 SET_FIELD(cs_prog_data
->threads
, GEN8_MEDIA_GPGPU_THREAD_COUNT
) :
156 SET_FIELD(cs_prog_data
->threads
, MEDIA_GPGPU_THREAD_COUNT
);
157 assert(cs_prog_data
->threads
<= brw
->max_cs_threads
);
159 assert(prog_data
->total_shared
<= 64 * 1024);
160 uint32_t slm_size
= 0;
161 if (prog_data
->total_shared
> 0) {
162 /* slm_size is in 4k increments, but must be a power of 2. */
164 while (slm_size
< prog_data
->total_shared
)
166 slm_size
/= 4 * 1024;
170 SET_FIELD(cs_prog_data
->uses_barrier
, MEDIA_BARRIER_ENABLE
) |
171 SET_FIELD(slm_size
, MEDIA_SHARED_LOCAL_MEMORY_SIZE
) |
175 OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD
<< 16 | (4 - 2));
182 const struct brw_tracked_state brw_cs_state
= {
184 .mesa
= _NEW_PROGRAM_CONSTANTS
,
185 .brw
= BRW_NEW_BATCH
|
187 BRW_NEW_CS_PROG_DATA
|
188 BRW_NEW_PUSH_CONSTANT_ALLOCATION
|
189 BRW_NEW_SAMPLER_STATE_TABLE
|
192 .emit
= brw_upload_cs_state
197 * Creates a region containing the push constants for the CS on gen7+.
199 * Push constants are constant values (such as GLSL uniforms) that are
200 * pre-loaded into a shader stage's register space at thread spawn time.
202 * For other stages, see brw_curbe.c:brw_upload_constant_buffer for the
203 * equivalent gen4/5 code and gen6_vs_state.c:gen6_upload_push_constants for
207 brw_upload_cs_push_constants(struct brw_context
*brw
,
208 const struct gl_program
*prog
,
209 const struct brw_cs_prog_data
*cs_prog_data
,
210 struct brw_stage_state
*stage_state
,
211 enum aub_state_struct_type type
)
213 struct gl_context
*ctx
= &brw
->ctx
;
214 const struct brw_stage_prog_data
*prog_data
=
215 (struct brw_stage_prog_data
*) cs_prog_data
;
216 unsigned local_id_dwords
= 0;
218 if (prog
->SystemValuesRead
& SYSTEM_BIT_LOCAL_INVOCATION_ID
)
219 local_id_dwords
= cs_prog_data
->local_invocation_id_regs
* 8;
221 /* Updates the ParamaterValues[i] pointers for all parameters of the
222 * basic type of PROGRAM_STATE_VAR.
224 /* XXX: Should this happen somewhere before to get our state flag set? */
225 _mesa_load_state_parameters(ctx
, prog
->Parameters
);
227 if (prog_data
->nr_params
== 0 && local_id_dwords
== 0) {
228 stage_state
->push_const_size
= 0;
230 gl_constant_value
*param
;
233 const unsigned push_constant_data_size
=
234 (local_id_dwords
+ prog_data
->nr_params
) * sizeof(gl_constant_value
);
235 const unsigned reg_aligned_constant_size
= ALIGN(push_constant_data_size
, 32);
236 const unsigned param_aligned_count
=
237 reg_aligned_constant_size
/ sizeof(*param
);
239 param
= (gl_constant_value
*)
240 brw_state_batch(brw
, type
,
241 ALIGN(reg_aligned_constant_size
*
242 cs_prog_data
->threads
, 64),
243 64, &stage_state
->push_const_offset
);
246 STATIC_ASSERT(sizeof(gl_constant_value
) == sizeof(float));
248 brw_cs_fill_local_id_payload(cs_prog_data
, param
, cs_prog_data
->threads
,
249 reg_aligned_constant_size
);
251 /* _NEW_PROGRAM_CONSTANTS */
252 for (t
= 0; t
< cs_prog_data
->threads
; t
++) {
253 gl_constant_value
*next_param
=
254 ¶m
[t
* param_aligned_count
+ local_id_dwords
];
255 for (i
= 0; i
< prog_data
->nr_params
; i
++) {
256 next_param
[i
] = *prog_data
->param
[i
];
260 stage_state
->push_const_size
= ALIGN(prog_data
->nr_params
, 8) / 8;
266 gen7_upload_cs_push_constants(struct brw_context
*brw
)
268 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
270 /* BRW_NEW_COMPUTE_PROGRAM */
271 const struct brw_compute_program
*cp
=
272 (struct brw_compute_program
*) brw
->compute_program
;
275 /* CACHE_NEW_CS_PROG */
276 struct brw_cs_prog_data
*cs_prog_data
= brw
->cs
.prog_data
;
278 brw_upload_cs_push_constants(brw
, &cp
->program
.Base
, cs_prog_data
,
279 stage_state
, AUB_TRACE_WM_CONSTANTS
);
283 const struct brw_tracked_state gen7_cs_push_constants
= {
285 .mesa
= _NEW_PROGRAM_CONSTANTS
,
286 .brw
= BRW_NEW_BATCH
|
288 BRW_NEW_COMPUTE_PROGRAM
|
289 BRW_NEW_PUSH_CONSTANT_ALLOCATION
,
291 .emit
= gen7_upload_cs_push_constants
,
295 * Creates a new CS constant buffer reflecting the current CS program's
296 * constants, if needed by the CS program.
299 brw_upload_cs_pull_constants(struct brw_context
*brw
)
301 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
303 /* BRW_NEW_COMPUTE_PROGRAM */
304 struct brw_compute_program
*cp
=
305 (struct brw_compute_program
*) brw
->compute_program
;
307 /* BRW_NEW_CS_PROG_DATA */
308 const struct brw_stage_prog_data
*prog_data
= &brw
->cs
.prog_data
->base
;
310 /* _NEW_PROGRAM_CONSTANTS */
311 brw_upload_pull_constants(brw
, BRW_NEW_SURFACES
, &cp
->program
.Base
,
312 stage_state
, prog_data
);
315 const struct brw_tracked_state brw_cs_pull_constants
= {
317 .mesa
= _NEW_PROGRAM_CONSTANTS
,
318 .brw
= BRW_NEW_BATCH
|
320 BRW_NEW_COMPUTE_PROGRAM
|
321 BRW_NEW_CS_PROG_DATA
,
323 .emit
= brw_upload_cs_pull_constants
,