/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "util/ralloc.h"
#include "brw_context.h"
#include "brw_shader.h"
#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "program/prog_statevars.h"
#include "compiler/glsl/ir_uniform.h"
37 brw_upload_cs_state(struct brw_context
*brw
)
39 if (!brw
->cs
.prog_data
)
43 uint32_t *desc
= (uint32_t*) brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
45 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
46 struct brw_cs_prog_data
*cs_prog_data
= brw
->cs
.prog_data
;
47 struct brw_stage_prog_data
*prog_data
= &cs_prog_data
->base
;
48 const struct brw_device_info
*devinfo
= brw
->intelScreen
->devinfo
;
50 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
) {
51 brw
->vtbl
.emit_buffer_surface_state(
52 brw
, &stage_state
->surf_offset
[
53 prog_data
->binding_table
.shader_time_start
],
54 brw
->shader_time
.bo
, 0, BRW_SURFACEFORMAT_RAW
,
55 brw
->shader_time
.bo
->size
, 1, true);
58 uint32_t *bind
= (uint32_t*) brw_state_batch(brw
, AUB_TRACE_BINDING_TABLE
,
59 prog_data
->binding_table
.size_bytes
,
60 32, &stage_state
->bind_bo_offset
);
62 uint32_t dwords
= brw
->gen
< 8 ? 8 : 9;
64 OUT_BATCH(MEDIA_VFE_STATE
<< 16 | (dwords
- 2));
66 if (prog_data
->total_scratch
) {
68 OUT_RELOC64(stage_state
->scratch_bo
,
69 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
70 ffs(prog_data
->total_scratch
) - 11);
72 OUT_RELOC(stage_state
->scratch_bo
,
73 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
74 ffs(prog_data
->total_scratch
) - 11);
81 const uint32_t vfe_num_urb_entries
= brw
->gen
>= 8 ? 2 : 0;
82 const uint32_t vfe_gpgpu_mode
=
83 brw
->gen
== 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE
) : 0;
84 OUT_BATCH(SET_FIELD(brw
->max_cs_threads
- 1, MEDIA_VFE_STATE_MAX_THREADS
) |
85 SET_FIELD(vfe_num_urb_entries
, MEDIA_VFE_STATE_URB_ENTRIES
) |
86 SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER
) |
87 SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW
) |
91 const uint32_t vfe_urb_allocation
= brw
->gen
>= 8 ? 2 : 0;
93 /* We are uploading duplicated copies of push constant uniforms for each
94 * thread. Although the local id data needs to vary per thread, it won't
95 * change for other uniform data. Unfortunately this duplication is
96 * required for gen7. As of Haswell, this duplication can be avoided, but
97 * this older mechanism with duplicated data continues to work.
99 * FINISHME: As of Haswell, we could make use of the
100 * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length" field
101 * to only store one copy of uniform data.
103 * FINISHME: Broadwell adds a new alternative "Indirect Payload Storage"
104 * which is described in the GPGPU_WALKER command and in the Broadwell PRM
105 * Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline => Mode of
106 * Operations => GPGPU Mode => Indirect Payload Storage.
108 * Note: The constant data is built in brw_upload_cs_push_constants below.
110 const uint32_t vfe_curbe_allocation
=
111 ALIGN(cs_prog_data
->push
.per_thread
.regs
* cs_prog_data
->threads
+
112 cs_prog_data
->push
.cross_thread
.regs
, 2);
113 OUT_BATCH(SET_FIELD(vfe_urb_allocation
, MEDIA_VFE_STATE_URB_ALLOC
) |
114 SET_FIELD(vfe_curbe_allocation
, MEDIA_VFE_STATE_CURBE_ALLOC
));
120 if (cs_prog_data
->push
.total
.size
> 0) {
122 OUT_BATCH(MEDIA_CURBE_LOAD
<< 16 | (4 - 2));
124 OUT_BATCH(ALIGN(cs_prog_data
->push
.total
.size
, 64));
125 OUT_BATCH(stage_state
->push_const_offset
);
129 /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
130 memcpy(bind
, stage_state
->surf_offset
,
131 prog_data
->binding_table
.size_bytes
);
133 memset(desc
, 0, 8 * 4);
136 desc
[dw
++] = brw
->cs
.base
.prog_offset
;
138 desc
[dw
++] = 0; /* Kernel Start Pointer High */
140 desc
[dw
++] = stage_state
->sampler_offset
|
141 ((stage_state
->sampler_count
+ 3) / 4);
142 desc
[dw
++] = stage_state
->bind_bo_offset
;
143 desc
[dw
++] = SET_FIELD(cs_prog_data
->push
.per_thread
.regs
,
144 MEDIA_CURBE_READ_LENGTH
);
145 const uint32_t media_threads
=
147 SET_FIELD(cs_prog_data
->threads
, GEN8_MEDIA_GPGPU_THREAD_COUNT
) :
148 SET_FIELD(cs_prog_data
->threads
, MEDIA_GPGPU_THREAD_COUNT
);
149 assert(cs_prog_data
->threads
<= brw
->max_cs_threads
);
151 const uint32_t slm_size
= encode_slm_size(devinfo
, prog_data
->total_shared
);
154 SET_FIELD(cs_prog_data
->uses_barrier
, MEDIA_BARRIER_ENABLE
) |
155 SET_FIELD(slm_size
, MEDIA_SHARED_LOCAL_MEMORY_SIZE
) |
159 SET_FIELD(cs_prog_data
->push
.cross_thread
.regs
, CROSS_THREAD_READ_LENGTH
);
162 OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD
<< 16 | (4 - 2));
169 const struct brw_tracked_state brw_cs_state
= {
171 .mesa
= _NEW_PROGRAM_CONSTANTS
,
172 .brw
= BRW_NEW_BATCH
|
174 BRW_NEW_CS_PROG_DATA
|
175 BRW_NEW_PUSH_CONSTANT_ALLOCATION
|
176 BRW_NEW_SAMPLER_STATE_TABLE
|
179 .emit
= brw_upload_cs_state
184 * Creates a region containing the push constants for the CS on gen7+.
186 * Push constants are constant values (such as GLSL uniforms) that are
187 * pre-loaded into a shader stage's register space at thread spawn time.
189 * For other stages, see brw_curbe.c:brw_upload_constant_buffer for the
190 * equivalent gen4/5 code and gen6_vs_state.c:gen6_upload_push_constants for
194 brw_upload_cs_push_constants(struct brw_context
*brw
,
195 const struct gl_program
*prog
,
196 const struct brw_cs_prog_data
*cs_prog_data
,
197 struct brw_stage_state
*stage_state
,
198 enum aub_state_struct_type type
)
200 struct gl_context
*ctx
= &brw
->ctx
;
201 const struct brw_stage_prog_data
*prog_data
=
202 (struct brw_stage_prog_data
*) cs_prog_data
;
204 /* Updates the ParamaterValues[i] pointers for all parameters of the
205 * basic type of PROGRAM_STATE_VAR.
207 /* XXX: Should this happen somewhere before to get our state flag set? */
208 _mesa_load_state_parameters(ctx
, prog
->Parameters
);
210 if (cs_prog_data
->push
.total
.size
== 0) {
211 stage_state
->push_const_size
= 0;
216 gl_constant_value
*param
= (gl_constant_value
*)
217 brw_state_batch(brw
, type
, ALIGN(cs_prog_data
->push
.total
.size
, 64),
218 64, &stage_state
->push_const_offset
);
221 STATIC_ASSERT(sizeof(gl_constant_value
) == sizeof(float));
223 if (cs_prog_data
->push
.cross_thread
.size
> 0) {
224 gl_constant_value
*param_copy
= param
;
225 assert(cs_prog_data
->thread_local_id_index
< 0 ||
226 cs_prog_data
->thread_local_id_index
>=
227 cs_prog_data
->push
.cross_thread
.dwords
);
229 i
< cs_prog_data
->push
.cross_thread
.dwords
;
231 param_copy
[i
] = *prog_data
->param
[i
];
235 gl_constant_value thread_id
;
236 if (cs_prog_data
->push
.per_thread
.size
> 0) {
237 for (unsigned t
= 0; t
< cs_prog_data
->threads
; t
++) {
239 8 * (cs_prog_data
->push
.per_thread
.regs
* t
+
240 cs_prog_data
->push
.cross_thread
.regs
);
241 unsigned src
= cs_prog_data
->push
.cross_thread
.dwords
;
242 for ( ; src
< prog_data
->nr_params
; src
++, dst
++) {
243 if (src
!= cs_prog_data
->thread_local_id_index
)
244 param
[dst
] = *prog_data
->param
[src
];
246 thread_id
.u
= t
* cs_prog_data
->simd_size
;
247 param
[dst
] = thread_id
;
253 stage_state
->push_const_size
=
254 cs_prog_data
->push
.cross_thread
.regs
+
255 cs_prog_data
->push
.per_thread
.regs
;
260 gen7_upload_cs_push_constants(struct brw_context
*brw
)
262 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
264 /* BRW_NEW_COMPUTE_PROGRAM */
265 const struct brw_compute_program
*cp
=
266 (struct brw_compute_program
*) brw
->compute_program
;
269 /* CACHE_NEW_CS_PROG */
270 struct brw_cs_prog_data
*cs_prog_data
= brw
->cs
.prog_data
;
272 brw_upload_cs_push_constants(brw
, &cp
->program
.Base
, cs_prog_data
,
273 stage_state
, AUB_TRACE_WM_CONSTANTS
);
277 const struct brw_tracked_state gen7_cs_push_constants
= {
279 .mesa
= _NEW_PROGRAM_CONSTANTS
,
280 .brw
= BRW_NEW_BATCH
|
282 BRW_NEW_COMPUTE_PROGRAM
|
283 BRW_NEW_PUSH_CONSTANT_ALLOCATION
,
285 .emit
= gen7_upload_cs_push_constants
,
289 * Creates a new CS constant buffer reflecting the current CS program's
290 * constants, if needed by the CS program.
293 brw_upload_cs_pull_constants(struct brw_context
*brw
)
295 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
297 /* BRW_NEW_COMPUTE_PROGRAM */
298 struct brw_compute_program
*cp
=
299 (struct brw_compute_program
*) brw
->compute_program
;
301 /* BRW_NEW_CS_PROG_DATA */
302 const struct brw_stage_prog_data
*prog_data
= &brw
->cs
.prog_data
->base
;
304 /* _NEW_PROGRAM_CONSTANTS */
305 brw_upload_pull_constants(brw
, BRW_NEW_SURFACES
, &cp
->program
.Base
,
306 stage_state
, prog_data
);
309 const struct brw_tracked_state brw_cs_pull_constants
= {
311 .mesa
= _NEW_PROGRAM_CONSTANTS
,
312 .brw
= BRW_NEW_BATCH
|
314 BRW_NEW_COMPUTE_PROGRAM
|
315 BRW_NEW_CS_PROG_DATA
,
317 .emit
= brw_upload_cs_pull_constants
,