Merge remote-tracking branch 'origin/master' into vulkan
[mesa.git] / src / mesa / drivers / dri / i965 / gen7_cs_state.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "util/ralloc.h"
25 #include "brw_context.h"
26 #include "brw_cs.h"
27 #include "brw_eu.h"
28 #include "brw_wm.h"
29 #include "brw_shader.h"
30 #include "intel_mipmap_tree.h"
31 #include "intel_batchbuffer.h"
32 #include "brw_state.h"
33 #include "program/prog_statevars.h"
34 #include "compiler/glsl/ir_uniform.h"
35
36 static unsigned
37 get_cs_thread_count(const struct brw_cs_prog_data *cs_prog_data)
38 {
39 const unsigned simd_size = cs_prog_data->simd_size;
40 unsigned group_size = cs_prog_data->local_size[0] *
41 cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
42
43 return (group_size + simd_size - 1) / simd_size;
44 }
45
46
/**
 * Upload all per-draw compute-shader pipeline state:
 *
 *  - an 8-dword INTERFACE_DESCRIPTOR_DATA structure (in batch state space),
 *  - the binding table,
 *  - MEDIA_VFE_STATE (thread limits, scratch, CURBE allocation),
 *  - MEDIA_CURBE_LOAD (push constants, built earlier by
 *    brw_upload_cs_push_constants),
 *  - MEDIA_INTERFACE_DESCRIPTOR_LOAD pointing at the descriptor.
 *
 * Bails out early when no compute program has been compiled yet.
 */
static void
brw_upload_cs_state(struct brw_context *brw)
{
   if (!brw->cs.prog_data)
      return;

   /* Allocate the 8-dword (32-byte) interface descriptor in batch state
    * space; "offset" is later handed to MEDIA_INTERFACE_DESCRIPTOR_LOAD.
    */
   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                                8 * 4, 64, &offset);
   struct gl_program *prog = (struct gl_program *) brw->compute_program;
   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_cs_prog_data *cs_prog_data = brw->cs.prog_data;
   struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   /* Emit the surface for the shader-time buffer when profiling is on. */
   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw->vtbl.emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
                 prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, BRW_SURFACEFORMAT_RAW,
         brw->shader_time.bo->size, 1, true);
   }

   /* Binding table lives in batch state space too; filled from the cached
    * surf_offset array further below.
    */
   uint32_t *bind = (uint32_t*) brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
                                          prog_data->binding_table.size_bytes,
                                          32, &stage_state->bind_bo_offset);

   /* Extra push-constant space for gl_LocalInvocationID payloads, in dwords
    * (each reg is 8 dwords wide).
    */
   unsigned local_id_dwords = 0;

   if (prog->SystemValuesRead & SYSTEM_BIT_LOCAL_INVOCATION_ID)
      local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;

   /* Per-thread push constant size, rounded up to a 32-byte register. */
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * sizeof(gl_constant_value);
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;
   unsigned threads = get_cs_thread_count(cs_prog_data);

   /* MEDIA_VFE_STATE grew by one dword (scratch address high) on gen8+. */
   uint32_t dwords = brw->gen < 8 ? 8 : 9;
   BEGIN_BATCH(dwords);
   OUT_BATCH(MEDIA_VFE_STATE << 16 | (dwords - 2));

   if (prog_data->total_scratch) {
      /* The low bits of the scratch address encode the per-thread scratch
       * size as a power of two: ffs(total_scratch) - 11 presumably yields
       * log2(bytes) - 10 for a power-of-two total_scratch >= 1KB — confirm
       * against the PRM's Per Thread Scratch Space encoding.
       */
      if (brw->gen >= 8)
         OUT_RELOC64(stage_state->scratch_bo,
                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                     ffs(prog_data->total_scratch) - 11);
      else
         OUT_RELOC(stage_state->scratch_bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   ffs(prog_data->total_scratch) - 11);
   } else {
      /* No scratch: still emit the address dword(s) as zero. */
      OUT_BATCH(0);
      if (brw->gen >= 8)
         OUT_BATCH(0);
   }

   const uint32_t vfe_num_urb_entries = brw->gen >= 8 ? 2 : 0;
   /* Gen7 selects GPGPU (vs. media) mode via a VFE_STATE bit; gone later. */
   const uint32_t vfe_gpgpu_mode =
      brw->gen == 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE) : 0;
   OUT_BATCH(SET_FIELD(brw->max_cs_threads - 1, MEDIA_VFE_STATE_MAX_THREADS) |
             SET_FIELD(vfe_num_urb_entries, MEDIA_VFE_STATE_URB_ENTRIES) |
             SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER) |
             SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW) |
             vfe_gpgpu_mode);

   OUT_BATCH(0);
   const uint32_t vfe_urb_allocation = brw->gen >= 8 ? 2 : 0;

   /* We are uploading duplicated copies of push constant uniforms for each
    * thread. Although the local id data needs to vary per thread, it won't
    * change for other uniform data. Unfortunately this duplication is
    * required for gen7. As of Haswell, this duplication can be avoided, but
    * this older mechanism with duplicated data continues to work.
    *
    * FINISHME: As of Haswell, we could make use of the
    * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length" field
    * to only store one copy of uniform data.
    *
    * FINISHME: Broadwell adds a new alternative "Indirect Payload Storage"
    * which is described in the GPGPU_WALKER command and in the Broadwell PRM
    * Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline => Mode of
    * Operations => GPGPU Mode => Indirect Payload Storage.
    *
    * Note: The constant data is built in brw_upload_cs_push_constants below.
    */
   const uint32_t vfe_curbe_allocation = push_constant_regs * threads;
   OUT_BATCH(SET_FIELD(vfe_urb_allocation, MEDIA_VFE_STATE_URB_ALLOC) |
             SET_FIELD(vfe_curbe_allocation, MEDIA_VFE_STATE_CURBE_ALLOC));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /* Point the hardware at the CURBE data uploaded earlier; length matches
    * the 64-byte-aligned per-thread copies allocated in
    * brw_upload_cs_push_constants.
    */
   if (reg_aligned_constant_size > 0) {
      BEGIN_BATCH(4);
      OUT_BATCH(MEDIA_CURBE_LOAD << 16 | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(ALIGN(reg_aligned_constant_size * threads, 64));
      OUT_BATCH(stage_state->push_const_offset);
      ADVANCE_BATCH();
   }

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);

   /* Build the INTERFACE_DESCRIPTOR_DATA structure dword by dword. */
   memset(desc, 0, 8 * 4);

   int dw = 0;
   desc[dw++] = brw->cs.base.prog_offset;  /* Kernel Start Pointer */
   if (brw->gen >= 8)
      desc[dw++] = 0; /* Kernel Start Pointer High */
   desc[dw++] = 0;
   /* Sampler state pointer plus sampler count in units of 4. */
   desc[dw++] = stage_state->sampler_offset |
                ((stage_state->sampler_count + 3) / 4);
   desc[dw++] = stage_state->bind_bo_offset;
   desc[dw++] = SET_FIELD(push_constant_regs, MEDIA_CURBE_READ_LENGTH);
   const uint32_t media_threads =
      brw->gen >= 8 ?
      SET_FIELD(threads, GEN8_MEDIA_GPGPU_THREAD_COUNT) :
      SET_FIELD(threads, MEDIA_GPGPU_THREAD_COUNT);
   assert(threads <= brw->max_cs_threads);

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }

   desc[dw++] =
      SET_FIELD(cs_prog_data->uses_barrier, MEDIA_BARRIER_ENABLE) |
      SET_FIELD(slm_size, MEDIA_SHARED_LOCAL_MEMORY_SIZE) |
      media_threads;

   /* Tell the hardware where the (single) interface descriptor lives. */
   BEGIN_BATCH(4);
   OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD << 16 | (4 - 2));
   OUT_BATCH(0);
   OUT_BATCH(8 * 4);   /* descriptor data length: one 32-byte descriptor */
   OUT_BATCH(offset);
   ADVANCE_BATCH();
}
192
/* State atom: re-emits the full CS pipeline state whenever constants, the
 * batch, the compiled CS program, the push-constant allocation, samplers,
 * or surfaces change.
 */
const struct brw_tracked_state brw_cs_state = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_PUSH_CONSTANT_ALLOCATION |
             BRW_NEW_SAMPLER_STATE_TABLE |
             BRW_NEW_SURFACES,
   },
   .emit = brw_upload_cs_state
};
204
205
206 /**
207 * Creates a region containing the push constants for the CS on gen7+.
208 *
209 * Push constants are constant values (such as GLSL uniforms) that are
210 * pre-loaded into a shader stage's register space at thread spawn time.
211 *
212 * For other stages, see brw_curbe.c:brw_upload_constant_buffer for the
213 * equivalent gen4/5 code and gen6_vs_state.c:gen6_upload_push_constants for
214 * gen6+.
215 */
static void
brw_upload_cs_push_constants(struct brw_context *brw,
                             const struct gl_program *prog,
                             const struct brw_cs_prog_data *cs_prog_data,
                             struct brw_stage_state *stage_state,
                             enum aub_state_struct_type type)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_stage_prog_data *prog_data =
      (struct brw_stage_prog_data*) cs_prog_data;
   /* Leading dwords of each thread's payload reserved for the per-thread
    * gl_LocalInvocationID data (8 dwords per register).
    */
   unsigned local_id_dwords = 0;

   if (prog->SystemValuesRead & SYSTEM_BIT_LOCAL_INVOCATION_ID)
      local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;

   /* Updates the ParameterValues[i] pointers for all parameters of the
    * basic type of PROGRAM_STATE_VAR.
    */
   /* XXX: Should this happen somewhere before to get our state flag set? */
   _mesa_load_state_parameters(ctx, prog->Parameters);

   if (prog_data->nr_params == 0 && local_id_dwords == 0) {
      /* Nothing to push for this program. */
      stage_state->push_const_size = 0;
   } else {
      gl_constant_value *param;
      unsigned i, t;

      /* Per-thread constant block, rounded up to a whole 32-byte register;
       * param_aligned_count is that size expressed in dword-sized elements.
       */
      const unsigned push_constant_data_size =
         (local_id_dwords + prog_data->nr_params) * sizeof(gl_constant_value);
      const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
      const unsigned param_aligned_count =
         reg_aligned_constant_size / sizeof(*param);

      unsigned threads = get_cs_thread_count(cs_prog_data);

      /* One full copy of the constant block per hardware thread; see the
       * gen7 duplication note in brw_upload_cs_state.
       */
      param = (gl_constant_value*)
         brw_state_batch(brw, type,
                         ALIGN(reg_aligned_constant_size * threads, 64),
                         64, &stage_state->push_const_offset);
      assert(param);

      STATIC_ASSERT(sizeof(gl_constant_value) == sizeof(float));

      /* Fill in the varying per-thread local invocation IDs first. */
      brw_cs_fill_local_id_payload(cs_prog_data, param, threads,
                                   reg_aligned_constant_size);

      /* _NEW_PROGRAM_CONSTANTS */
      /* Then copy the (identical) uniform values into every thread's block,
       * immediately after that thread's local-ID dwords.
       */
      for (t = 0; t < threads; t++) {
         gl_constant_value *next_param =
            &param[t * param_aligned_count + local_id_dwords];
         for (i = 0; i < prog_data->nr_params; i++) {
            next_param[i] = *prog_data->param[i];
         }
      }

      /* Push size in 256-bit (8-dword) units. NOTE(review): this counts only
       * nr_params, not local_id_dwords — confirm consumers don't need the
       * local-ID portion included here.
       */
      stage_state->push_const_size = ALIGN(prog_data->nr_params, 8) / 8;
   }
}
274
275
276 static void
277 gen7_upload_cs_push_constants(struct brw_context *brw)
278 {
279 struct brw_stage_state *stage_state = &brw->cs.base;
280
281 /* BRW_NEW_COMPUTE_PROGRAM */
282 const struct brw_compute_program *cp =
283 (struct brw_compute_program *) brw->compute_program;
284
285 if (cp) {
286 /* CACHE_NEW_CS_PROG */
287 struct brw_cs_prog_data *cs_prog_data = brw->cs.prog_data;
288
289 brw_upload_cs_push_constants(brw, &cp->program.Base, cs_prog_data,
290 stage_state, AUB_TRACE_WM_CONSTANTS);
291 }
292 }
293
/* State atom: rebuilds the CS push-constant buffer when program constants,
 * the batch, the bound compute program, or the push-constant allocation
 * change.
 */
const struct brw_tracked_state gen7_cs_push_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_PUSH_CONSTANT_ALLOCATION,
   },
   .emit = gen7_upload_cs_push_constants,
};
303
304 /**
305 * Creates a new CS constant buffer reflecting the current CS program's
306 * constants, if needed by the CS program.
307 */
308 static void
309 brw_upload_cs_pull_constants(struct brw_context *brw)
310 {
311 struct brw_stage_state *stage_state = &brw->cs.base;
312
313 /* BRW_NEW_COMPUTE_PROGRAM */
314 struct brw_compute_program *cp =
315 (struct brw_compute_program *) brw->compute_program;
316
317 /* BRW_NEW_CS_PROG_DATA */
318 const struct brw_stage_prog_data *prog_data = &brw->cs.prog_data->base;
319
320 /* _NEW_PROGRAM_CONSTANTS */
321 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &cp->program.Base,
322 stage_state, prog_data);
323 }
324
/* State atom: rebuilds the CS pull-constant (UBO-style) buffer when program
 * constants, the batch, the bound compute program, or the compiled CS
 * program data change.
 */
const struct brw_tracked_state brw_cs_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_pull_constants,
};