/*
 * Copyright (c) 2014 - 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
25 #include "util/ralloc.h"
26 #include "brw_context.h"
31 #include "intel_mipmap_tree.h"
32 #include "brw_state.h"
33 #include "intel_batchbuffer.h"
37 brw_cs_prog_data_compare(const void *in_a
, const void *in_b
)
39 const struct brw_cs_prog_data
*a
=
40 (const struct brw_cs_prog_data
*)in_a
;
41 const struct brw_cs_prog_data
*b
=
42 (const struct brw_cs_prog_data
*)in_b
;
44 /* Compare the base structure. */
45 if (!brw_stage_prog_data_compare(&a
->base
, &b
->base
))
48 /* Compare the rest of the structure. */
49 const unsigned offset
= sizeof(struct brw_stage_prog_data
);
50 if (memcmp(((char *) a
) + offset
, ((char *) b
) + offset
,
51 sizeof(struct brw_cs_prog_data
) - offset
))
58 static const unsigned *
59 brw_cs_emit(struct brw_context
*brw
,
61 const struct brw_cs_prog_key
*key
,
62 struct brw_cs_prog_data
*prog_data
,
63 struct gl_compute_program
*cp
,
64 struct gl_shader_program
*prog
,
65 unsigned *final_assembly_size
)
67 bool start_busy
= false;
68 double start_time
= 0;
70 if (unlikely(brw
->perf_debug
)) {
71 start_busy
= (brw
->batch
.last_bo
&&
72 drm_intel_bo_busy(brw
->batch
.last_bo
));
73 start_time
= get_time();
76 struct brw_shader
*shader
=
77 (struct brw_shader
*) prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
];
79 if (unlikely(INTEL_DEBUG
& DEBUG_CS
))
80 brw_dump_ir("compute", prog
, &shader
->base
, &cp
->Base
);
82 prog_data
->local_size
[0] = cp
->LocalSize
[0];
83 prog_data
->local_size
[1] = cp
->LocalSize
[1];
84 prog_data
->local_size
[2] = cp
->LocalSize
[2];
85 int local_workgroup_size
=
86 cp
->LocalSize
[0] * cp
->LocalSize
[1] * cp
->LocalSize
[2];
89 const char *fail_msg
= NULL
;
92 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
)
93 st_index
= brw_get_shader_time_index(brw
, prog
, &cp
->Base
, ST_CS
);
95 /* Now the main event: Visit the shader IR and generate our CS IR for it.
97 fs_visitor
v8(brw
->intelScreen
->compiler
, brw
,
98 mem_ctx
, MESA_SHADER_COMPUTE
, key
, &prog_data
->base
, prog
,
99 &cp
->Base
, 8, st_index
);
101 fail_msg
= v8
.fail_msg
;
102 } else if (local_workgroup_size
<= 8 * brw
->max_cs_threads
) {
104 prog_data
->simd_size
= 8;
107 fs_visitor
v16(brw
->intelScreen
->compiler
, brw
,
108 mem_ctx
, MESA_SHADER_COMPUTE
, key
, &prog_data
->base
, prog
,
109 &cp
->Base
, 16, st_index
);
110 if (likely(!(INTEL_DEBUG
& DEBUG_NO16
)) &&
111 !fail_msg
&& !v8
.simd16_unsupported
&&
112 local_workgroup_size
<= 16 * brw
->max_cs_threads
) {
113 /* Try a SIMD16 compile */
114 v16
.import_uniforms(&v8
);
116 perf_debug("SIMD16 shader failed to compile: %s", v16
.fail_msg
);
119 "Couldn't generate SIMD16 program and not "
120 "enough threads for SIMD8";
124 prog_data
->simd_size
= 16;
128 if (unlikely(cfg
== NULL
)) {
130 prog
->LinkStatus
= false;
131 ralloc_strcat(&prog
->InfoLog
, fail_msg
);
132 _mesa_problem(NULL
, "Failed to compile compute shader: %s\n",
137 fs_generator
g(brw
->intelScreen
->compiler
, brw
,
138 mem_ctx
, (void*) key
, &prog_data
->base
, &cp
->Base
,
139 v8
.promoted_constants
, v8
.runtime_check_aads_emit
, "CS");
140 if (INTEL_DEBUG
& DEBUG_CS
) {
141 char *name
= ralloc_asprintf(mem_ctx
, "%s compute shader %d",
142 prog
->Label
? prog
->Label
: "unnamed",
144 g
.enable_debug(name
);
147 g
.generate_code(cfg
, prog_data
->simd_size
);
149 if (unlikely(brw
->perf_debug
) && shader
) {
150 if (shader
->compiled_once
) {
151 _mesa_problem(&brw
->ctx
, "CS programs shouldn't need recompiles");
153 shader
->compiled_once
= true;
155 if (start_busy
&& !drm_intel_bo_busy(brw
->batch
.last_bo
)) {
156 perf_debug("CS compile took %.03f ms and stalled the GPU\n",
157 (get_time() - start_time
) * 1000);
161 return g
.get_assembly(final_assembly_size
);
165 brw_codegen_cs_prog(struct brw_context
*brw
,
166 struct gl_shader_program
*prog
,
167 struct brw_compute_program
*cp
,
168 struct brw_cs_prog_key
*key
)
170 struct gl_context
*ctx
= &brw
->ctx
;
171 const GLuint
*program
;
172 void *mem_ctx
= ralloc_context(NULL
);
174 struct brw_cs_prog_data prog_data
;
176 struct gl_shader
*cs
= prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
];
179 memset(&prog_data
, 0, sizeof(prog_data
));
181 /* Allocate the references to the uniforms that will end up in the
182 * prog_data associated with the compiled program, and which will be freed
183 * by the state cache.
185 int param_count
= cs
->num_uniform_components
;
187 /* The backend also sometimes adds params for texture size. */
188 param_count
+= 2 * ctx
->Const
.Program
[MESA_SHADER_COMPUTE
].MaxTextureImageUnits
;
189 prog_data
.base
.param
=
190 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
191 prog_data
.base
.pull_param
=
192 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
193 prog_data
.base
.nr_params
= param_count
;
195 program
= brw_cs_emit(brw
, mem_ctx
, key
, &prog_data
,
196 &cp
->program
, prog
, &program_size
);
197 if (program
== NULL
) {
198 ralloc_free(mem_ctx
);
202 if (prog_data
.base
.total_scratch
) {
203 brw_get_scratch_bo(brw
, &brw
->cs
.base
.scratch_bo
,
204 prog_data
.base
.total_scratch
* brw
->max_cs_threads
);
207 if (unlikely(INTEL_DEBUG
& DEBUG_CS
))
208 fprintf(stderr
, "\n");
210 brw_upload_cache(&brw
->cache
, BRW_CACHE_CS_PROG
,
212 program
, program_size
,
213 &prog_data
, sizeof(prog_data
),
214 &brw
->cs
.base
.prog_offset
, &brw
->cs
.prog_data
);
215 ralloc_free(mem_ctx
);
222 brw_cs_populate_key(struct brw_context
*brw
, struct brw_cs_prog_key
*key
)
224 /* BRW_NEW_COMPUTE_PROGRAM */
225 const struct brw_compute_program
*cp
=
226 (struct brw_compute_program
*) brw
->compute_program
;
228 memset(key
, 0, sizeof(*key
));
230 /* The unique compute program ID */
231 key
->program_string_id
= cp
->id
;
237 brw_upload_cs_prog(struct brw_context
*brw
)
239 struct gl_context
*ctx
= &brw
->ctx
;
240 struct brw_cs_prog_key key
;
241 struct brw_compute_program
*cp
= (struct brw_compute_program
*)
242 brw
->compute_program
;
247 if (!brw_state_dirty(brw
, 0, BRW_NEW_COMPUTE_PROGRAM
))
250 brw_cs_populate_key(brw
, &key
);
252 if (!brw_search_cache(&brw
->cache
, BRW_CACHE_CS_PROG
,
254 &brw
->cs
.base
.prog_offset
, &brw
->cs
.prog_data
)) {
256 brw_codegen_cs_prog(brw
,
257 ctx
->Shader
.CurrentProgram
[MESA_SHADER_COMPUTE
],
262 brw
->cs
.base
.prog_data
= &brw
->cs
.prog_data
->base
;
267 brw_cs_precompile(struct gl_context
*ctx
,
268 struct gl_shader_program
*shader_prog
,
269 struct gl_program
*prog
)
271 struct brw_context
*brw
= brw_context(ctx
);
272 struct brw_cs_prog_key key
;
274 struct gl_compute_program
*cp
= (struct gl_compute_program
*) prog
;
275 struct brw_compute_program
*bcp
= brw_compute_program(cp
);
277 memset(&key
, 0, sizeof(key
));
278 key
.program_string_id
= bcp
->id
;
280 brw_setup_tex_for_precompile(brw
, &key
.tex
, prog
);
282 uint32_t old_prog_offset
= brw
->cs
.base
.prog_offset
;
283 struct brw_cs_prog_data
*old_prog_data
= brw
->cs
.prog_data
;
285 bool success
= brw_codegen_cs_prog(brw
, shader_prog
, bcp
, &key
);
287 brw
->cs
.base
.prog_offset
= old_prog_offset
;
288 brw
->cs
.prog_data
= old_prog_data
;
295 get_cs_thread_count(const struct brw_cs_prog_data
*cs_prog_data
)
297 const unsigned simd_size
= cs_prog_data
->simd_size
;
298 unsigned group_size
= cs_prog_data
->local_size
[0] *
299 cs_prog_data
->local_size
[1] * cs_prog_data
->local_size
[2];
301 return (group_size
+ simd_size
- 1) / simd_size
;
306 brw_upload_cs_state(struct brw_context
*brw
)
308 if (!brw
->cs
.prog_data
)
312 uint32_t *desc
= (uint32_t*) brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
314 struct brw_stage_state
*stage_state
= &brw
->cs
.base
;
315 struct brw_cs_prog_data
*cs_prog_data
= brw
->cs
.prog_data
;
316 struct brw_stage_prog_data
*prog_data
= &cs_prog_data
->base
;
318 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
) {
319 brw
->vtbl
.emit_buffer_surface_state(
320 brw
, &stage_state
->surf_offset
[
321 prog_data
->binding_table
.shader_time_start
],
322 brw
->shader_time
.bo
, 0, BRW_SURFACEFORMAT_RAW
,
323 brw
->shader_time
.bo
->size
, 1, true);
326 uint32_t *bind
= (uint32_t*) brw_state_batch(brw
, AUB_TRACE_BINDING_TABLE
,
327 prog_data
->binding_table
.size_bytes
,
328 32, &stage_state
->bind_bo_offset
);
330 unsigned threads
= get_cs_thread_count(cs_prog_data
);
332 uint32_t dwords
= brw
->gen
< 8 ? 8 : 9;
334 OUT_BATCH(MEDIA_VFE_STATE
<< 16 | (dwords
- 2));
336 if (prog_data
->total_scratch
) {
338 OUT_RELOC64(stage_state
->scratch_bo
,
339 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
340 ffs(prog_data
->total_scratch
) - 11);
342 OUT_RELOC(stage_state
->scratch_bo
,
343 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
344 ffs(prog_data
->total_scratch
) - 11);
351 const uint32_t vfe_num_urb_entries
= brw
->gen
>= 8 ? 2 : 0;
352 const uint32_t vfe_gpgpu_mode
=
353 brw
->gen
== 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE
) : 0;
354 OUT_BATCH(SET_FIELD(brw
->max_cs_threads
- 1, MEDIA_VFE_STATE_MAX_THREADS
) |
355 SET_FIELD(vfe_num_urb_entries
, MEDIA_VFE_STATE_URB_ENTRIES
) |
356 SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER
) |
357 SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW
) |
361 const uint32_t vfe_urb_allocation
= brw
->gen
>= 8 ? 2 : 0;
362 OUT_BATCH(SET_FIELD(vfe_urb_allocation
, MEDIA_VFE_STATE_URB_ALLOC
));
368 /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
369 memcpy(bind
, stage_state
->surf_offset
,
370 prog_data
->binding_table
.size_bytes
);
372 memset(desc
, 0, 8 * 4);
375 desc
[dw
++] = brw
->cs
.base
.prog_offset
;
377 desc
[dw
++] = 0; /* Kernel Start Pointer High */
380 desc
[dw
++] = stage_state
->bind_bo_offset
;
382 const uint32_t media_threads
=
384 SET_FIELD(threads
, GEN8_MEDIA_GPGPU_THREAD_COUNT
) :
385 SET_FIELD(threads
, MEDIA_GPGPU_THREAD_COUNT
);
386 assert(threads
<= brw
->max_cs_threads
);
387 desc
[dw
++] = media_threads
;
390 OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD
<< 16 | (4 - 2));
399 const struct brw_tracked_state brw_cs_state
= {
400 /* explicit initialisers aren't valid C++, comment
401 * them for documentation purposes */
404 /* .brw = */ BRW_NEW_CS_PROG_DATA
,
406 /* .emit = */ brw_upload_cs_state