#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "program/prog_statevars.h"
-#include "glsl/ir_uniform.h"
-
-static unsigned
-get_cs_thread_count(const struct brw_cs_prog_data *cs_prog_data)
-{
- const unsigned simd_size = cs_prog_data->simd_size;
- unsigned group_size = cs_prog_data->local_size[0] *
- cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
-
- return (group_size + simd_size - 1) / simd_size;
-}
-
+#include "compiler/glsl/ir_uniform.h"
+#include "main/shaderapi.h"
static void
brw_upload_cs_state(struct brw_context *brw)
uint32_t offset;
uint32_t *desc = (uint32_t*) brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
8 * 4, 64, &offset);
- struct gl_program *prog = (struct gl_program *) brw->compute_program;
struct brw_stage_state *stage_state = &brw->cs.base;
struct brw_cs_prog_data *cs_prog_data = brw->cs.prog_data;
struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+ const struct gen_device_info *devinfo = brw->intelScreen->devinfo;
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
- brw->vtbl.emit_buffer_surface_state(
+ brw_emit_buffer_surface_state(
brw, &stage_state->surf_offset[
prog_data->binding_table.shader_time_start],
brw->shader_time.bo, 0, BRW_SURFACEFORMAT_RAW,
prog_data->binding_table.size_bytes,
32, &stage_state->bind_bo_offset);
- unsigned local_id_dwords = 0;
-
- if (prog->SystemValuesRead & SYSTEM_BIT_LOCAL_INVOCATION_ID)
- local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
-
- unsigned push_constant_data_size =
- (prog_data->nr_params + local_id_dwords) * sizeof(gl_constant_value);
- unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
- unsigned push_constant_regs = reg_aligned_constant_size / 32;
- unsigned threads = get_cs_thread_count(cs_prog_data);
-
uint32_t dwords = brw->gen < 8 ? 8 : 9;
BEGIN_BATCH(dwords);
OUT_BATCH(MEDIA_VFE_STATE << 16 | (dwords - 2));
if (prog_data->total_scratch) {
- if (brw->gen >= 8)
+ if (brw->gen >= 8) {
+ /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
+ * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
+ */
OUT_RELOC64(stage_state->scratch_bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
- ffs(prog_data->total_scratch) - 11);
- else
+ ffs(stage_state->per_thread_scratch) - 11);
+ } else if (brw->is_haswell) {
+ /* Haswell's Per Thread Scratch Space is in the range [0, 10]
+ * where 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M.
+ */
+ OUT_RELOC(stage_state->scratch_bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ ffs(stage_state->per_thread_scratch) - 12);
+ } else {
+ /* Earlier platforms use the range [0, 11] to mean [1kB, 12kB]
+ * where 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB.
+ */
OUT_RELOC(stage_state->scratch_bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
- ffs(prog_data->total_scratch) - 11);
+ stage_state->per_thread_scratch / 1024 - 1);
+ }
} else {
OUT_BATCH(0);
if (brw->gen >= 8)
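
The three Per Thread Scratch Space encodings above are easy to mix up; this
minimal sketch mirrors the math in this hunk (the helper name is mine, not
the driver's):

#include <assert.h>
#include <strings.h>   /* ffs() */

/* BDW+ counts powers of two starting at 1kB, HSW starting at 2kB, and
 * earlier parts program (size in kB) - 1 directly.
 */
static int
scratch_space_field(int gen, int is_haswell, unsigned per_thread_scratch)
{
   if (gen >= 8)
      return ffs(per_thread_scratch) - 11;    /* 1kB -> 0 ... 2MB -> 11 */
   else if (is_haswell)
      return ffs(per_thread_scratch) - 12;    /* 2kB -> 0 ... 2MB -> 10 */
   else
      return per_thread_scratch / 1024 - 1;   /* 1kB -> 0 ... 12kB -> 11 */
}

int
main(void)
{
   assert(scratch_space_field(8, 0, 2048) == 1);
   assert(scratch_space_field(8, 0, 2 * 1024 * 1024) == 11);
   assert(scratch_space_field(7, 1, 2048) == 0);
   assert(scratch_space_field(7, 0, 12 * 1024) == 11);
   return 0;
}
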
const uint32_t vfe_num_urb_entries = brw->gen >= 8 ? 2 : 0;
const uint32_t vfe_gpgpu_mode =
brw->gen == 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE) : 0;
- OUT_BATCH(SET_FIELD(brw->max_cs_threads - 1, MEDIA_VFE_STATE_MAX_THREADS) |
+ const uint32_t subslices = MAX2(brw->intelScreen->subslice_total, 1);
+ OUT_BATCH(SET_FIELD(brw->max_cs_threads * subslices - 1,
+ MEDIA_VFE_STATE_MAX_THREADS) |
SET_FIELD(vfe_num_urb_entries, MEDIA_VFE_STATE_URB_ENTRIES) |
SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER) |
SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW) |
*
* Note: The constant data is built in brw_upload_cs_push_constants below.
*/
- const uint32_t vfe_curbe_allocation = push_constant_regs * threads;
+ const uint32_t vfe_curbe_allocation =
+ ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
+ cs_prog_data->push.cross_thread.regs, 2);
OUT_BATCH(SET_FIELD(vfe_urb_allocation, MEDIA_VFE_STATE_URB_ALLOC) |
SET_FIELD(vfe_curbe_allocation, MEDIA_VFE_STATE_CURBE_ALLOC));
OUT_BATCH(0);
OUT_BATCH(0);
ADVANCE_BATCH();
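
Two derived values feed MEDIA_VFE_STATE here: max_cs_threads is a
per-subslice limit, so it is scaled by the subslice count, and the CURBE
allocation must cover a per-thread block for every HW thread plus the shared
cross-thread block, rounded to an even number of 32-byte registers. A sketch
with illustrative numbers:

#include <assert.h>

#define ALIGN(value, alignment) \
   (((value) + (alignment) - 1) & ~((alignment) - 1))
#define MAX2(a, b) ((a) > (b) ? (a) : (b))

int
main(void)
{
   /* e.g. 64 threads per subslice on a 2-subslice part programs
    * a field value of 64 * 2 - 1 = 127.
    */
   unsigned max_cs_threads = 64, subslice_total = 2;
   assert(max_cs_threads * MAX2(subslice_total, 1) - 1 == 127);

   /* e.g. 1 cross-thread reg shared by all threads plus 1 reg per
    * thread for 16 threads: 17 regs, rounded up to an even 18.
    */
   unsigned per_thread_regs = 1, cross_thread_regs = 1, threads = 16;
   assert(ALIGN(per_thread_regs * threads + cross_thread_regs, 2) == 18);
   return 0;
}
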
- if (reg_aligned_constant_size > 0) {
+ if (cs_prog_data->push.total.size > 0) {
BEGIN_BATCH(4);
OUT_BATCH(MEDIA_CURBE_LOAD << 16 | (4 - 2));
OUT_BATCH(0);
- OUT_BATCH(reg_aligned_constant_size * threads);
+ OUT_BATCH(ALIGN(cs_prog_data->push.total.size, 64));
OUT_BATCH(stage_state->push_const_offset);
ADVANCE_BATCH();
}
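
The CURBE Total Data Length programmed above is kept in 64-byte granularity,
hence the ALIGN; for example:

#include <assert.h>

#define ALIGN(value, alignment) \
   (((value) + (alignment) - 1) & ~((alignment) - 1))

int
main(void)
{
   assert(ALIGN(136, 64) == 192);   /* 136B of push data -> three 64B units */
   assert(ALIGN(64, 64) == 64);     /* already aligned: unchanged */
   return 0;
}
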
desc[dw++] = stage_state->sampler_offset |
((stage_state->sampler_count + 3) / 4);
desc[dw++] = stage_state->bind_bo_offset;
- desc[dw++] = SET_FIELD(push_constant_regs, MEDIA_CURBE_READ_LENGTH);
+ desc[dw++] = SET_FIELD(cs_prog_data->push.per_thread.regs,
+ MEDIA_CURBE_READ_LENGTH);
const uint32_t media_threads =
brw->gen >= 8 ?
- SET_FIELD(threads, GEN8_MEDIA_GPGPU_THREAD_COUNT) :
- SET_FIELD(threads, MEDIA_GPGPU_THREAD_COUNT);
- assert(threads <= brw->max_cs_threads);
+ SET_FIELD(cs_prog_data->threads, GEN8_MEDIA_GPGPU_THREAD_COUNT) :
+ SET_FIELD(cs_prog_data->threads, MEDIA_GPGPU_THREAD_COUNT);
+ assert(cs_prog_data->threads <= brw->max_cs_threads);
+
+ const uint32_t slm_size =
+ encode_slm_size(devinfo->gen, prog_data->total_shared);
+
desc[dw++] =
SET_FIELD(cs_prog_data->uses_barrier, MEDIA_BARRIER_ENABLE) |
+ SET_FIELD(slm_size, MEDIA_SHARED_LOCAL_MEMORY_SIZE) |
media_threads;
+ desc[dw++] =
+ SET_FIELD(cs_prog_data->push.cross_thread.regs, CROSS_THREAD_READ_LENGTH);
+
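
encode_slm_size() packs prog_data->total_shared into the
INTERFACE_DESCRIPTOR_DATA shared-memory field. As a sketch of the encoding
(assuming the gen7-8 4kB-granular and gen9+ power-of-two-exponent schemes;
this illustrates the idea rather than copying the helper):

#include <assert.h>
#include <strings.h>   /* ffs() */

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

/* Round up to a power of two (for bytes > 0). */
static unsigned
next_pow2(unsigned bytes)
{
   unsigned p = 1;
   while (p < bytes)
      p <<= 1;
   return p;
}

static unsigned
slm_size_field(unsigned gen, unsigned bytes)
{
   if (bytes == 0)
      return 0;
   unsigned size = next_pow2(bytes);
   if (gen >= 9)
      return ffs(MAX2(size, 1024)) - 10;   /* 1kB -> 1 ... 64kB -> 7 */
   else
      return MAX2(size, 4096) / 4096;      /* <=4kB -> 1 ... 64kB -> 16 */
}

int
main(void)
{
   assert(slm_size_field(7, 0) == 0);
   assert(slm_size_field(7, 5000) == 2);      /* rounds up to 8kB */
   assert(slm_size_field(9, 64 * 1024) == 7);
   return 0;
}
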
BEGIN_BATCH(4);
OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD << 16 | (4 - 2));
OUT_BATCH(0);
.dirty = {
.mesa = _NEW_PROGRAM_CONSTANTS,
.brw = BRW_NEW_BATCH |
+ BRW_NEW_BLORP |
BRW_NEW_CS_PROG_DATA |
BRW_NEW_PUSH_CONSTANT_ALLOCATION |
+ BRW_NEW_SAMPLER_STATE_TABLE |
BRW_NEW_SURFACES,
},
.emit = brw_upload_cs_state
struct gl_context *ctx = &brw->ctx;
const struct brw_stage_prog_data *prog_data =
(struct brw_stage_prog_data*) cs_prog_data;
- unsigned local_id_dwords = 0;
-
- if (prog->SystemValuesRead & SYSTEM_BIT_LOCAL_INVOCATION_ID)
- local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
 /* Updates the ParameterValues[i] pointers for all parameters of the
  * basic type of PROGRAM_STATE_VAR.
  */
/* XXX: Should this happen somewhere before to get our state flag set? */
_mesa_load_state_parameters(ctx, prog->Parameters);
- if (prog_data->nr_params == 0 && local_id_dwords == 0) {
+ if (cs_prog_data->push.total.size == 0) {
stage_state->push_const_size = 0;
- } else {
- gl_constant_value *param;
- unsigned i, t;
-
- const unsigned push_constant_data_size =
- (local_id_dwords + prog_data->nr_params) * sizeof(gl_constant_value);
- const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
- const unsigned param_aligned_count =
- reg_aligned_constant_size / sizeof(*param);
-
- unsigned threads = get_cs_thread_count(cs_prog_data);
-
- param = (gl_constant_value*)
- brw_state_batch(brw, type,
- reg_aligned_constant_size * threads,
- 32, &stage_state->push_const_offset);
- assert(param);
-
- STATIC_ASSERT(sizeof(gl_constant_value) == sizeof(float));
-
- brw_cs_fill_local_id_payload(cs_prog_data, param, threads,
- reg_aligned_constant_size);
-
- /* _NEW_PROGRAM_CONSTANTS */
- for (t = 0; t < threads; t++) {
- gl_constant_value *next_param =
-            &param[t * param_aligned_count + local_id_dwords];
- for (i = 0; i < prog_data->nr_params; i++) {
- next_param[i] = *prog_data->param[i];
- }
+ return;
+ }
+
+ gl_constant_value *param = (gl_constant_value*)
+ brw_state_batch(brw, type, ALIGN(cs_prog_data->push.total.size, 64),
+ 64, &stage_state->push_const_offset);
+ assert(param);
+
+ STATIC_ASSERT(sizeof(gl_constant_value) == sizeof(float));
+
+ if (cs_prog_data->push.cross_thread.size > 0) {
+ gl_constant_value *param_copy = param;
+ assert(cs_prog_data->thread_local_id_index < 0 ||
+ cs_prog_data->thread_local_id_index >=
+ cs_prog_data->push.cross_thread.dwords);
+ for (unsigned i = 0;
+ i < cs_prog_data->push.cross_thread.dwords;
+ i++) {
+ param_copy[i] = *prog_data->param[i];
}
+ }
- stage_state->push_const_size = ALIGN(prog_data->nr_params, 8) / 8;
+ gl_constant_value thread_id;
+ if (cs_prog_data->push.per_thread.size > 0) {
+ for (unsigned t = 0; t < cs_prog_data->threads; t++) {
+ unsigned dst =
+ 8 * (cs_prog_data->push.per_thread.regs * t +
+ cs_prog_data->push.cross_thread.regs);
+ unsigned src = cs_prog_data->push.cross_thread.dwords;
+ for ( ; src < prog_data->nr_params; src++, dst++) {
+ if (src != cs_prog_data->thread_local_id_index)
+ param[dst] = *prog_data->param[src];
+ else {
+ thread_id.u = t * cs_prog_data->simd_size;
+ param[dst] = thread_id;
+ }
+ }
+ }
}
+
+ stage_state->push_const_size =
+ cs_prog_data->push.cross_thread.regs +
+ cs_prog_data->push.per_thread.regs;
}
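
With this change the push constant buffer is one cross-thread block, read
identically by every thread, followed by a per-thread block per HW thread
whose thread-local ID slot receives t * simd_size (the first invocation
index handled by thread t). A sketch of the resulting byte offsets, with
illustrative register counts:

#include <assert.h>

/* Byte offset of HW thread t's per-thread block; register counts are in
 * 32-byte units, matching the dst = 8 * (...) dword math above.
 */
static unsigned
per_thread_block_offset(unsigned cross_thread_regs,
                        unsigned per_thread_regs, unsigned t)
{
   return 32 * (cross_thread_regs + per_thread_regs * t);
}

int
main(void)
{
   /* e.g. 2 shared regs, then 1 reg per thread */
   assert(per_thread_block_offset(2, 1, 0) == 64);
   assert(per_thread_block_offset(2, 1, 1) == 96);
   return 0;
}
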
/* CACHE_NEW_CS_PROG */
struct brw_cs_prog_data *cs_prog_data = brw->cs.prog_data;
+ _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
brw_upload_cs_push_constants(brw, &cp->program.Base, cs_prog_data,
stage_state, AUB_TRACE_WM_CONSTANTS);
}
.dirty = {
.mesa = _NEW_PROGRAM_CONSTANTS,
.brw = BRW_NEW_BATCH |
+ BRW_NEW_BLORP |
BRW_NEW_COMPUTE_PROGRAM |
BRW_NEW_PUSH_CONSTANT_ALLOCATION,
},
/* BRW_NEW_CS_PROG_DATA */
const struct brw_stage_prog_data *prog_data = &brw->cs.prog_data->base;
+ _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
/* _NEW_PROGRAM_CONSTANTS */
brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &cp->program.Base,
stage_state, prog_data);
.dirty = {
.mesa = _NEW_PROGRAM_CONSTANTS,
.brw = BRW_NEW_BATCH |
+ BRW_NEW_BLORP |
BRW_NEW_COMPUTE_PROGRAM |
BRW_NEW_CS_PROG_DATA,
},