X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fgen7_cs_state.c;h=26e4264018fe46ee7d76bfee5adac20d6afde8f2;hb=8c47ccb13a198f4d38c772df1de457de34dde23e;hp=dc3cc66407a415c4a747b22ee7f97f8a3d101e8c;hpb=8048c1953c97de75ccbe33d719ca81f67a5ba255;p=mesa.git

diff --git a/src/mesa/drivers/dri/i965/gen7_cs_state.c b/src/mesa/drivers/dri/i965/gen7_cs_state.c
index dc3cc66407a..26e4264018f 100644
--- a/src/mesa/drivers/dri/i965/gen7_cs_state.c
+++ b/src/mesa/drivers/dri/i965/gen7_cs_state.c
@@ -33,170 +33,6 @@
 #include "compiler/glsl/ir_uniform.h"
 #include "main/shaderapi.h"
 
-static void
-brw_upload_cs_state(struct brw_context *brw)
-{
-   if (!brw->cs.base.prog_data)
-      return;
-
-   uint32_t offset;
-   uint32_t *desc = (uint32_t*) brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
-                                                8 * 4, 64, &offset);
-   struct brw_stage_state *stage_state = &brw->cs.base;
-   struct brw_stage_prog_data *prog_data = stage_state->prog_data;
-   struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
-   const struct gen_device_info *devinfo = &brw->screen->devinfo;
-
-   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
-      brw_emit_buffer_surface_state(
-         brw, &stage_state->surf_offset[
-                 prog_data->binding_table.shader_time_start],
-         brw->shader_time.bo, 0, BRW_SURFACEFORMAT_RAW,
-         brw->shader_time.bo->size, 1, true);
-   }
-
-   uint32_t *bind = (uint32_t*) brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
-                                            prog_data->binding_table.size_bytes,
-                                            32, &stage_state->bind_bo_offset);
-
-   uint32_t dwords = brw->gen < 8 ? 8 : 9;
-   BEGIN_BATCH(dwords);
-   OUT_BATCH(MEDIA_VFE_STATE << 16 | (dwords - 2));
-
-   if (prog_data->total_scratch) {
-      if (brw->gen >= 8) {
-         /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
-          * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
-          */
-         OUT_RELOC64(stage_state->scratch_bo,
-                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
-                     ffs(stage_state->per_thread_scratch) - 11);
-      } else if (brw->is_haswell) {
-         /* Haswell's Per Thread Scratch Space is in the range [0, 10]
-          * where 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M.
-          */
-         OUT_RELOC(stage_state->scratch_bo,
-                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
-                   ffs(stage_state->per_thread_scratch) - 12);
-      } else {
-         /* Earlier platforms use the range [0, 11] to mean [1kB, 12kB]
-          * where 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB.
-          */
-         OUT_RELOC(stage_state->scratch_bo,
-                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
-                   stage_state->per_thread_scratch / 1024 - 1);
-      }
-   } else {
-      OUT_BATCH(0);
-      if (brw->gen >= 8)
-         OUT_BATCH(0);
-   }
-
-   const uint32_t vfe_num_urb_entries = brw->gen >= 8 ? 2 : 0;
-   const uint32_t vfe_gpgpu_mode =
-      brw->gen == 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE) : 0;
-   const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
-   OUT_BATCH(SET_FIELD(devinfo->max_cs_threads * subslices - 1,
-                       MEDIA_VFE_STATE_MAX_THREADS) |
-             SET_FIELD(vfe_num_urb_entries, MEDIA_VFE_STATE_URB_ENTRIES) |
-             SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER) |
-             SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW) |
-             vfe_gpgpu_mode);
-
-   OUT_BATCH(0);
-   const uint32_t vfe_urb_allocation = brw->gen >= 8 ? 2 : 0;
-
-   /* We are uploading duplicated copies of push constant uniforms for each
-    * thread. Although the local id data needs to vary per thread, it won't
-    * change for other uniform data. Unfortunately this duplication is
-    * required for gen7. As of Haswell, this duplication can be avoided, but
-    * this older mechanism with duplicated data continues to work.
-    *
-    * FINISHME: As of Haswell, we could make use of the
-    * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length" field
-    * to only store one copy of uniform data.
-    *
-    * FINISHME: Broadwell adds a new alternative "Indirect Payload Storage"
-    * which is described in the GPGPU_WALKER command and in the Broadwell PRM
-    * Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline => Mode of
-    * Operations => GPGPU Mode => Indirect Payload Storage.
-    *
-    * Note: The constant data is built in brw_upload_cs_push_constants below.
-    */
-   const uint32_t vfe_curbe_allocation =
-      ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
-            cs_prog_data->push.cross_thread.regs, 2);
-   OUT_BATCH(SET_FIELD(vfe_urb_allocation, MEDIA_VFE_STATE_URB_ALLOC) |
-             SET_FIELD(vfe_curbe_allocation, MEDIA_VFE_STATE_CURBE_ALLOC));
-   OUT_BATCH(0);
-   OUT_BATCH(0);
-   OUT_BATCH(0);
-   ADVANCE_BATCH();
-
-   if (cs_prog_data->push.total.size > 0) {
-      BEGIN_BATCH(4);
-      OUT_BATCH(MEDIA_CURBE_LOAD << 16 | (4 - 2));
-      OUT_BATCH(0);
-      OUT_BATCH(ALIGN(cs_prog_data->push.total.size, 64));
-      OUT_BATCH(stage_state->push_const_offset);
-      ADVANCE_BATCH();
-   }
-
-   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
-   memcpy(bind, stage_state->surf_offset,
-          prog_data->binding_table.size_bytes);
-
-   memset(desc, 0, 8 * 4);
-
-   int dw = 0;
-   desc[dw++] = brw->cs.base.prog_offset;
-   if (brw->gen >= 8)
-      desc[dw++] = 0; /* Kernel Start Pointer High */
-   desc[dw++] = 0;
-   desc[dw++] = stage_state->sampler_offset |
-                ((stage_state->sampler_count + 3) / 4);
-   desc[dw++] = stage_state->bind_bo_offset;
-   desc[dw++] = SET_FIELD(cs_prog_data->push.per_thread.regs,
-                          MEDIA_CURBE_READ_LENGTH);
-   const uint32_t media_threads =
-      brw->gen >= 8 ?
-      SET_FIELD(cs_prog_data->threads, GEN8_MEDIA_GPGPU_THREAD_COUNT) :
-      SET_FIELD(cs_prog_data->threads, MEDIA_GPGPU_THREAD_COUNT);
-   assert(cs_prog_data->threads <= devinfo->max_cs_threads);
-
-   const uint32_t slm_size =
-      encode_slm_size(devinfo->gen, prog_data->total_shared);
-
-   desc[dw++] =
-      SET_FIELD(cs_prog_data->uses_barrier, MEDIA_BARRIER_ENABLE) |
-      SET_FIELD(slm_size, MEDIA_SHARED_LOCAL_MEMORY_SIZE) |
-      media_threads;
-
-   desc[dw++] =
-      SET_FIELD(cs_prog_data->push.cross_thread.regs, CROSS_THREAD_READ_LENGTH);
-
-   BEGIN_BATCH(4);
-   OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD << 16 | (4 - 2));
-   OUT_BATCH(0);
-   OUT_BATCH(8 * 4);
-   OUT_BATCH(offset);
-   ADVANCE_BATCH();
-}
-
-const struct brw_tracked_state brw_cs_state = {
-   .dirty = {
-      .mesa = _NEW_PROGRAM_CONSTANTS,
-      .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
-             BRW_NEW_CS_PROG_DATA |
-             BRW_NEW_PUSH_CONSTANT_ALLOCATION |
-             BRW_NEW_SAMPLER_STATE_TABLE |
-             BRW_NEW_SURFACES,
-   },
-   .emit = brw_upload_cs_state
-};
-
-
 /**
  * Creates a region containing the push constants for the CS on gen7+.
  *
@@ -211,8 +47,7 @@ static void
 brw_upload_cs_push_constants(struct brw_context *brw,
                              const struct gl_program *prog,
                              const struct brw_cs_prog_data *cs_prog_data,
-                             struct brw_stage_state *stage_state,
-                             enum aub_state_struct_type type)
+                             struct brw_stage_state *stage_state)
 {
    struct gl_context *ctx = &brw->ctx;
    const struct brw_stage_prog_data *prog_data =
@@ -231,7 +66,7 @@ brw_upload_cs_push_constants(struct brw_context *brw,
 
    gl_constant_value *param = (gl_constant_value*)
-      brw_state_batch(brw, type, ALIGN(cs_prog_data->push.total.size, 64),
+      brw_state_batch(brw, ALIGN(cs_prog_data->push.total.size, 64),
                       64, &stage_state->push_const_offset);
    assert(param);
@@ -288,7 +123,7 @@ gen7_upload_cs_push_constants(struct brw_context *brw)
       _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
 
       brw_upload_cs_push_constants(brw, &cp->program, cs_prog_data,
-                                   stage_state, AUB_TRACE_WM_CONSTANTS);
+                                   stage_state);
    }
 }
 
@@ -298,8 +133,7 @@ const struct brw_tracked_state gen7_cs_push_constants = {
       .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM |
-            BRW_NEW_CS_PROG_DATA |
-            BRW_NEW_PUSH_CONSTANT_ALLOCATION,
+            BRW_NEW_CS_PROG_DATA,
    },
    .emit = gen7_upload_cs_push_constants,
 };
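
For reference, the per-thread scratch space encodings documented in the removed brw_upload_cs_state() above can be summarized as a small standalone helper. This is only an illustrative sketch, not Mesa code: encode_per_thread_scratch() and its gen/is_haswell parameters are assumptions made for this example, and it requires per_thread_scratch to be a power of two on Haswell and later, as the ffs()-based encodings do.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <strings.h> /* ffs() */

/* Illustrative helper (not a Mesa function): compute the value programmed
 * into the MEDIA_VFE_STATE "Per Thread Scratch Space" field, following the
 * three per-generation encodings described in the comments of the removed
 * brw_upload_cs_state() above.
 */
static uint32_t
encode_per_thread_scratch(int gen, bool is_haswell, uint32_t per_thread_scratch)
{
   if (gen >= 8) {
      /* Broadwell+: [0, 11] where 0 = 1kB, 1 = 2kB, 2 = 4kB, ..., 11 = 2MB.
       * ffs(1024) == 11, so subtracting 11 maps 1kB to 0.
       */
      assert((per_thread_scratch & (per_thread_scratch - 1)) == 0);
      return ffs(per_thread_scratch) - 11;
   } else if (is_haswell) {
      /* Haswell: [0, 10] where 0 = 2kB, 1 = 4kB, 2 = 8kB, ..., 10 = 2MB. */
      assert((per_thread_scratch & (per_thread_scratch - 1)) == 0);
      return ffs(per_thread_scratch) - 12;
   } else {
      /* Ivybridge and earlier: linear [0, 11] meaning [1kB, 12kB]. */
      return per_thread_scratch / 1024 - 1;
   }
}

For example, a 64kB per-thread allocation encodes as ffs(65536) - 11 = 6 on Broadwell and ffs(65536) - 12 = 5 on Haswell, while Ivybridge and earlier only express sizes up to 12kB in 1kB steps.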