unsigned unit,
uint32_t *surf_offset,
bool for_gather);
- void (*update_renderbuffer_surface)(struct brw_context *brw,
- struct gl_renderbuffer *rb,
- bool layered,
- unsigned unit);
+ uint32_t (*update_renderbuffer_surface)(struct brw_context *brw,
+ struct gl_renderbuffer *rb,
+ bool layered, unsigned unit,
+ uint32_t surf_index);
void (*emit_texture_surface_state)(struct brw_context *brw,
struct intel_mipmap_tree *mt,
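The vtbl change above inverts the data flow: each update_renderbuffer_surface implementation now returns the surface-state offset it allocated and takes the binding-table slot (surf_index) as an argument, instead of computing the slot from unit and writing into brw->wm.base.surf_offset[] itself. A minimal stand-alone sketch of that pattern follows, using simplified, hypothetical names rather than the real Mesa types and helpers:

#include <stdint.h>

#define MAX_RENDER_TARGETS 8

/* Stand-in for brw->wm.base.surf_offset[]; this whole block is an
 * illustration of the refactor, not the actual driver code. */
static uint32_t surf_offset[MAX_RENDER_TARGETS];

/* Old shape: the helper derives the binding-table slot from 'unit' and
 * writes the allocated offset straight into the table. */
void
update_rt_surface_old(unsigned unit, uint32_t render_target_start)
{
   const uint32_t surf_index = render_target_start + unit;
   surf_offset[surf_index] = 0x1000 + unit * 0x40;  /* pretend allocation */
}

/* New shape: the helper only reports the offset it allocated; the caller
 * decides which slot to store it in. */
uint32_t
update_rt_surface_new(unsigned unit)
{
   return 0x1000 + unit * 0x40;                     /* pretend allocation */
}

/* Caller-side pattern, mirroring the loop over _ColorDrawBuffers below. */
void
update_all_render_targets(unsigned num_draw_buffers, uint32_t render_target_start)
{
   for (unsigned i = 0; i < num_draw_buffers; i++) {
      const uint32_t surf_index = render_target_start + i;
      surf_offset[surf_index] = update_rt_surface_new(i);
   }
}

Keeping the store in the caller is what lets the loop in brw_update_renderbuffer_surfaces() further down reuse the same surf_index for both the real surface and the null-surface fallback.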
* While it is only used for the front/back buffer currently, it should be
* usable for further buffers when doing ARB_draw_buffer support.
*/
-static void
+static uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
- struct gl_renderbuffer *rb,
- bool layered,
- unsigned int unit)
+ struct gl_renderbuffer *rb,
+ bool layered, unsigned unit,
+ uint32_t surf_index)
{
struct gl_context *ctx = &brw->ctx;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
uint32_t *surf;
uint32_t tile_x, tile_y;
uint32_t format = 0;
+ uint32_t offset;
/* _NEW_BUFFERS */
mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
- /* BRW_NEW_FS_PROG_DATA */
- uint32_t surf_index =
- brw->wm.prog_data->binding_table.render_target_start + unit;
assert(!layered);
intel_miptree_used_for_rendering(irb->mt);
- surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
- &brw->wm.base.surf_offset[surf_index]);
+ surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
format = brw->render_target_format[rb_format];
if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
}
drm_intel_bo_emit_reloc(brw->batch.bo,
- brw->wm.base.surf_offset[surf_index] + 4,
- mt->bo,
- surf[1] - mt->bo->offset64,
- I915_GEM_DOMAIN_RENDER,
- I915_GEM_DOMAIN_RENDER);
+ offset + 4,
+ mt->bo,
+ surf[1] - mt->bo->offset64,
+ I915_GEM_DOMAIN_RENDER,
+ I915_GEM_DOMAIN_RENDER);
+
+ return offset;
}
/**
/* Update surfaces for drawing buffers */
if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
+ const uint32_t surf_index =
+ brw->wm.prog_data->binding_table.render_target_start + i;
+
if (intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i])) {
- brw->vtbl.update_renderbuffer_surface(brw, ctx->DrawBuffer->_ColorDrawBuffers[i],
- ctx->DrawBuffer->MaxNumLayers > 0, i);
+ brw->wm.base.surf_offset[surf_index] =
+ brw->vtbl.update_renderbuffer_surface(
+ brw, ctx->DrawBuffer->_ColorDrawBuffers[i],
+ ctx->DrawBuffer->MaxNumLayers > 0, i, surf_index);
} else {
- const uint32_t surf_index =
- brw->wm.prog_data->binding_table.render_target_start + i;
-
brw->vtbl.emit_null_surface_state(
brw, fb->Width, fb->Height, fb->Visual.samples,
&brw->wm.base.surf_offset[surf_index]);
* While it is only used for the front/back buffer currently, it should be
* usable for further buffers when doing ARB_draw_buffer support.
*/
-static void
+static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
struct gl_renderbuffer *rb,
- bool layered,
- unsigned int unit)
+ bool layered, unsigned unit /* unused */,
+ uint32_t surf_index)
{
struct gl_context *ctx = &brw->ctx;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct intel_mipmap_tree *mt = irb->mt;
uint32_t *surf;
uint32_t format = 0;
+ uint32_t offset;
/* _NEW_BUFFERS */
mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
uint32_t surftype;
const GLenum gl_target =
rb->TexImage ? rb->TexImage->TexObject->Target : GL_TEXTURE_2D;
- uint32_t surf_index =
- brw->wm.prog_data->binding_table.render_target_start + unit;
-
intel_miptree_used_for_rendering(irb->mt);
- surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
- &brw->wm.base.surf_offset[surf_index]);
+ surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
format = brw->render_target_format[rb_format];
if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
surf[5] = (mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0);
drm_intel_bo_emit_reloc(brw->batch.bo,
- brw->wm.base.surf_offset[surf_index] + 4,
- mt->bo,
- surf[1] - mt->bo->offset64,
- I915_GEM_DOMAIN_RENDER,
- I915_GEM_DOMAIN_RENDER);
+ offset + 4,
+ mt->bo,
+ surf[1] - mt->bo->offset64,
+ I915_GEM_DOMAIN_RENDER,
+ I915_GEM_DOMAIN_RENDER);
+
+ return offset;
}
void
* While it is only used for the front/back buffer currently, it should be
* usable for further buffers when doing ARB_draw_buffer support.
*/
-static void
+static uint32_t
gen7_update_renderbuffer_surface(struct brw_context *brw,
- struct gl_renderbuffer *rb,
- bool layered,
- unsigned int unit)
+ struct gl_renderbuffer *rb,
+ bool layered, unsigned unit /* unused */,
+ uint32_t surf_index)
{
struct gl_context *ctx = &brw->ctx;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
bool is_array = false;
int depth = MAX2(irb->layer_count, 1);
const uint8_t mocs = GEN7_MOCS_L3;
+ uint32_t offset;
int min_array_element = irb->mt_layer / MAX2(mt->num_samples, 1);
GLenum gl_target = rb->TexImage ?
rb->TexImage->TexObject->Target : GL_TEXTURE_2D;
- uint32_t surf_index =
- brw->wm.prog_data->binding_table.render_target_start + unit;
-
uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
- &brw->wm.base.surf_offset[surf_index]);
+ &offset);
memset(surf, 0, 8 * 4);
intel_miptree_used_for_rendering(irb->mt);
(depth - 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT;
if (irb->mt->mcs_mt) {
- gen7_set_surface_mcs_info(brw, surf, brw->wm.base.surf_offset[surf_index],
+ gen7_set_surface_mcs_info(brw, surf, offset,
irb->mt->mcs_mt, true /* is RT */);
}
}
drm_intel_bo_emit_reloc(brw->batch.bo,
- brw->wm.base.surf_offset[surf_index] + 4,
- mt->bo,
- surf[1] - mt->bo->offset64,
- I915_GEM_DOMAIN_RENDER,
- I915_GEM_DOMAIN_RENDER);
+ offset + 4,
+ mt->bo,
+ surf[1] - mt->bo->offset64,
+ I915_GEM_DOMAIN_RENDER,
+ I915_GEM_DOMAIN_RENDER);
gen7_check_surface_setup(surf, true /* is_render_target */);
+
+ return offset;
}
void
* While it is only used for the front/back buffer currently, it should be
* usable for further buffers when doing ARB_draw_buffer support.
*/
-static void
+static uint32_t
gen8_update_renderbuffer_surface(struct brw_context *brw,
struct gl_renderbuffer *rb,
- bool layered,
- unsigned unit)
+ bool layered, unsigned unit /* unused */,
+ uint32_t surf_index)
{
struct gl_context *ctx = &brw->ctx;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
uint32_t tiling = mt->tiling;
uint32_t format = 0;
uint32_t surf_type;
+ uint32_t offset;
bool is_array = false;
int depth = MAX2(irb->layer_count, 1);
const int min_array_element = (mt->format == MESA_FORMAT_S_UINT8) ?
irb->mt_layer : (irb->mt_layer / MAX2(mt->num_samples, 1));
GLenum gl_target =
rb->TexImage ? rb->TexImage->TexObject->Target : GL_TEXTURE_2D;
- uint32_t surf_index =
- brw->wm.prog_data->binding_table.render_target_start + unit;
/* FINISHME: Use PTE MOCS on Skylake. */
uint32_t mocs = brw->gen >= 9 ? SKL_MOCS_WT : BDW_MOCS_PTE;
aux_mode = GEN8_SURFACE_AUX_MODE_MCS;
}
- uint32_t *surf =
- allocate_surface_state(brw, &brw->wm.base.surf_offset[surf_index]);
+ uint32_t *surf = allocate_surface_state(brw, &offset);
surf[0] = (surf_type << BRW_SURFACE_TYPE_SHIFT) |
(is_array ? GEN7_SURFACE_IS_ARRAY : 0) |
if (aux_mt) {
*((uint64_t *) &surf[10]) = aux_mt->bo->offset64;
drm_intel_bo_emit_reloc(brw->batch.bo,
- brw->wm.base.surf_offset[surf_index] + 10 * 4,
+ offset + 10 * 4,
aux_mt->bo, 0,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
} else {
surf[12] = 0;
drm_intel_bo_emit_reloc(brw->batch.bo,
- brw->wm.base.surf_offset[surf_index] + 8 * 4,
+ offset + 8 * 4,
mt->bo,
mt->offset,
I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER);
+
+ return offset;
}
void