X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_wm_surface_state.c;h=c9dac5be0b4ece66c5c59b9e8cf11ab867cefc96;hb=4e56a9ad46ff7fe85308ce12e21719ff2b476516;hp=303b5cb966241a32f5c1935dd07eaa57ee1a20ed;hpb=ae8b066da5862b4cfc510b3a9a0e1273f9f6edd4;p=mesa.git

diff --git a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
index 303b5cb9662..c9dac5be0b4 100644
--- a/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
+++ b/src/mesa/drivers/dri/i965/brw_wm_surface_state.c
@@ -73,8 +73,7 @@ translate_tex_target(GLenum target)
       return BRW_SURFACE_CUBE;
 
    default:
-      assert(0);
-      return 0;
+      unreachable("not reached");
    }
 }
 
@@ -101,6 +100,30 @@ brw_get_surface_num_multisamples(unsigned num_samples)
       return BRW_SURFACE_MULTISAMPLECOUNT_1;
 }
 
+void
+brw_configure_w_tiled(const struct intel_mipmap_tree *mt,
+                      bool is_render_target,
+                      unsigned *width, unsigned *height,
+                      unsigned *pitch, uint32_t *tiling, unsigned *format)
+{
+   static const unsigned halign_stencil = 8;
+
+   /* In Y-tiling a row is twice as wide as in W-tiling, and consequently
+    * there are half as many rows.
+    * In addition, mip-levels are accessed manually by the program and
+    * therefore the surface is set up to cover all the mip-levels for one slice.
+    * (Hardware is still used to access individual slices.)
+    */
+   *tiling = I915_TILING_Y;
+   *pitch = mt->pitch * 2;
+   *width = ALIGN(mt->total_width, halign_stencil) * 2;
+   *height = (mt->total_height / mt->physical_depth0) / 2;
+
+   if (is_render_target) {
+      *format = BRW_SURFACEFORMAT_R8_UINT;
+   }
+}
+
 /**
  * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
@@ -164,6 +187,8 @@ brw_get_texture_swizzle(const struct gl_context *ctx,
       }
    }
 
+   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
+
    /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
@@ -176,6 +201,30 @@ brw_get_texture_swizzle(const struct gl_context *ctx,
          swizzles[1] = SWIZZLE_ZERO;
          swizzles[2] = SWIZZLE_ZERO;
          break;
+      case GL_LUMINANCE:
+         if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
+            swizzles[0] = SWIZZLE_X;
+            swizzles[1] = SWIZZLE_X;
+            swizzles[2] = SWIZZLE_X;
+            swizzles[3] = SWIZZLE_ONE;
+         }
+         break;
+      case GL_LUMINANCE_ALPHA:
+         if (datatype == GL_SIGNED_NORMALIZED) {
+            swizzles[0] = SWIZZLE_X;
+            swizzles[1] = SWIZZLE_X;
+            swizzles[2] = SWIZZLE_X;
+            swizzles[3] = SWIZZLE_W;
+         }
+         break;
+      case GL_INTENSITY:
+         if (datatype == GL_SIGNED_NORMALIZED) {
+            swizzles[0] = SWIZZLE_X;
+            swizzles[1] = SWIZZLE_X;
+            swizzles[2] = SWIZZLE_X;
+            swizzles[3] = SWIZZLE_X;
+         }
+         break;
       case GL_RED:
       case GL_RG:
       case GL_RGB:
@@ -198,7 +247,6 @@ gen4_emit_buffer_surface_state(struct brw_context *brw,
                                unsigned surface_format,
                                unsigned buffer_size,
                                unsigned pitch,
-                               unsigned mocs,
                                bool rw)
 {
    uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
@@ -256,7 +304,6 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
                                       brw_format,
                                       size / texel_size,
                                       texel_size,
-                                      0, /* mocs */
                                       false /* rw */);
 }
 
@@ -273,7 +320,7 @@ brw_update_texture_surface(struct gl_context *ctx,
    struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
    uint32_t *surf;
 
-   /* BRW_NEW_UNIFORM_BUFFER */
+   /* BRW_NEW_TEXTURE_BUFFER */
    if (tObj->Target == GL_TEXTURE_BUFFER) {
       brw_update_buffer_texture_surface(ctx, unit, surf_offset);
       return;
@@ -319,28 +366,27 @@ brw_update_texture_surface(struct gl_context *ctx,
               BRW_SURFACE_CUBEFACE_ENABLES |
               tex_format << BRW_SURFACE_FORMAT_SHIFT);
 
-   surf[1] = intelObj->mt->region->bo->offset64 + intelObj->mt->offset; /* reloc */
+   surf[1] = mt->bo->offset64 + mt->offset; /* reloc */
 
    surf[2] = ((intelObj->_MaxLevel - tObj->BaseLevel) << BRW_SURFACE_LOD_SHIFT |
              (mt->logical_width0 - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (mt->logical_height0 - 1) << BRW_SURFACE_HEIGHT_SHIFT);
 
-   surf[3] = (brw_get_surface_tiling_bits(intelObj->mt->region->tiling) |
+   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->logical_depth0 - 1) << BRW_SURFACE_DEPTH_SHIFT |
-             (intelObj->mt->region->pitch - 1) <<
-             BRW_SURFACE_PITCH_SHIFT);
+             (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
 
-   surf[4] = (brw_get_surface_num_multisamples(intelObj->mt->num_samples) |
+   surf[4] = (brw_get_surface_num_multisamples(mt->num_samples) |
              SET_FIELD(tObj->BaseLevel - mt->first_level, BRW_SURFACE_MIN_LOD));
 
    surf[5] = mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0;
 
    /* Emit relocation to surface contents */
    drm_intel_bo_emit_reloc(brw->batch.bo,
-                           *surf_offset + 4,
-                           intelObj->mt->region->bo,
-                           surf[1] - intelObj->mt->region->bo->offset64,
-                           I915_GEM_DOMAIN_SAMPLER, 0);
+                           *surf_offset + 4,
+                           mt->bo,
+                           surf[1] - mt->bo->offset64,
+                           I915_GEM_DOMAIN_SAMPLER, 0);
 }
 
 /**
@@ -360,14 +406,14 @@ brw_create_constant_surface(struct brw_context *brw,
 
    brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                        BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
-                                       elements, stride, 0, false);
+                                       elements, stride, false);
 }
 
 /**
  * Set up a binding table entry for use by stream output logic (transform
  * feedback).
  *
- * buffer_size_minus_1 must me less than BRW_MAX_NUM_BUFFER_ENTRIES.
+ * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
  */
 void
 brw_update_sol_surface(struct brw_context *brw,
 
@@ -426,9 +472,7 @@ brw_update_sol_surface(struct brw_context *brw,
       surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
       break;
    default:
-      assert(!"Invalid vector size for transform feedback output");
-      surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
-      break;
+      unreachable("Invalid vector size for transform feedback output");
    }
 
    surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
@@ -459,60 +503,42 @@ brw_update_sol_surface(struct brw_context *brw,
 static void
 brw_upload_wm_pull_constants(struct brw_context *brw)
 {
-   struct gl_context *ctx = &brw->ctx;
+   struct brw_stage_state *stage_state = &brw->wm.base;
    /* BRW_NEW_FRAGMENT_PROGRAM */
    struct brw_fragment_program *fp =
       (struct brw_fragment_program *) brw->fragment_program;
-   struct gl_program_parameter_list *params = fp->program.Base.Parameters;
-   const int size = brw->wm.prog_data->base.nr_pull_params * sizeof(float);
-   const int surf_index =
-      brw->wm.prog_data->base.binding_table.pull_constants_start;
-   float *constants;
-   unsigned int i;
-
-   _mesa_load_state_parameters(ctx, params);
-
-   /* CACHE_NEW_WM_PROG */
-   if (brw->wm.prog_data->base.nr_pull_params == 0) {
-      if (brw->wm.base.const_bo) {
-         drm_intel_bo_unreference(brw->wm.base.const_bo);
-         brw->wm.base.const_bo = NULL;
-         brw->wm.base.surf_offset[surf_index] = 0;
-         brw->state.dirty.brw |= BRW_NEW_SURFACES;
-      }
-      return;
-   }
-
-   drm_intel_bo_unreference(brw->wm.base.const_bo);
-   brw->wm.base.const_bo = drm_intel_bo_alloc(brw->bufmgr, "WM const bo",
-                                              size, 64);
+   /* BRW_NEW_FS_PROG_DATA */
+   struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;
 
    /* _NEW_PROGRAM_CONSTANTS */
-   drm_intel_gem_bo_map_gtt(brw->wm.base.const_bo);
-   constants = brw->wm.base.const_bo->virtual;
-   for (i = 0; i < brw->wm.prog_data->base.nr_pull_params; i++) {
-      constants[i] = *brw->wm.prog_data->base.pull_param[i];
-   }
-   drm_intel_gem_bo_unmap_gtt(brw->wm.base.const_bo);
-
-   brw_create_constant_surface(brw, brw->wm.base.const_bo, 0, size,
-                               &brw->wm.base.surf_offset[surf_index],
-                               true);
-
-   brw->state.dirty.brw |= BRW_NEW_SURFACES;
+   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
+                             stage_state, prog_data, true);
 }
 
 const struct brw_tracked_state brw_wm_pull_constants = {
    .dirty = {
-      .mesa = (_NEW_PROGRAM_CONSTANTS),
-      .brw = (BRW_NEW_BATCH | BRW_NEW_FRAGMENT_PROGRAM),
-      .cache = CACHE_NEW_WM_PROG,
+      .mesa = _NEW_PROGRAM_CONSTANTS,
+      .brw = BRW_NEW_BATCH |
+             BRW_NEW_FRAGMENT_PROGRAM |
+             BRW_NEW_FS_PROG_DATA,
    },
    .emit = brw_upload_wm_pull_constants,
 };
 
+/**
+ * Creates a null renderbuffer surface.
+ *
+ * This is used when the shader doesn't write to any color output. An FB
+ * write to target 0 will still be emitted, because that's how the thread is
+ * terminated (and computed depth is returned), so we need to have the
+ * hardware discard the target 0 color output.
+ */
 static void
-brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
+brw_emit_null_surface_state(struct brw_context *brw,
+                            unsigned width,
+                            unsigned height,
+                            unsigned samples,
+                            uint32_t *out_offset)
 {
    /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
     * Notes):
@@ -532,22 +558,14 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
     *
     * - Surface Format must be R8G8B8A8_UNORM.
     */
-   struct gl_context *ctx = &brw->ctx;
-   uint32_t *surf;
    unsigned surface_type = BRW_SURFACE_NULL;
    drm_intel_bo *bo = NULL;
    unsigned pitch_minus_1 = 0;
    uint32_t multisampling_state = 0;
-   uint32_t surf_index =
-      brw->wm.prog_data->binding_table.render_target_start + unit;
-
-   /* _NEW_BUFFERS */
-   const struct gl_framebuffer *fb = ctx->DrawBuffer;
-
-   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
-                          &brw->wm.base.surf_offset[surf_index]);
+   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
+                                    out_offset);
 
-   if (fb->Visual.samples > 1) {
+   if (samples > 1) {
      /* On Gen6, null render targets seem to cause GPU hangs when
       * multisampling. So work around this problem by rendering into dummy
       * color buffer.
@@ -562,16 +580,15 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
       * width_in_tiles and height_in_tiles by dividing the width and height
       * by 16 rather than the normal Y-tile size of 32.
       */
-      unsigned width_in_tiles = ALIGN(fb->Width, 16) / 16;
-      unsigned height_in_tiles = ALIGN(fb->Height, 16) / 16;
+      unsigned width_in_tiles = ALIGN(width, 16) / 16;
+      unsigned height_in_tiles = ALIGN(height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;
      pitch_minus_1 = 127;
-      multisampling_state =
-         brw_get_surface_num_multisamples(fb->Visual.samples);
+      multisampling_state = brw_get_surface_num_multisamples(samples);
    }
 
    surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
@@ -583,8 +600,8 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
                 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
    }
    surf[1] = bo ? bo->offset64 : 0;
-   surf[2] = ((fb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
-              (fb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
+   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
+              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
 
    /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
     * Notes):
@@ -598,7 +615,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
    if (bo) {
       drm_intel_bo_emit_reloc(brw->batch.bo,
-                              brw->wm.base.surf_offset[surf_index] + 4,
+                              *out_offset + 4,
                               bo, 0,
                               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
    }
@@ -618,12 +635,12 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
    struct gl_context *ctx = &brw->ctx;
    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
    struct intel_mipmap_tree *mt = irb->mt;
-   struct intel_region *region;
    uint32_t *surf;
    uint32_t tile_x, tile_y;
    uint32_t format = 0;
    /* _NEW_BUFFERS */
    mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
+   /* BRW_NEW_FS_PROG_DATA */
    uint32_t surf_index =
       brw->wm.prog_data->binding_table.render_target_start + unit;
 
@@ -646,8 +663,6 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
 
    intel_miptree_used_for_rendering(irb->mt);
 
-   region = irb->mt->region;
-
    surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                           &brw->wm.base.surf_offset[surf_index]);
 
@@ -661,14 +676,15 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
              format << BRW_SURFACE_FORMAT_SHIFT);
 
    /* reloc */
+   assert(mt->offset % mt->cpp == 0);
    surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
-              region->bo->offset64);
+              mt->bo->offset64 + mt->offset);
 
    surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
 
-   surf[3] = (brw_get_surface_tiling_bits(region->tiling) |
-              (region->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
+   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
+              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
 
    surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
 
@@ -706,8 +722,8 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
    drm_intel_bo_emit_reloc(brw->batch.bo,
                            brw->wm.base.surf_offset[surf_index] + 4,
-                           region->bo,
-                           surf[1] - region->bo->offset64,
+                           mt->bo,
+                           surf[1] - mt->bo->offset64,
                            I915_GEM_DOMAIN_RENDER,
                            I915_GEM_DOMAIN_RENDER);
 }
@@ -719,6 +735,8 @@ static void
 brw_update_renderbuffer_surfaces(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->ctx;
+   /* _NEW_BUFFERS */
+   const struct gl_framebuffer *fb = ctx->DrawBuffer;
    GLuint i;
 
    /* _NEW_BUFFERS | _NEW_COLOR */
@@ -729,21 +747,31 @@ brw_update_renderbuffer_surfaces(struct brw_context *brw)
            brw->vtbl.update_renderbuffer_surface(brw, ctx->DrawBuffer->_ColorDrawBuffers[i],
                                                  ctx->DrawBuffer->MaxNumLayers > 0, i);
         } else {
-           brw->vtbl.update_null_renderbuffer_surface(brw, i);
+           const uint32_t surf_index =
+              brw->wm.prog_data->binding_table.render_target_start + i;
+
+           brw->vtbl.emit_null_surface_state(
+              brw, fb->Width, fb->Height, fb->Visual.samples,
+              &brw->wm.base.surf_offset[surf_index]);
         }
      }
   } else {
-      brw->vtbl.update_null_renderbuffer_surface(brw, 0);
+      const uint32_t surf_index =
+         brw->wm.prog_data->binding_table.render_target_start;
+
+      brw->vtbl.emit_null_surface_state(
+         brw, fb->Width, fb->Height, fb->Visual.samples,
+         &brw->wm.base.surf_offset[surf_index]);
   }
 
-   brw->state.dirty.brw |= BRW_NEW_SURFACES;
+   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
 }
 
 const struct brw_tracked_state brw_renderbuffer_surfaces = {
    .dirty = {
-      .mesa = (_NEW_COLOR |
-               _NEW_BUFFERS),
-      .brw = BRW_NEW_BATCH,
-      .cache = 0
+      .mesa = _NEW_BUFFERS |
+              _NEW_COLOR,
+      .brw = BRW_NEW_BATCH |
+             BRW_NEW_FS_PROG_DATA,
    },
    .emit = brw_update_renderbuffer_surfaces,
 };
@@ -752,7 +780,6 @@ const struct brw_tracked_state gen6_renderbuffer_surfaces = {
    .dirty = {
       .mesa = _NEW_BUFFERS,
       .brw = BRW_NEW_BATCH,
-      .cache = 0
    },
    .emit = brw_update_renderbuffer_surfaces,
 };
@@ -770,6 +797,8 @@ update_stage_texture_surfaces(struct brw_context *brw,
    struct gl_context *ctx = &brw->ctx;
 
    uint32_t *surf_offset = stage_state->surf_offset;
+
+   /* BRW_NEW_*_PROG_DATA */
    if (for_gather)
       surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
    else
@@ -783,7 +812,7 @@ update_stage_texture_surfaces(struct brw_context *brw,
       const unsigned unit = prog->SamplerUnits[s];
 
       /* _NEW_TEXTURE */
-      if (ctx->Texture.Unit[unit]._ReallyEnabled) {
+      if (ctx->Texture.Unit[unit]._Current) {
          brw->vtbl.update_texture_surface(ctx, unit, surf_offset + s, for_gather);
       }
    }
@@ -814,25 +843,29 @@ brw_update_texture_surfaces(struct brw_context *brw)
    /* emit alternate set of surface state for gather. this
    * allows the surface format to be overriden for only the
    * gather4 messages. */
-   if (vs && vs->UsesGather)
-      update_stage_texture_surfaces(brw, vs, &brw->vs.base, true);
-   if (gs && gs->UsesGather)
-      update_stage_texture_surfaces(brw, gs, &brw->gs.base, true);
-   if (fs && fs->UsesGather)
-      update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
-
-   brw->state.dirty.brw |= BRW_NEW_SURFACES;
+   if (brw->gen < 8) {
+      if (vs && vs->UsesGather)
+         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true);
+      if (gs && gs->UsesGather)
+         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true);
+      if (fs && fs->UsesGather)
+         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
+   }
+
+   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
 }
 
 const struct brw_tracked_state brw_texture_surfaces = {
    .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
-            BRW_NEW_UNIFORM_BUFFER |
-            BRW_NEW_VERTEX_PROGRAM |
+            BRW_NEW_FRAGMENT_PROGRAM |
+            BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
-            BRW_NEW_FRAGMENT_PROGRAM,
-      .cache = 0
+            BRW_NEW_GS_PROG_DATA |
+            BRW_NEW_TEXTURE_BUFFER |
+            BRW_NEW_VERTEX_PROGRAM |
+            BRW_NEW_VS_PROG_DATA,
    },
    .emit = brw_update_texture_surfaces,
 };
@@ -841,7 +874,8 @@ void
 brw_upload_ubo_surfaces(struct brw_context *brw,
                         struct gl_shader *shader,
                         struct brw_stage_state *stage_state,
-                        struct brw_stage_prog_data *prog_data)
+                        struct brw_stage_prog_data *prog_data,
+                        bool dword_pitch)
 {
    struct gl_context *ctx = &brw->ctx;
 
@@ -869,11 +903,11 @@ brw_upload_ubo_surfaces(struct brw_context *brw,
       brw_create_constant_surface(brw, bo, binding->Offset,
                                   bo->size - binding->Offset,
                                   &surf_offsets[i],
-                                  shader->Stage == MESA_SHADER_FRAGMENT);
+                                  dword_pitch);
    }
 
    if (shader->NumUniformBlocks)
-      brw->state.dirty.brw |= BRW_NEW_SURFACES;
+      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
 }
 
 static void
@@ -881,21 +915,22 @@ brw_upload_wm_ubo_surfaces(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->ctx;
    /* _NEW_PROGRAM */
-   struct gl_shader_program *prog = ctx->Shader._CurrentFragmentProgram;
+   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
 
    if (!prog)
      return;
 
-   /* CACHE_NEW_WM_PROG */
+   /* BRW_NEW_FS_PROG_DATA */
    brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
-                           &brw->wm.base, &brw->wm.prog_data->base);
+                           &brw->wm.base, &brw->wm.prog_data->base, true);
 }
 
 const struct brw_tracked_state brw_wm_ubo_surfaces = {
    .dirty = {
      .mesa = _NEW_PROGRAM,
-      .brw = BRW_NEW_BATCH | BRW_NEW_UNIFORM_BUFFER,
-      .cache = CACHE_NEW_WM_PROG,
+      .brw = BRW_NEW_BATCH |
+             BRW_NEW_FS_PROG_DATA |
+             BRW_NEW_UNIFORM_BUFFER,
    },
    .emit = brw_upload_wm_ubo_surfaces,
 };
@@ -918,13 +953,13 @@ brw_upload_abo_surfaces(struct brw_context *brw,
       drm_intel_bo *bo = intel_bufferobj_buffer(
          brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
 
-      brw->vtbl.create_raw_surface(brw, bo, binding->Offset,
-                                   bo->size - binding->Offset,
-                                   &surf_offsets[i], true);
+      brw->vtbl.emit_buffer_surface_state(brw, &surf_offsets[i], bo,
+                                          binding->Offset, BRW_SURFACEFORMAT_RAW,
+                                          bo->size - binding->Offset, 1, true);
    }
 
-   if (prog->NumUniformBlocks)
-      brw->state.dirty.brw |= BRW_NEW_SURFACES;
+   if (prog->NumAtomicBuffers)
+      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
 }
 
 static void
@@ -935,7 +970,7 @@ brw_upload_wm_abo_surfaces(struct brw_context *brw)
    struct gl_shader_program *prog = ctx->Shader._CurrentFragmentProgram;
 
    if (prog) {
-      /* CACHE_NEW_WM_PROG */
+      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog, &brw->wm.base,
                              &brw->wm.prog_data->base);
    }
@@ -944,8 +979,9 @@ brw_upload_wm_abo_surfaces(struct brw_context *brw)
 const struct brw_tracked_state brw_wm_abo_surfaces = {
    .dirty = {
      .mesa = _NEW_PROGRAM,
-      .brw = BRW_NEW_BATCH | BRW_NEW_ATOMIC_BUFFER,
-      .cache = CACHE_NEW_WM_PROG,
+      .brw = BRW_NEW_ATOMIC_BUFFER |
+             BRW_NEW_BATCH |
+             BRW_NEW_FS_PROG_DATA,
    },
    .emit = brw_upload_wm_abo_surfaces,
 };
@@ -955,7 +991,6 @@ gen4_init_vtable_surface_functions(struct brw_context *brw)
 {
    brw->vtbl.update_texture_surface = brw_update_texture_surface;
    brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
-   brw->vtbl.update_null_renderbuffer_surface =
-      brw_update_null_renderbuffer_surface;
+   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
    brw->vtbl.emit_buffer_surface_state = gen4_emit_buffer_surface_state;
 }
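Editor's note, appended after the diff rather than inside it: the new brw_configure_w_tiled helper in the first hunk describes a W-tiled stencil miptree as a Y-tiled surface (R8_UINT when used as a render target) by doubling the pitch and the aligned width and halving the per-slice height. The following standalone C sketch, using hypothetical miptree values rather than a real struct intel_mipmap_tree, simply replays that arithmetic so the mapping is easy to check; it is not driver code.

/* Standalone sketch (assumed values): W- to Y-tiled parameter conversion
 * as performed by brw_configure_w_tiled in the hunk above.
 */
#include <stdio.h>

/* Local stand-in for Mesa's ALIGN macro (power-of-two alignment). */
#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))

int main(void)
{
   /* Hypothetical dimensions for a single-slice W-tiled stencil miptree. */
   const unsigned total_width     = 70;   /* pixels, all mip levels packed */
   const unsigned total_height    = 108;  /* rows, all mip levels packed   */
   const unsigned physical_depth0 = 1;    /* one slice                     */
   const unsigned w_tiled_pitch   = 128;  /* bytes per W-tiled row         */
   const unsigned halign_stencil  = 8;    /* horizontal alignment used     */

   /* Same arithmetic as brw_configure_w_tiled: pitch and aligned width
    * double, per-slice height halves.
    */
   const unsigned y_pitch  = w_tiled_pitch * 2;
   const unsigned y_width  = ALIGN(total_width, halign_stencil) * 2;
   const unsigned y_height = (total_height / physical_depth0) / 2;

   /* Prints: Y-tiled view: width 144, height 54, pitch 256 */
   printf("Y-tiled view: width %u, height %u, pitch %u\n",
          y_width, y_height, y_pitch);
   return 0;
}

Running the sketch shows the width/height/pitch that such a Y-tiled view of the stencil buffer would carry; the real driver feeds the equivalent values into SURFACE_STATE setup.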