i965: fix fbo-srgb on i965.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/mtypes.h"
34 #include "main/texstore.h"
35 #include "program/prog_parameter.h"
36
37 #include "intel_mipmap_tree.h"
38 #include "intel_batchbuffer.h"
39 #include "intel_tex.h"
40 #include "intel_fbo.h"
41
42 #include "brw_context.h"
43 #include "brw_state.h"
44 #include "brw_defines.h"
45 #include "brw_wm.h"
46
47 static GLuint translate_tex_target( GLenum target )
48 {
49 switch (target) {
50 case GL_TEXTURE_1D:
51 return BRW_SURFACE_1D;
52
53 case GL_TEXTURE_RECTANGLE_NV:
54 return BRW_SURFACE_2D;
55
56 case GL_TEXTURE_2D:
57 return BRW_SURFACE_2D;
58
59 case GL_TEXTURE_3D:
60 return BRW_SURFACE_3D;
61
62 case GL_TEXTURE_CUBE_MAP:
63 return BRW_SURFACE_CUBE;
64
65 default:
66 assert(0);
67 return 0;
68 }
69 }
70
/* Translation table from Mesa surface formats to hardware surface formats.
 * Entries left at zero mean "no direct hardware equivalent"; callers treat
 * zero as unsupported (see brw_render_target_supported and
 * translate_tex_format, which asserts on zero).
 */
static uint32_t brw_format_for_mesa_format[MESA_FORMAT_COUNT] =
{
   [MESA_FORMAT_L8] = BRW_SURFACEFORMAT_L8_UNORM,
   [MESA_FORMAT_I8] = BRW_SURFACEFORMAT_I8_UNORM,
   [MESA_FORMAT_A8] = BRW_SURFACEFORMAT_A8_UNORM,
   [MESA_FORMAT_AL88] = BRW_SURFACEFORMAT_L8A8_UNORM,
   [MESA_FORMAT_AL1616] = BRW_SURFACEFORMAT_L16A16_UNORM,
   [MESA_FORMAT_R8] = BRW_SURFACEFORMAT_R8_UNORM,
   [MESA_FORMAT_R16] = BRW_SURFACEFORMAT_R16_UNORM,
   [MESA_FORMAT_RG88] = BRW_SURFACEFORMAT_R8G8_UNORM,
   [MESA_FORMAT_RG1616] = BRW_SURFACEFORMAT_R16G16_UNORM,
   [MESA_FORMAT_ARGB8888] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM,
   [MESA_FORMAT_XRGB8888] = BRW_SURFACEFORMAT_B8G8R8X8_UNORM,
   [MESA_FORMAT_RGB565] = BRW_SURFACEFORMAT_B5G6R5_UNORM,
   [MESA_FORMAT_ARGB1555] = BRW_SURFACEFORMAT_B5G5R5A1_UNORM,
   [MESA_FORMAT_ARGB4444] = BRW_SURFACEFORMAT_B4G4R4A4_UNORM,
   [MESA_FORMAT_YCBCR_REV] = BRW_SURFACEFORMAT_YCRCB_NORMAL,
   [MESA_FORMAT_YCBCR] = BRW_SURFACEFORMAT_YCRCB_SWAPUVY,
   [MESA_FORMAT_RGB_FXT1] = BRW_SURFACEFORMAT_FXT1,
   [MESA_FORMAT_RGBA_FXT1] = BRW_SURFACEFORMAT_FXT1,
   [MESA_FORMAT_RGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB,
   [MESA_FORMAT_RGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM,
   [MESA_FORMAT_RGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM,
   [MESA_FORMAT_RGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM,
   [MESA_FORMAT_SRGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB_SRGB,
   [MESA_FORMAT_SRGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM_SRGB,
   [MESA_FORMAT_SRGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM_SRGB,
   [MESA_FORMAT_SRGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM_SRGB,
   [MESA_FORMAT_SARGB8] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB,
   [MESA_FORMAT_SLA8] = BRW_SURFACEFORMAT_L8A8_UNORM_SRGB,
   [MESA_FORMAT_SL8] = BRW_SURFACEFORMAT_L8_UNORM_SRGB,
   [MESA_FORMAT_DUDV8] = BRW_SURFACEFORMAT_R8G8_SNORM,
   [MESA_FORMAT_SIGNED_RGBA8888_REV] = BRW_SURFACEFORMAT_R8G8B8A8_SNORM,
};
105
106 bool
107 brw_render_target_supported(gl_format format)
108 {
109 if (format == MESA_FORMAT_S8_Z24 ||
110 format == MESA_FORMAT_X8_Z24 ||
111 format == MESA_FORMAT_Z16) {
112 return true;
113 }
114
115 /* Not exactly true, as some of those formats are not renderable.
116 * But at least we know how to translate them.
117 */
118 return brw_format_for_mesa_format[format] != 0;
119 }
120
121 static GLuint translate_tex_format( gl_format mesa_format,
122 GLenum internal_format,
123 GLenum depth_mode )
124 {
125 switch( mesa_format ) {
126
127 case MESA_FORMAT_Z16:
128 if (depth_mode == GL_INTENSITY)
129 return BRW_SURFACEFORMAT_I16_UNORM;
130 else if (depth_mode == GL_ALPHA)
131 return BRW_SURFACEFORMAT_A16_UNORM;
132 else if (depth_mode == GL_RED)
133 return BRW_SURFACEFORMAT_R16_UNORM;
134 else
135 return BRW_SURFACEFORMAT_L16_UNORM;
136
137 case MESA_FORMAT_S8_Z24:
138 /* XXX: these different surface formats don't seem to
139 * make any difference for shadow sampler/compares.
140 */
141 if (depth_mode == GL_INTENSITY)
142 return BRW_SURFACEFORMAT_I24X8_UNORM;
143 else if (depth_mode == GL_ALPHA)
144 return BRW_SURFACEFORMAT_A24X8_UNORM;
145 else if (depth_mode == GL_RED)
146 return BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS;
147 else
148 return BRW_SURFACEFORMAT_L24X8_UNORM;
149
150 default:
151 assert(brw_format_for_mesa_format[mesa_format] != 0);
152 return brw_format_for_mesa_format[mesa_format];
153 }
154 }
155
156 static void
157 brw_set_surface_tiling(struct brw_surface_state *surf, uint32_t tiling)
158 {
159 switch (tiling) {
160 case I915_TILING_NONE:
161 surf->ss3.tiled_surface = 0;
162 surf->ss3.tile_walk = 0;
163 break;
164 case I915_TILING_X:
165 surf->ss3.tiled_surface = 1;
166 surf->ss3.tile_walk = BRW_TILEWALK_XMAJOR;
167 break;
168 case I915_TILING_Y:
169 surf->ss3.tiled_surface = 1;
170 surf->ss3.tile_walk = BRW_TILEWALK_YMAJOR;
171 break;
172 }
173 }
174
/**
 * Builds the SURFACE_STATE for the texture bound to the given texture
 * unit, emits it into the batch, and records a relocation so the
 * sampler reads from the texture's buffer object.
 */
static void
brw_update_texture_surface( struct gl_context *ctx, GLuint unit )
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][tObj->BaseLevel];
   const GLuint surf_index = SURF_INDEX_TEXTURE(unit);
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = translate_tex_target(tObj->Target);
   surf.ss0.surface_format = translate_tex_format(firstImage->TexFormat,
                                                  firstImage->InternalFormat,
                                                  tObj->DepthMode);

   /* This is ok for all textures with channel width 8bit or less:
    */
   /* surf.ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */
   surf.ss1.base_addr = intelObj->mt->region->buffer->offset; /* reloc */

   /* Hardware dimension fields hold the value minus one. */
   surf.ss2.mip_count = intelObj->_MaxLevel - tObj->BaseLevel;
   surf.ss2.width = firstImage->Width - 1;
   surf.ss2.height = firstImage->Height - 1;
   brw_set_surface_tiling(&surf, intelObj->mt->region->tiling);
   /* region->pitch is in pixels; ss3.pitch is bytes minus one. */
   surf.ss3.pitch = (intelObj->mt->region->pitch * intelObj->mt->cpp) - 1;
   surf.ss3.depth = firstImage->Depth - 1;

   surf.ss4.min_lod = 0;

   /* Enable all six faces for cube maps. */
   if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
      surf.ss0.cube_pos_x = 1;
      surf.ss0.cube_pos_y = 1;
      surf.ss0.cube_pos_z = 1;
      surf.ss0.cube_neg_x = 1;
      surf.ss0.cube_neg_y = 1;
      surf.ss0.cube_neg_z = 1;
   }

   /* Copy the finished surface state into batch state space. */
   map = brw_state_batch(brw, sizeof(surf), 32,
			 &brw->wm.surf_bo[surf_index],
			 &brw->wm.surf_offset[surf_index]);
   memcpy(map, &surf, sizeof(surf));

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->wm.surf_bo[surf_index],
			   brw->wm.surf_offset[surf_index] +
			   offsetof(struct brw_surface_state, ss1),
			   intelObj->mt->region->buffer, 0,
			   I915_GEM_DOMAIN_SAMPLER, 0);
}
229
/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 *
 * \param bo          buffer object holding the constant data (must be non-NULL)
 * \param width       number of RGBA32F constant vectors in the buffer
 * \param out_bo      returns the batch BO holding the surface state
 * \param out_offset  returns the offset of the surface state in *out_bo
 */
void
brw_create_constant_surface(struct brw_context *brw,
			    drm_intel_bo *bo,
			    int width,
			    drm_intel_bo **out_bo,
			    uint32_t *out_offset)
{
   struct intel_context *intel = &brw->intel;
   const GLint w = width - 1;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = BRW_SURFACE_BUFFER;
   surf.ss0.surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   if (intel->gen >= 6)
      surf.ss0.render_cache_read_write = 1;

   assert(bo);
   surf.ss1.base_addr = bo->offset; /* reloc */

   /* BUFFER surfaces encode the element count (minus one) split across
    * the width/height/depth fields.
    */
   surf.ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
   surf.ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
   surf.ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
   surf.ss3.pitch = (width * 16) - 1;    /* ignored?? */
   brw_set_surface_tiling(&surf, I915_TILING_NONE); /* tiling now allowed */

   map = brw_state_batch(brw, sizeof(surf), 32, out_bo, out_offset);
   memcpy(map, &surf, sizeof(surf));

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(*out_bo, (*out_offset +
				     offsetof(struct brw_surface_state, ss1)),
			   bo, 0,
			   I915_GEM_DOMAIN_SAMPLER, 0);
}
276
/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
prepare_wm_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const int size = brw->wm.prog_data->nr_pull_params * sizeof(float);
   float *constants;
   unsigned int i;

   /* Refresh the GL state-derived parameter values before copying. */
   _mesa_load_state_parameters(ctx, fp->program.Base.Parameters);

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (brw->wm.prog_data->nr_pull_params == 0) {
      /* No pull constants needed: release any stale buffer and flag the
       * change so dependent state gets re-emitted.
       */
      if (brw->wm.const_bo) {
	 drm_intel_bo_unreference(brw->wm.const_bo);
	 brw->wm.const_bo = NULL;
	 brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
      }
      return;
   }

   /* Allocate a fresh BO each time rather than mutating one the GPU may
    * still be reading from.
    */
   drm_intel_bo_unreference(brw->wm.const_bo);
   brw->wm.const_bo = drm_intel_bo_alloc(intel->bufmgr, "WM const bo",
					 size, 64);

   /* _NEW_PROGRAM_CONSTANTS */
   drm_intel_gem_bo_map_gtt(brw->wm.const_bo);
   constants = brw->wm.const_bo->virtual;
   for (i = 0; i < brw->wm.prog_data->nr_pull_params; i++) {
      constants[i] = convert_param(brw->wm.prog_data->pull_param_convert[i],
				   *brw->wm.prog_data->pull_param[i]);
   }
   drm_intel_gem_bo_unmap_gtt(brw->wm.const_bo);

   brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
}
321
/* State atom: rebuilds the WM pull-constant buffer when program constants
 * or the bound fragment program change.
 */
const struct brw_tracked_state brw_wm_constants = {
   .dirty = {
      .mesa = (_NEW_PROGRAM_CONSTANTS),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM),
      .cache = 0
   },
   .prepare = prepare_wm_constants,
};
330
331 /**
332 * Updates surface / buffer for fragment shader constant buffer, if
333 * one is required.
334 *
335 * This consumes the state updates for the constant buffer, and produces
336 * BRW_NEW_WM_SURFACES to get picked up by brw_prepare_wm_surfaces for
337 * inclusion in the binding table.
338 */
339 static void upload_wm_constant_surface(struct brw_context *brw )
340 {
341 GLuint surf = SURF_INDEX_FRAG_CONST_BUFFER;
342 struct brw_fragment_program *fp =
343 (struct brw_fragment_program *) brw->fragment_program;
344 const struct gl_program_parameter_list *params =
345 fp->program.Base.Parameters;
346
347 /* If there's no constant buffer, then no surface BO is needed to point at
348 * it.
349 */
350 if (brw->wm.const_bo == 0) {
351 if (brw->wm.surf_bo[surf] != NULL) {
352 drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
353 brw->wm.surf_bo[surf] = NULL;
354 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
355 }
356 return;
357 }
358
359 brw_create_constant_surface(brw, brw->wm.const_bo, params->NumParameters,
360 &brw->wm.surf_bo[surf],
361 &brw->wm.surf_offset[surf]);
362 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
363 }
364
/* State atom: (re)builds the constant-buffer surface whenever the constant
 * BO changes or the batch is reset.
 */
const struct brw_tracked_state brw_wm_constant_surface = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_WM_CONSTBUF |
	      BRW_NEW_BATCH),
      .cache = 0
   },
   .emit = upload_wm_constant_surface,
};
374
/**
 * Emits a SURFACE_STATE of type NULL for a binding-table slot that has no
 * real renderbuffer behind it, so writes to that render target are
 * discarded.
 */
static void
brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.surface_type = BRW_SURFACE_NULL;
   surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
   surf.ss1.base_addr = 0;

   surf.ss2.width = 0;
   surf.ss2.height = 0;
   brw_set_surface_tiling(&surf, I915_TILING_NONE);
   surf.ss3.pitch = 0;

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      /* Belt-and-suspenders: also disable all channel writes. */
      surf.ss0.color_blend = 0;
      surf.ss0.writedisable_red = 1;
      surf.ss0.writedisable_green = 1;
      surf.ss0.writedisable_blue = 1;
      surf.ss0.writedisable_alpha = 1;
   }

   map = brw_state_batch(brw, sizeof(surf), 32,
			 &brw->wm.surf_bo[unit],
			 &brw->wm.surf_offset[unit]);
   memcpy(map, &surf, sizeof(surf));
}
407
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 */
static void
brw_update_renderbuffer_surface(struct brw_context *brw,
				struct gl_renderbuffer *rb,
				unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_region *region = irb->region;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   switch (irb->Base.Format) {
   case MESA_FORMAT_XRGB8888:
      /* XRGB is handled as ARGB because the chips in this family
       * cannot render to XRGB targets.  This means that we have to
       * mask writes to alpha (ala glColorMask) and reconfigure the
       * alpha blending hardware to use GL_ONE (or GL_ZERO) for
       * cases where GL_DST_ALPHA (or GL_ONE_MINUS_DST_ALPHA) is
       * used.
       */
      surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   case MESA_FORMAT_SARGB8:
      /* without GL_EXT_framebuffer_sRGB we shouldn't bind sRGB
	 surfaces to the blend/update as sRGB */
      surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   default:
      surf.ss0.surface_format = brw_format_for_mesa_format[irb->Base.Format];
      assert(surf.ss0.surface_format != 0);
   }

   surf.ss0.surface_type = BRW_SURFACE_2D;
   if (region->tiling == I915_TILING_NONE) {
      /* Untiled: point base_addr directly at the draw offset. */
      surf.ss1.base_addr = (region->draw_x +
			    region->draw_y * region->pitch) * region->cpp;
   } else {
      /* Tiled: base_addr must be tile-aligned, with the remainder of the
       * draw offset expressed via the ss5 x/y offset fields.  The tile
       * geometry used here is 512 bytes x 8 rows (X) and 128 bytes x 32
       * rows (Y), 4096 bytes per tile.
       */
      uint32_t tile_base, tile_x, tile_y;
      uint32_t pitch = region->pitch * region->cpp;

      if (region->tiling == I915_TILING_X) {
	 tile_x = region->draw_x % (512 / region->cpp);
	 tile_y = region->draw_y % 8;
	 tile_base = ((region->draw_y / 8) * (8 * pitch));
	 tile_base += (region->draw_x - tile_x) / (512 / region->cpp) * 4096;
      } else {
	 /* Y */
	 tile_x = region->draw_x % (128 / region->cpp);
	 tile_y = region->draw_y % 32;
	 tile_base = ((region->draw_y / 32) * (32 * pitch));
	 tile_base += (region->draw_x - tile_x) / (128 / region->cpp) * 4096;
      }
      assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
      assert(tile_x % 4 == 0);
      assert(tile_y % 2 == 0);
      /* Note that the low bits of these fields are missing, so
       * there's the possibility of getting in trouble.
       */
      surf.ss1.base_addr = tile_base;
      surf.ss5.x_offset = tile_x / 4;
      surf.ss5.y_offset = tile_y / 2;
   }
   surf.ss1.base_addr += region->buffer->offset; /* reloc */

   surf.ss2.width = rb->Width - 1;
   surf.ss2.height = rb->Height - 1;
   brw_set_surface_tiling(&surf, region->tiling);
   surf.ss3.pitch = (region->pitch * region->cpp) - 1;

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      surf.ss0.color_blend = (!ctx->Color._LogicOpEnabled &&
			      (ctx->Color.BlendEnabled & (1 << unit)));
      surf.ss0.writedisable_red = !ctx->Color.ColorMask[unit][0];
      surf.ss0.writedisable_green = !ctx->Color.ColorMask[unit][1];
      surf.ss0.writedisable_blue = !ctx->Color.ColorMask[unit][2];
      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0)
	 surf.ss0.writedisable_alpha = 1;
      else
	 surf.ss0.writedisable_alpha = !ctx->Color.ColorMask[unit][3];
   }

   map = brw_state_batch(brw, sizeof(surf), 32,
			 &brw->wm.surf_bo[unit],
			 &brw->wm.surf_offset[unit]);
   memcpy(map, &surf, sizeof(surf));

   /* The reloc delta is the draw offset within the BO computed above. */
   drm_intel_bo_emit_reloc(brw->wm.surf_bo[unit],
			   brw->wm.surf_offset[unit] +
			   offsetof(struct brw_surface_state, ss1),
			   region->buffer,
			   surf.ss1.base_addr - region->buffer->offset,
			   I915_GEM_DOMAIN_RENDER,
			   I915_GEM_DOMAIN_RENDER);
}
514
515 static void
516 prepare_wm_surfaces(struct brw_context *brw)
517 {
518 struct gl_context *ctx = &brw->intel.ctx;
519 int i;
520 int nr_surfaces = 0;
521
522 if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
523 for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
524 struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
525 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
526 struct intel_region *region = irb ? irb->region : NULL;
527
528 brw_add_validated_bo(brw, region->buffer);
529 nr_surfaces = SURF_INDEX_DRAW(i) + 1;
530 }
531 }
532
533 if (brw->wm.const_bo) {
534 brw_add_validated_bo(brw, brw->wm.const_bo);
535 nr_surfaces = SURF_INDEX_FRAG_CONST_BUFFER + 1;
536 }
537
538 for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
539 const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
540 struct gl_texture_object *tObj = texUnit->_Current;
541 struct intel_texture_object *intelObj = intel_texture_object(tObj);
542
543 if (texUnit->_ReallyEnabled) {
544 brw_add_validated_bo(brw, intelObj->mt->region->buffer);
545 nr_surfaces = SURF_INDEX_TEXTURE(i) + 1;
546 }
547 }
548
549 /* Have to update this in our prepare, since the unit's prepare
550 * relies on it.
551 */
552 if (brw->wm.nr_surfaces != nr_surfaces) {
553 brw->wm.nr_surfaces = nr_surfaces;
554 brw->state.dirty.brw |= BRW_NEW_NR_WM_SURFACES;
555 }
556 }
557
/**
 * Constructs the set of surface state objects pointed to by the
 * binding table.
 */
static void
upload_wm_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* _NEW_BUFFERS | _NEW_COLOR */
   /* Update surfaces for drawing buffers */
   if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
	 /* Draw buffers without an intel renderbuffer behind them get a
	  * null surface so writes to them are discarded.
	  */
	 if (intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i])) {
	    brw_update_renderbuffer_surface(brw,
					    ctx->DrawBuffer->_ColorDrawBuffers[i],
					    i);
	 } else {
	    brw_update_null_renderbuffer_surface(brw, i);
	 }
      }
   } else {
      /* No draw buffers at all: slot 0 still needs a (null) surface. */
      brw_update_null_renderbuffer_surface(brw, 0);
   }

   /* Update surfaces for textures */
   for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
      const GLuint surf = SURF_INDEX_TEXTURE(i);

      /* _NEW_TEXTURE */
      if (texUnit->_ReallyEnabled) {
	 brw_update_texture_surface(ctx, i);
      } else {
	 /* Disabled unit: release any surface left from a previous draw. */
	 drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
	 brw->wm.surf_bo[surf] = NULL;
      }
   }

   brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
}
600
/* State atom: rebuilds all WM surface states when color/texture/buffer
 * state or the batch changes.
 */
const struct brw_tracked_state brw_wm_surfaces = {
   .dirty = {
      .mesa = (_NEW_COLOR |
	       _NEW_TEXTURE |
	       _NEW_BUFFERS),
      .brw = (BRW_NEW_BATCH),
      .cache = 0
   },
   .prepare = prepare_wm_surfaces,
   .emit = upload_wm_surfaces,
};
612
613 /**
614 * Constructs the binding table for the WM surface state, which maps unit
615 * numbers to surface state objects.
616 */
617 static void
618 brw_wm_upload_binding_table(struct brw_context *brw)
619 {
620 uint32_t *bind;
621 int i;
622
623 /* Might want to calculate nr_surfaces first, to avoid taking up so much
624 * space for the binding table.
625 */
626 bind = brw_state_batch(brw, sizeof(uint32_t) * BRW_WM_MAX_SURF,
627 32, &brw->wm.bind_bo, &brw->wm.bind_bo_offset);
628
629 for (i = 0; i < BRW_WM_MAX_SURF; i++) {
630 /* BRW_NEW_WM_SURFACES */
631 bind[i] = brw->wm.surf_offset[i];
632 if (brw->wm.surf_bo[i]) {
633 bind[i] = brw->wm.surf_offset[i];
634 } else {
635 bind[i] = 0;
636 }
637 }
638
639 brw->state.dirty.brw |= BRW_NEW_BINDING_TABLE;
640 }
641
/* State atom: re-emits the WM binding table when the surfaces it points
 * at change or the batch is reset.
 */
const struct brw_tracked_state brw_wm_binding_table = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
	      BRW_NEW_WM_SURFACES),
      .cache = 0
   },
   .emit = brw_wm_upload_binding_table,
};