i965: Put common info on converting MESA_FORMAT to BRW_FORMAT in a table.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/mtypes.h"
34 #include "main/texstore.h"
35 #include "program/prog_parameter.h"
36
37 #include "intel_mipmap_tree.h"
38 #include "intel_batchbuffer.h"
39 #include "intel_tex.h"
40 #include "intel_fbo.h"
41
42 #include "brw_context.h"
43 #include "brw_state.h"
44 #include "brw_defines.h"
45
46
47 static GLuint translate_tex_target( GLenum target )
48 {
49 switch (target) {
50 case GL_TEXTURE_1D:
51 return BRW_SURFACE_1D;
52
53 case GL_TEXTURE_RECTANGLE_NV:
54 return BRW_SURFACE_2D;
55
56 case GL_TEXTURE_2D:
57 return BRW_SURFACE_2D;
58
59 case GL_TEXTURE_3D:
60 return BRW_SURFACE_3D;
61
62 case GL_TEXTURE_CUBE_MAP:
63 return BRW_SURFACE_CUBE;
64
65 default:
66 assert(0);
67 return 0;
68 }
69 }
70
/* Table translating MESA_FORMAT_* enums to the BRW_SURFACEFORMAT_* values
 * programmed into SURFACE_STATE.  Formats without an entry default to 0;
 * users of this table assert a non-zero result (see translate_tex_format
 * and brw_update_renderbuffer_surface), so 0 means "no direct mapping".
 */
static uint32_t brw_format_for_mesa_format[MESA_FORMAT_COUNT] =
{
   [MESA_FORMAT_L8] = BRW_SURFACEFORMAT_L8_UNORM,
   [MESA_FORMAT_I8] = BRW_SURFACEFORMAT_I8_UNORM,
   [MESA_FORMAT_A8] = BRW_SURFACEFORMAT_A8_UNORM,
   [MESA_FORMAT_AL88] = BRW_SURFACEFORMAT_L8A8_UNORM,
   [MESA_FORMAT_AL1616] = BRW_SURFACEFORMAT_L16A16_UNORM,
   [MESA_FORMAT_R8] = BRW_SURFACEFORMAT_R8_UNORM,
   [MESA_FORMAT_R16] = BRW_SURFACEFORMAT_R16_UNORM,
   [MESA_FORMAT_RG88] = BRW_SURFACEFORMAT_R8G8_UNORM,
   [MESA_FORMAT_RG1616] = BRW_SURFACEFORMAT_R16G16_UNORM,
   [MESA_FORMAT_ARGB8888] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM,
   [MESA_FORMAT_XRGB8888] = BRW_SURFACEFORMAT_B8G8R8X8_UNORM,
   [MESA_FORMAT_RGB565] = BRW_SURFACEFORMAT_B5G6R5_UNORM,
   [MESA_FORMAT_ARGB1555] = BRW_SURFACEFORMAT_B5G5R5A1_UNORM,
   [MESA_FORMAT_ARGB4444] = BRW_SURFACEFORMAT_B4G4R4A4_UNORM,
   [MESA_FORMAT_YCBCR_REV] = BRW_SURFACEFORMAT_YCRCB_NORMAL,
   [MESA_FORMAT_YCBCR] = BRW_SURFACEFORMAT_YCRCB_SWAPUVY,
   /* Both FXT1 variants use the single hardware FXT1 format. */
   [MESA_FORMAT_RGB_FXT1] = BRW_SURFACEFORMAT_FXT1,
   [MESA_FORMAT_RGBA_FXT1] = BRW_SURFACEFORMAT_FXT1,
   [MESA_FORMAT_RGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB,
   [MESA_FORMAT_RGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM,
   [MESA_FORMAT_RGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM,
   [MESA_FORMAT_RGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM,
   [MESA_FORMAT_SRGB_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM_SRGB,
   [MESA_FORMAT_SARGB8] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB,
   [MESA_FORMAT_SLA8] = BRW_SURFACEFORMAT_L8A8_UNORM_SRGB,
   [MESA_FORMAT_SL8] = BRW_SURFACEFORMAT_L8_UNORM_SRGB,
   [MESA_FORMAT_DUDV8] = BRW_SURFACEFORMAT_R8G8_SNORM,
   [MESA_FORMAT_SIGNED_RGBA8888_REV] = BRW_SURFACEFORMAT_R8G8B8A8_SNORM,
};
102
103 static GLuint translate_tex_format( gl_format mesa_format,
104 GLenum internal_format,
105 GLenum depth_mode )
106 {
107 switch( mesa_format ) {
108
109 case MESA_FORMAT_Z16:
110 if (depth_mode == GL_INTENSITY)
111 return BRW_SURFACEFORMAT_I16_UNORM;
112 else if (depth_mode == GL_ALPHA)
113 return BRW_SURFACEFORMAT_A16_UNORM;
114 else if (depth_mode == GL_RED)
115 return BRW_SURFACEFORMAT_R16_UNORM;
116 else
117 return BRW_SURFACEFORMAT_L16_UNORM;
118
119 case MESA_FORMAT_S8_Z24:
120 /* XXX: these different surface formats don't seem to
121 * make any difference for shadow sampler/compares.
122 */
123 if (depth_mode == GL_INTENSITY)
124 return BRW_SURFACEFORMAT_I24X8_UNORM;
125 else if (depth_mode == GL_ALPHA)
126 return BRW_SURFACEFORMAT_A24X8_UNORM;
127 else if (depth_mode == GL_RED)
128 return BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS;
129 else
130 return BRW_SURFACEFORMAT_L24X8_UNORM;
131
132 default:
133 assert(brw_format_for_mesa_format[mesa_format] != 0);
134 return brw_format_for_mesa_format[mesa_format];
135 }
136 }
137
138 static void
139 brw_set_surface_tiling(struct brw_surface_state *surf, uint32_t tiling)
140 {
141 switch (tiling) {
142 case I915_TILING_NONE:
143 surf->ss3.tiled_surface = 0;
144 surf->ss3.tile_walk = 0;
145 break;
146 case I915_TILING_X:
147 surf->ss3.tiled_surface = 1;
148 surf->ss3.tile_walk = BRW_TILEWALK_XMAJOR;
149 break;
150 case I915_TILING_Y:
151 surf->ss3.tiled_surface = 1;
152 surf->ss3.tile_walk = BRW_TILEWALK_YMAJOR;
153 break;
154 }
155 }
156
/* Build and upload a SURFACE_STATE for the texture bound to the given
 * texture unit, and emit the relocation pointing ss1 at the miptree's BO.
 * The resulting state BO/offset land in brw->wm.surf_bo/surf_offset at
 * SURF_INDEX_TEXTURE(unit).
 */
static void
brw_update_texture_surface( struct gl_context *ctx, GLuint unit )
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][intelObj->firstLevel];
   const GLuint surf_index = SURF_INDEX_TEXTURE(unit);
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = translate_tex_target(tObj->Target);
   surf.ss0.surface_format = translate_tex_format(firstImage->TexFormat,
                                                  firstImage->InternalFormat,
                                                  tObj->DepthMode);

   /* This is ok for all textures with channel width 8bit or less:
    */
   /* surf.ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */
   surf.ss1.base_addr = intelObj->mt->region->buffer->offset; /* reloc */

   /* Dimensions are programmed minus one; pitch is in bytes. */
   surf.ss2.mip_count = intelObj->lastLevel - intelObj->firstLevel;
   surf.ss2.width = firstImage->Width - 1;
   surf.ss2.height = firstImage->Height - 1;
   brw_set_surface_tiling(&surf, intelObj->mt->region->tiling);
   surf.ss3.pitch = (intelObj->mt->region->pitch * intelObj->mt->cpp) - 1;
   surf.ss3.depth = firstImage->Depth - 1;

   surf.ss4.min_lod = 0;

   /* Enable all six faces for cube maps. */
   if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
      surf.ss0.cube_pos_x = 1;
      surf.ss0.cube_pos_y = 1;
      surf.ss0.cube_pos_z = 1;
      surf.ss0.cube_neg_x = 1;
      surf.ss0.cube_neg_y = 1;
      surf.ss0.cube_neg_z = 1;
   }

   map = brw_state_batch(brw, sizeof(surf), 32,
                         &brw->wm.surf_bo[surf_index],
                         &brw->wm.surf_offset[surf_index]);
   memcpy(map, &surf, sizeof(surf));

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->wm.surf_bo[surf_index],
                           brw->wm.surf_offset[surf_index] +
                           offsetof(struct brw_surface_state, ss1),
                           intelObj->mt->region->buffer, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}
211
212 /**
213 * Create the constant buffer surface. Vertex/fragment shader constants will be
214 * read from this buffer with Data Port Read instructions/messages.
215 */
/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 *
 * \param bo         buffer object holding the constant data (must be non-NULL)
 * \param width      number of RGBA32F constant slots in the buffer
 * \param out_bo     receives the state BO the surface was written into
 * \param out_offset receives the offset of the surface within *out_bo
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            int width,
                            drm_intel_bo **out_bo,
                            uint32_t *out_offset)
{
   struct intel_context *intel = &brw->intel;
   const GLint w = width - 1;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = BRW_SURFACE_BUFFER;
   surf.ss0.surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   if (intel->gen >= 6)
      surf.ss0.render_cache_read_write = 1;

   assert(bo);
   surf.ss1.base_addr = bo->offset; /* reloc */

   /* For buffer surfaces, width/height/depth together encode the
    * (size - 1) of the buffer, split across bit ranges.
    */
   surf.ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
   surf.ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
   surf.ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
   surf.ss3.pitch = (width * 16) - 1;    /* ignored?? */
   brw_set_surface_tiling(&surf, I915_TILING_NONE); /* buffers are never tiled */

   map = brw_state_batch(brw, sizeof(surf), 32, out_bo, out_offset);
   memcpy(map, &surf, sizeof(surf));

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(*out_bo, (*out_offset +
                                     offsetof(struct brw_surface_state, ss1)),
                           bo, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}
258
259 /* Creates a new WM constant buffer reflecting the current fragment program's
260 * constants, if needed by the fragment program.
261 *
262 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
263 * state atom.
264 */
/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
prepare_wm_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const int size = brw->wm.prog_data->nr_pull_params * sizeof(float);
   float *constants;
   unsigned int i;

   _mesa_load_state_parameters(ctx, fp->program.Base.Parameters);

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (brw->wm.prog_data->nr_pull_params == 0) {
      /* No pull constants needed: release any buffer from a previous
       * program and flag dependent state for re-emission.
       */
      if (brw->wm.const_bo) {
         drm_intel_bo_unreference(brw->wm.const_bo);
         brw->wm.const_bo = NULL;
         brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
      }
      return;
   }

   /* Allocate a fresh BO each time rather than reusing, so the GPU can
    * keep reading the old one while we fill the new one.
    */
   drm_intel_bo_unreference(brw->wm.const_bo);
   brw->wm.const_bo = drm_intel_bo_alloc(intel->bufmgr, "WM const bo",
                                         size, 64);

   /* _NEW_PROGRAM_CONSTANTS */
   drm_intel_gem_bo_map_gtt(brw->wm.const_bo);
   constants = brw->wm.const_bo->virtual;
   for (i = 0; i < brw->wm.prog_data->nr_pull_params; i++) {
      constants[i] = convert_param(brw->wm.prog_data->pull_param_convert[i],
                                   *brw->wm.prog_data->pull_param[i]);
   }
   drm_intel_gem_bo_unmap_gtt(brw->wm.const_bo);

   brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
}
303
/* Rebuild the WM pull-constant buffer when program constants or the
 * fragment program itself change.
 */
const struct brw_tracked_state brw_wm_constants = {
   .dirty = {
      .mesa = (_NEW_PROGRAM_CONSTANTS),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM),
      .cache = 0
   },
   .prepare = prepare_wm_constants,
};
312
313 /**
314 * Updates surface / buffer for fragment shader constant buffer, if
315 * one is required.
316 *
317 * This consumes the state updates for the constant buffer, and produces
318 * BRW_NEW_WM_SURFACES to get picked up by brw_prepare_wm_surfaces for
319 * inclusion in the binding table.
320 */
321 static void upload_wm_constant_surface(struct brw_context *brw )
322 {
323 GLuint surf = SURF_INDEX_FRAG_CONST_BUFFER;
324 struct brw_fragment_program *fp =
325 (struct brw_fragment_program *) brw->fragment_program;
326 const struct gl_program_parameter_list *params =
327 fp->program.Base.Parameters;
328
329 /* If there's no constant buffer, then no surface BO is needed to point at
330 * it.
331 */
332 if (brw->wm.const_bo == 0) {
333 if (brw->wm.surf_bo[surf] != NULL) {
334 drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
335 brw->wm.surf_bo[surf] = NULL;
336 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
337 }
338 return;
339 }
340
341 brw_create_constant_surface(brw, brw->wm.const_bo, params->NumParameters,
342 &brw->wm.surf_bo[surf],
343 &brw->wm.surf_offset[surf]);
344 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
345 }
346
347 const struct brw_tracked_state brw_wm_constant_surface = {
348 .dirty = {
349 .mesa = 0,
350 .brw = (BRW_NEW_WM_CONSTBUF |
351 BRW_NEW_BATCH),
352 .cache = 0
353 },
354 .emit = upload_wm_constant_surface,
355 };
356
357
358 /**
359 * Sets up a surface state structure to point at the given region.
360 * While it is only used for the front/back buffer currently, it should be
361 * usable for further buffers when doing ARB_draw_buffer support.
362 */
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 *
 * A NULL renderbuffer (or one without a region) produces a null surface
 * so the binding-table slot is still valid.
 */
static void
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   drm_intel_bo *region_bo = NULL;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_region *region = irb ? irb->region : NULL;
   /* All inputs that affect the surface contents, gathered in one place. */
   struct {
      unsigned int surface_type;
      unsigned int surface_format;
      unsigned int width, height, pitch, cpp;
      GLubyte color_mask[4];
      GLboolean color_blend;
      uint32_t tiling;
      uint32_t draw_x;
      uint32_t draw_y;
   } key;
   struct brw_surface_state surf;
   void *map;

   memset(&key, 0, sizeof(key));

   if (region != NULL) {
      region_bo = region->buffer;

      key.surface_type = BRW_SURFACE_2D;
      switch (irb->Base.Format) {
      case MESA_FORMAT_XRGB8888:
	 /* XRGB is handled as ARGB because the chips in this family
	  * cannot render to XRGB targets.  This means that we have to
	  * mask writes to alpha (ala glColorMask) and reconfigure the
	  * alpha blending hardware to use GL_ONE (or GL_ZERO) for
	  * cases where GL_DST_ALPHA (or GL_ONE_MINUS_DST_ALPHA) is
	  * used.
	  */
	 key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
	 break;
      default:
	 key.surface_format = brw_format_for_mesa_format[irb->Base.Format];
	 assert(key.surface_format != 0);
      }
      key.tiling = region->tiling;
      key.width = rb->Width;
      key.height = rb->Height;
      key.pitch = region->pitch;
      key.cpp = region->cpp;
      key.draw_x = region->draw_x;
      key.draw_y = region->draw_y;
   } else {
      /* Null surface: 1x1 placeholder with no backing BO.
       * NOTE(review): key.pitch is left 0 here, so ss3.pitch below becomes
       * (0 * 4) - 1 — presumably ignored for BRW_SURFACE_NULL; confirm
       * against the PRM.
       */
      key.surface_type = BRW_SURFACE_NULL;
      key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      key.tiling = I915_TILING_X;
      key.width = 1;
      key.height = 1;
      key.cpp = 4;
      key.draw_x = 0;
      key.draw_y = 0;
   }

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      memcpy(key.color_mask, ctx->Color.ColorMask[unit],
	     sizeof(key.color_mask));

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0)
	 key.color_mask[3] = GL_FALSE;

      key.color_blend = (!ctx->Color._LogicOpEnabled &&
			 (ctx->Color.BlendEnabled & (1 << unit)));
   }

   memset(&surf, 0, sizeof(surf));

   surf.ss0.surface_format = key.surface_format;
   surf.ss0.surface_type = key.surface_type;
   if (key.tiling == I915_TILING_NONE) {
      /* Untiled: the draw offset is a simple linear byte offset. */
      surf.ss1.base_addr = (key.draw_x + key.draw_y * key.pitch) * key.cpp;
   } else {
      /* Tiled: base_addr must be tile-aligned, so split the draw offset
       * into a tile-aligned base plus an intra-tile (x, y) offset.  The
       * divisors match the tile geometry implied by the constants below
       * (X tiles: 512 bytes x 8 rows; Y tiles: 128 bytes x 32 rows;
       * 4096 bytes per tile).
       */
      uint32_t tile_base, tile_x, tile_y;
      uint32_t pitch = key.pitch * key.cpp;

      if (key.tiling == I915_TILING_X) {
	 tile_x = key.draw_x % (512 / key.cpp);
	 tile_y = key.draw_y % 8;
	 tile_base = ((key.draw_y / 8) * (8 * pitch));
	 tile_base += (key.draw_x - tile_x) / (512 / key.cpp) * 4096;
      } else {
	 /* Y */
	 tile_x = key.draw_x % (128 / key.cpp);
	 tile_y = key.draw_y % 32;
	 tile_base = ((key.draw_y / 32) * (32 * pitch));
	 tile_base += (key.draw_x - tile_x) / (128 / key.cpp) * 4096;
      }
      assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
      assert(tile_x % 4 == 0);
      assert(tile_y % 2 == 0);
      /* Note that the low bits of these fields are missing, so
       * there's the possibility of getting in trouble.
       */
      surf.ss1.base_addr = tile_base;
      surf.ss5.x_offset = tile_x / 4;
      surf.ss5.y_offset = tile_y / 2;
   }
   if (region_bo != NULL)
      surf.ss1.base_addr += region_bo->offset; /* reloc */

   surf.ss2.width = key.width - 1;
   surf.ss2.height = key.height - 1;
   brw_set_surface_tiling(&surf, key.tiling);
   surf.ss3.pitch = (key.pitch * key.cpp) - 1;

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      surf.ss0.color_blend = key.color_blend;
      surf.ss0.writedisable_red =   !key.color_mask[0];
      surf.ss0.writedisable_green = !key.color_mask[1];
      surf.ss0.writedisable_blue =  !key.color_mask[2];
      surf.ss0.writedisable_alpha = !key.color_mask[3];
   }

   map = brw_state_batch(brw, sizeof(surf), 32,
			 &brw->wm.surf_bo[unit],
			 &brw->wm.surf_offset[unit]);
   memcpy(map, &surf, sizeof(surf));

   /* Only real surfaces get a relocation; null surfaces have no BO. */
   if (region_bo != NULL) {
      drm_intel_bo_emit_reloc(brw->wm.surf_bo[unit],
			      brw->wm.surf_offset[unit] +
			      offsetof(struct brw_surface_state, ss1),
			      region_bo,
			      surf.ss1.base_addr - region_bo->offset,
			      I915_GEM_DOMAIN_RENDER,
			      I915_GEM_DOMAIN_RENDER);
   }
}
504
505 static void
506 prepare_wm_surfaces(struct brw_context *brw)
507 {
508 struct gl_context *ctx = &brw->intel.ctx;
509 int i;
510 int nr_surfaces = 0;
511
512 if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
513 for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
514 struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
515 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
516 struct intel_region *region = irb ? irb->region : NULL;
517
518 brw_add_validated_bo(brw, region->buffer);
519 nr_surfaces = SURF_INDEX_DRAW(i) + 1;
520 }
521 }
522
523 if (brw->wm.const_bo) {
524 brw_add_validated_bo(brw, brw->wm.const_bo);
525 nr_surfaces = SURF_INDEX_FRAG_CONST_BUFFER + 1;
526 }
527
528 for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
529 const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
530 struct gl_texture_object *tObj = texUnit->_Current;
531 struct intel_texture_object *intelObj = intel_texture_object(tObj);
532
533 if (texUnit->_ReallyEnabled) {
534 brw_add_validated_bo(brw, intelObj->mt->region->buffer);
535 nr_surfaces = SURF_INDEX_TEXTURE(i) + 1;
536 }
537 }
538
539 /* Have to update this in our prepare, since the unit's prepare
540 * relies on it.
541 */
542 if (brw->wm.nr_surfaces != nr_surfaces) {
543 brw->wm.nr_surfaces = nr_surfaces;
544 brw->state.dirty.brw |= BRW_NEW_NR_WM_SURFACES;
545 }
546 }
547
548 /**
549 * Constructs the set of surface state objects pointed to by the
550 * binding table.
551 */
552 static void
553 upload_wm_surfaces(struct brw_context *brw)
554 {
555 struct gl_context *ctx = &brw->intel.ctx;
556 GLuint i;
557
558 /* _NEW_BUFFERS | _NEW_COLOR */
559 /* Update surfaces for drawing buffers */
560 if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
561 for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
562 brw_update_renderbuffer_surface(brw,
563 ctx->DrawBuffer->_ColorDrawBuffers[i],
564 i);
565 }
566 } else {
567 brw_update_renderbuffer_surface(brw, NULL, 0);
568 }
569
570 /* Update surfaces for textures */
571 for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
572 const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
573 const GLuint surf = SURF_INDEX_TEXTURE(i);
574
575 /* _NEW_TEXTURE */
576 if (texUnit->_ReallyEnabled) {
577 brw_update_texture_surface(ctx, i);
578 } else {
579 drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
580 brw->wm.surf_bo[surf] = NULL;
581 }
582 }
583
584 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
585 }
586
/* Validate (prepare) and rebuild (emit) the WM surface state whenever
 * colors, textures, or buffer bindings change, or a new batch starts.
 */
const struct brw_tracked_state brw_wm_surfaces = {
   .dirty = {
      .mesa = (_NEW_COLOR |
               _NEW_TEXTURE |
               _NEW_BUFFERS),
      .brw = (BRW_NEW_BATCH),
      .cache = 0
   },
   .prepare = prepare_wm_surfaces,
   .emit = upload_wm_surfaces,
};
598
599 /**
600 * Constructs the binding table for the WM surface state, which maps unit
601 * numbers to surface state objects.
602 */
603 static void
604 brw_wm_upload_binding_table(struct brw_context *brw)
605 {
606 uint32_t *bind;
607 int i;
608
609 /* Might want to calculate nr_surfaces first, to avoid taking up so much
610 * space for the binding table.
611 */
612 bind = brw_state_batch(brw, sizeof(uint32_t) * BRW_WM_MAX_SURF,
613 32, &brw->wm.bind_bo, &brw->wm.bind_bo_offset);
614
615 for (i = 0; i < BRW_WM_MAX_SURF; i++) {
616 /* BRW_NEW_WM_SURFACES */
617 bind[i] = brw->wm.surf_offset[i];
618 if (brw->wm.surf_bo[i]) {
619 bind[i] = brw->wm.surf_offset[i];
620 } else {
621 bind[i] = 0;
622 }
623 }
624
625 brw->state.dirty.brw |= BRW_NEW_BINDING_TABLE;
626 }
627
/* Re-emit the binding table when the surfaces it points at change or a
 * new batch invalidates the old state.
 */
const struct brw_tracked_state brw_wm_binding_table = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
              BRW_NEW_WM_SURFACES),
      .cache = 0
   },
   .emit = brw_wm_upload_binding_table,
};