i965: Simplify the renderbuffer setup code.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/mtypes.h"
34 #include "main/texstore.h"
35 #include "program/prog_parameter.h"
36
37 #include "intel_mipmap_tree.h"
38 #include "intel_batchbuffer.h"
39 #include "intel_tex.h"
40 #include "intel_fbo.h"
41
42 #include "brw_context.h"
43 #include "brw_state.h"
44 #include "brw_defines.h"
45
46
47 static GLuint translate_tex_target( GLenum target )
48 {
49 switch (target) {
50 case GL_TEXTURE_1D:
51 return BRW_SURFACE_1D;
52
53 case GL_TEXTURE_RECTANGLE_NV:
54 return BRW_SURFACE_2D;
55
56 case GL_TEXTURE_2D:
57 return BRW_SURFACE_2D;
58
59 case GL_TEXTURE_3D:
60 return BRW_SURFACE_3D;
61
62 case GL_TEXTURE_CUBE_MAP:
63 return BRW_SURFACE_CUBE;
64
65 default:
66 assert(0);
67 return 0;
68 }
69 }
70
/* Translation table from mesa texture formats to hardware SURFACE_STATE
 * surface format enums.  Formats without an entry are left at 0, which is
 * why translate_tex_format() asserts a nonzero lookup result before use.
 * Depth formats are handled separately in translate_tex_format().
 */
static uint32_t brw_format_for_mesa_format[MESA_FORMAT_COUNT] =
{
   [MESA_FORMAT_L8] = BRW_SURFACEFORMAT_L8_UNORM,
   [MESA_FORMAT_I8] = BRW_SURFACEFORMAT_I8_UNORM,
   [MESA_FORMAT_A8] = BRW_SURFACEFORMAT_A8_UNORM,
   [MESA_FORMAT_AL88] = BRW_SURFACEFORMAT_L8A8_UNORM,
   [MESA_FORMAT_AL1616] = BRW_SURFACEFORMAT_L16A16_UNORM,
   [MESA_FORMAT_R8] = BRW_SURFACEFORMAT_R8_UNORM,
   [MESA_FORMAT_R16] = BRW_SURFACEFORMAT_R16_UNORM,
   [MESA_FORMAT_RG88] = BRW_SURFACEFORMAT_R8G8_UNORM,
   [MESA_FORMAT_RG1616] = BRW_SURFACEFORMAT_R16G16_UNORM,
   [MESA_FORMAT_ARGB8888] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM,
   [MESA_FORMAT_XRGB8888] = BRW_SURFACEFORMAT_B8G8R8X8_UNORM,
   [MESA_FORMAT_RGB565] = BRW_SURFACEFORMAT_B5G6R5_UNORM,
   [MESA_FORMAT_ARGB1555] = BRW_SURFACEFORMAT_B5G5R5A1_UNORM,
   [MESA_FORMAT_ARGB4444] = BRW_SURFACEFORMAT_B4G4R4A4_UNORM,
   [MESA_FORMAT_YCBCR_REV] = BRW_SURFACEFORMAT_YCRCB_NORMAL,
   [MESA_FORMAT_YCBCR] = BRW_SURFACEFORMAT_YCRCB_SWAPUVY,
   [MESA_FORMAT_RGB_FXT1] = BRW_SURFACEFORMAT_FXT1,
   [MESA_FORMAT_RGBA_FXT1] = BRW_SURFACEFORMAT_FXT1,
   [MESA_FORMAT_RGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB,
   [MESA_FORMAT_RGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM,
   [MESA_FORMAT_RGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM,
   [MESA_FORMAT_RGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM,
   [MESA_FORMAT_SRGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB_SRGB,
   [MESA_FORMAT_SRGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM_SRGB,
   [MESA_FORMAT_SRGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM_SRGB,
   [MESA_FORMAT_SRGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM_SRGB,
   [MESA_FORMAT_SARGB8] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB,
   [MESA_FORMAT_SLA8] = BRW_SURFACEFORMAT_L8A8_UNORM_SRGB,
   [MESA_FORMAT_SL8] = BRW_SURFACEFORMAT_L8_UNORM_SRGB,
   [MESA_FORMAT_DUDV8] = BRW_SURFACEFORMAT_R8G8_SNORM,
   [MESA_FORMAT_SIGNED_RGBA8888_REV] = BRW_SURFACEFORMAT_R8G8B8A8_SNORM,
};
105
106 static GLuint translate_tex_format( gl_format mesa_format,
107 GLenum internal_format,
108 GLenum depth_mode )
109 {
110 switch( mesa_format ) {
111
112 case MESA_FORMAT_Z16:
113 if (depth_mode == GL_INTENSITY)
114 return BRW_SURFACEFORMAT_I16_UNORM;
115 else if (depth_mode == GL_ALPHA)
116 return BRW_SURFACEFORMAT_A16_UNORM;
117 else if (depth_mode == GL_RED)
118 return BRW_SURFACEFORMAT_R16_UNORM;
119 else
120 return BRW_SURFACEFORMAT_L16_UNORM;
121
122 case MESA_FORMAT_S8_Z24:
123 /* XXX: these different surface formats don't seem to
124 * make any difference for shadow sampler/compares.
125 */
126 if (depth_mode == GL_INTENSITY)
127 return BRW_SURFACEFORMAT_I24X8_UNORM;
128 else if (depth_mode == GL_ALPHA)
129 return BRW_SURFACEFORMAT_A24X8_UNORM;
130 else if (depth_mode == GL_RED)
131 return BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS;
132 else
133 return BRW_SURFACEFORMAT_L24X8_UNORM;
134
135 default:
136 assert(brw_format_for_mesa_format[mesa_format] != 0);
137 return brw_format_for_mesa_format[mesa_format];
138 }
139 }
140
141 static void
142 brw_set_surface_tiling(struct brw_surface_state *surf, uint32_t tiling)
143 {
144 switch (tiling) {
145 case I915_TILING_NONE:
146 surf->ss3.tiled_surface = 0;
147 surf->ss3.tile_walk = 0;
148 break;
149 case I915_TILING_X:
150 surf->ss3.tiled_surface = 1;
151 surf->ss3.tile_walk = BRW_TILEWALK_XMAJOR;
152 break;
153 case I915_TILING_Y:
154 surf->ss3.tiled_surface = 1;
155 surf->ss3.tile_walk = BRW_TILEWALK_YMAJOR;
156 break;
157 }
158 }
159
/**
 * Builds the SURFACE_STATE for the texture bound to the given unit and
 * emits it into the batch, plus a relocation pointing the surface's base
 * address at the miptree's backing buffer.
 */
static void
brw_update_texture_surface( struct gl_context *ctx, GLuint unit )
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][intelObj->firstLevel];
   const GLuint surf_index = SURF_INDEX_TEXTURE(unit);
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = translate_tex_target(tObj->Target);
   surf.ss0.surface_format = translate_tex_format(firstImage->TexFormat,
						  firstImage->InternalFormat,
						  tObj->DepthMode);

   /* This is ok for all textures with channel width 8bit or less:
    */
   /* surf.ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */
   surf.ss1.base_addr = intelObj->mt->region->buffer->offset; /* reloc */

   /* ss2/ss3 dimension fields are encoded as (value - 1). */
   surf.ss2.mip_count = intelObj->lastLevel - intelObj->firstLevel;
   surf.ss2.width = firstImage->Width - 1;
   surf.ss2.height = firstImage->Height - 1;
   brw_set_surface_tiling(&surf, intelObj->mt->region->tiling);
   /* Region pitch is in pixels; the hardware wants bytes minus one. */
   surf.ss3.pitch = (intelObj->mt->region->pitch * intelObj->mt->cpp) - 1;
   surf.ss3.depth = firstImage->Depth - 1;

   surf.ss4.min_lod = 0;

   /* Enable sampling from all six faces of cube maps. */
   if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
      surf.ss0.cube_pos_x = 1;
      surf.ss0.cube_pos_y = 1;
      surf.ss0.cube_pos_z = 1;
      surf.ss0.cube_neg_x = 1;
      surf.ss0.cube_neg_y = 1;
      surf.ss0.cube_neg_z = 1;
   }

   map = brw_state_batch(brw, sizeof(surf), 32,
			 &brw->wm.surf_bo[surf_index],
			 &brw->wm.surf_offset[surf_index]);
   memcpy(map, &surf, sizeof(surf));

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->wm.surf_bo[surf_index],
			   brw->wm.surf_offset[surf_index] +
			   offsetof(struct brw_surface_state, ss1),
			   intelObj->mt->region->buffer, 0,
			   I915_GEM_DOMAIN_SAMPLER, 0);
}
214
/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 *
 * \param bo         buffer object holding the constant data (must be non-NULL)
 * \param width      number of vec4 (16-byte) constant elements in \p bo
 * \param out_bo     returns the batch BO the surface state was written into
 * \param out_offset returns the offset of the surface state within *out_bo
 */
void
brw_create_constant_surface(struct brw_context *brw,
			    drm_intel_bo *bo,
			    int width,
			    drm_intel_bo **out_bo,
			    uint32_t *out_offset)
{
   struct intel_context *intel = &brw->intel;
   const GLint w = width - 1;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = BRW_SURFACE_BUFFER;
   surf.ss0.surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   if (intel->gen >= 6)
      surf.ss0.render_cache_read_write = 1;

   assert(bo);
   surf.ss1.base_addr = bo->offset; /* reloc */

   /* A BUFFER surface encodes its (element count - 1) split across the
    * width/height/depth fields.
    */
   surf.ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
   surf.ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
   surf.ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
   surf.ss3.pitch = (width * 16) - 1;    /* ignored?? */
   brw_set_surface_tiling(&surf, I915_TILING_NONE); /* tiling now allowed */

   map = brw_state_batch(brw, sizeof(surf), 32, out_bo, out_offset);
   memcpy(map, &surf, sizeof(surf));

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(*out_bo, (*out_offset +
				     offsetof(struct brw_surface_state, ss1)),
			   bo, 0,
			   I915_GEM_DOMAIN_SAMPLER, 0);
}
261
262 /* Creates a new WM constant buffer reflecting the current fragment program's
263 * constants, if needed by the fragment program.
264 *
265 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
266 * state atom.
267 */
268 static void
269 prepare_wm_constants(struct brw_context *brw)
270 {
271 struct gl_context *ctx = &brw->intel.ctx;
272 struct intel_context *intel = &brw->intel;
273 struct brw_fragment_program *fp =
274 (struct brw_fragment_program *) brw->fragment_program;
275 const int size = brw->wm.prog_data->nr_pull_params * sizeof(float);
276 float *constants;
277 unsigned int i;
278
279 _mesa_load_state_parameters(ctx, fp->program.Base.Parameters);
280
281 /* BRW_NEW_FRAGMENT_PROGRAM */
282 if (brw->wm.prog_data->nr_pull_params == 0) {
283 if (brw->wm.const_bo) {
284 drm_intel_bo_unreference(brw->wm.const_bo);
285 brw->wm.const_bo = NULL;
286 brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
287 }
288 return;
289 }
290
291 drm_intel_bo_unreference(brw->wm.const_bo);
292 brw->wm.const_bo = drm_intel_bo_alloc(intel->bufmgr, "WM const bo",
293 size, 64);
294
295 /* _NEW_PROGRAM_CONSTANTS */
296 drm_intel_gem_bo_map_gtt(brw->wm.const_bo);
297 constants = brw->wm.const_bo->virtual;
298 for (i = 0; i < brw->wm.prog_data->nr_pull_params; i++) {
299 constants[i] = convert_param(brw->wm.prog_data->pull_param_convert[i],
300 *brw->wm.prog_data->pull_param[i]);
301 }
302 drm_intel_gem_bo_unmap_gtt(brw->wm.const_bo);
303
304 brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
305 }
306
/* State atom: re-upload the WM pull-constant buffer whenever the fragment
 * program or its constants change.
 */
const struct brw_tracked_state brw_wm_constants = {
   .dirty = {
      .mesa = (_NEW_PROGRAM_CONSTANTS),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM),
      .cache = 0
   },
   .prepare = prepare_wm_constants,
};
315
316 /**
317 * Updates surface / buffer for fragment shader constant buffer, if
318 * one is required.
319 *
320 * This consumes the state updates for the constant buffer, and produces
321 * BRW_NEW_WM_SURFACES to get picked up by brw_prepare_wm_surfaces for
322 * inclusion in the binding table.
323 */
324 static void upload_wm_constant_surface(struct brw_context *brw )
325 {
326 GLuint surf = SURF_INDEX_FRAG_CONST_BUFFER;
327 struct brw_fragment_program *fp =
328 (struct brw_fragment_program *) brw->fragment_program;
329 const struct gl_program_parameter_list *params =
330 fp->program.Base.Parameters;
331
332 /* If there's no constant buffer, then no surface BO is needed to point at
333 * it.
334 */
335 if (brw->wm.const_bo == 0) {
336 if (brw->wm.surf_bo[surf] != NULL) {
337 drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
338 brw->wm.surf_bo[surf] = NULL;
339 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
340 }
341 return;
342 }
343
344 brw_create_constant_surface(brw, brw->wm.const_bo, params->NumParameters,
345 &brw->wm.surf_bo[surf],
346 &brw->wm.surf_offset[surf]);
347 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
348 }
349
/* State atom: rebuild the constant-buffer surface after the constant BO
 * changes or the batch is reset.
 */
const struct brw_tracked_state brw_wm_constant_surface = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_WM_CONSTBUF |
	      BRW_NEW_BATCH),
      .cache = 0
   },
   .emit = upload_wm_constant_surface,
};
359
360 static void
361 brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
362 {
363 struct intel_context *intel = &brw->intel;
364 struct brw_surface_state surf;
365 void *map;
366
367 memset(&surf, 0, sizeof(surf));
368
369 surf.ss0.surface_type = BRW_SURFACE_NULL;
370 surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
371 surf.ss1.base_addr = 0;
372
373 surf.ss2.width = 0;
374 surf.ss2.height = 0;
375 brw_set_surface_tiling(&surf, I915_TILING_NONE);
376 surf.ss3.pitch = 0;
377
378 if (intel->gen < 6) {
379 /* _NEW_COLOR */
380 surf.ss0.color_blend = 0;
381 surf.ss0.writedisable_red = 1;
382 surf.ss0.writedisable_green = 1;
383 surf.ss0.writedisable_blue = 1;
384 surf.ss0.writedisable_alpha = 1;
385 }
386
387 map = brw_state_batch(brw, sizeof(surf), 32,
388 &brw->wm.surf_bo[unit],
389 &brw->wm.surf_offset[unit]);
390 memcpy(map, &surf, sizeof(surf));
391 }
392
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 */
static void
brw_update_renderbuffer_surface(struct brw_context *brw,
				struct gl_renderbuffer *rb,
				unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_region *region = irb->region;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   switch (irb->Base.Format) {
   case MESA_FORMAT_XRGB8888:
      /* XRGB is handled as ARGB because the chips in this family
       * cannot render to XRGB targets.  This means that we have to
       * mask writes to alpha (ala glColorMask) and reconfigure the
       * alpha blending hardware to use GL_ONE (or GL_ZERO) for
       * cases where GL_DST_ALPHA (or GL_ONE_MINUS_DST_ALPHA) is
       * used.
       */
      surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   default:
      surf.ss0.surface_format = brw_format_for_mesa_format[irb->Base.Format];
      assert(surf.ss0.surface_format != 0);
   }

   surf.ss0.surface_type = BRW_SURFACE_2D;
   if (region->tiling == I915_TILING_NONE) {
      /* Untiled: the draw offset can go straight into base_addr. */
      surf.ss1.base_addr = (region->draw_x +
			    region->draw_y * region->pitch) * region->cpp;
   } else {
      /* Tiled: base_addr must be tile-aligned, so split the draw offset
       * into a tile-aligned base plus intra-tile x/y offsets in ss5.
       * X-major tiles are 512 bytes x 8 rows; Y-major tiles are 128
       * bytes x 32 rows; both are 4096 bytes.
       */
      uint32_t tile_base, tile_x, tile_y;
      uint32_t pitch = region->pitch * region->cpp;

      if (region->tiling == I915_TILING_X) {
	 tile_x = region->draw_x % (512 / region->cpp);
	 tile_y = region->draw_y % 8;
	 tile_base = ((region->draw_y / 8) * (8 * pitch));
	 tile_base += (region->draw_x - tile_x) / (512 / region->cpp) * 4096;
      } else {
	 /* Y */
	 tile_x = region->draw_x % (128 / region->cpp);
	 tile_y = region->draw_y % 32;
	 tile_base = ((region->draw_y / 32) * (32 * pitch));
	 tile_base += (region->draw_x - tile_x) / (128 / region->cpp) * 4096;
      }
      assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
      assert(tile_x % 4 == 0);
      assert(tile_y % 2 == 0);
      /* Note that the low bits of these fields are missing, so
       * there's the possibility of getting in trouble.
       */
      surf.ss1.base_addr = tile_base;
      surf.ss5.x_offset = tile_x / 4;
      surf.ss5.y_offset = tile_y / 2;
   }
   surf.ss1.base_addr += region->buffer->offset; /* reloc */

   surf.ss2.width = rb->Width - 1;
   surf.ss2.height = rb->Height - 1;
   brw_set_surface_tiling(&surf, region->tiling);
   surf.ss3.pitch = (region->pitch * region->cpp) - 1;

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      surf.ss0.color_blend = (!ctx->Color._LogicOpEnabled &&
			      (ctx->Color.BlendEnabled & (1 << unit)));
      surf.ss0.writedisable_red = !ctx->Color.ColorMask[unit][0];
      surf.ss0.writedisable_green = !ctx->Color.ColorMask[unit][1];
      surf.ss0.writedisable_blue = !ctx->Color.ColorMask[unit][2];
      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0)
	 surf.ss0.writedisable_alpha = 1;
      else
	 surf.ss0.writedisable_alpha = !ctx->Color.ColorMask[unit][3];
   }

   map = brw_state_batch(brw, sizeof(surf), 32,
			 &brw->wm.surf_bo[unit],
			 &brw->wm.surf_offset[unit]);
   memcpy(map, &surf, sizeof(surf));

   /* The reloc delta is relative to the buffer start, so subtract the
    * presumed buffer offset that was added into base_addr above.
    */
   drm_intel_bo_emit_reloc(brw->wm.surf_bo[unit],
			   brw->wm.surf_offset[unit] +
			   offsetof(struct brw_surface_state, ss1),
			   region->buffer,
			   surf.ss1.base_addr - region->buffer->offset,
			   I915_GEM_DOMAIN_RENDER,
			   I915_GEM_DOMAIN_RENDER);
}
494
495 static void
496 prepare_wm_surfaces(struct brw_context *brw)
497 {
498 struct gl_context *ctx = &brw->intel.ctx;
499 int i;
500 int nr_surfaces = 0;
501
502 if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
503 for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
504 struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
505 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
506 struct intel_region *region = irb ? irb->region : NULL;
507
508 brw_add_validated_bo(brw, region->buffer);
509 nr_surfaces = SURF_INDEX_DRAW(i) + 1;
510 }
511 }
512
513 if (brw->wm.const_bo) {
514 brw_add_validated_bo(brw, brw->wm.const_bo);
515 nr_surfaces = SURF_INDEX_FRAG_CONST_BUFFER + 1;
516 }
517
518 for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
519 const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
520 struct gl_texture_object *tObj = texUnit->_Current;
521 struct intel_texture_object *intelObj = intel_texture_object(tObj);
522
523 if (texUnit->_ReallyEnabled) {
524 brw_add_validated_bo(brw, intelObj->mt->region->buffer);
525 nr_surfaces = SURF_INDEX_TEXTURE(i) + 1;
526 }
527 }
528
529 /* Have to update this in our prepare, since the unit's prepare
530 * relies on it.
531 */
532 if (brw->wm.nr_surfaces != nr_surfaces) {
533 brw->wm.nr_surfaces = nr_surfaces;
534 brw->state.dirty.brw |= BRW_NEW_NR_WM_SURFACES;
535 }
536 }
537
538 /**
539 * Constructs the set of surface state objects pointed to by the
540 * binding table.
541 */
542 static void
543 upload_wm_surfaces(struct brw_context *brw)
544 {
545 struct gl_context *ctx = &brw->intel.ctx;
546 GLuint i;
547
548 /* _NEW_BUFFERS | _NEW_COLOR */
549 /* Update surfaces for drawing buffers */
550 if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
551 for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
552 if (intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i])) {
553 brw_update_renderbuffer_surface(brw,
554 ctx->DrawBuffer->_ColorDrawBuffers[i],
555 i);
556 } else {
557 brw_update_null_renderbuffer_surface(brw, i);
558 }
559 }
560 } else {
561 brw_update_null_renderbuffer_surface(brw, 0);
562 }
563
564 /* Update surfaces for textures */
565 for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
566 const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
567 const GLuint surf = SURF_INDEX_TEXTURE(i);
568
569 /* _NEW_TEXTURE */
570 if (texUnit->_ReallyEnabled) {
571 brw_update_texture_surface(ctx, i);
572 } else {
573 drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
574 brw->wm.surf_bo[surf] = NULL;
575 }
576 }
577
578 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
579 }
580
/* State atom: validate and (re)emit all WM surface state when color,
 * texture, or buffer bindings change, or the batch is reset.
 */
const struct brw_tracked_state brw_wm_surfaces = {
   .dirty = {
      .mesa = (_NEW_COLOR |
	       _NEW_TEXTURE |
	       _NEW_BUFFERS),
      .brw = (BRW_NEW_BATCH),
      .cache = 0
   },
   .prepare = prepare_wm_surfaces,
   .emit = upload_wm_surfaces,
};
592
593 /**
594 * Constructs the binding table for the WM surface state, which maps unit
595 * numbers to surface state objects.
596 */
597 static void
598 brw_wm_upload_binding_table(struct brw_context *brw)
599 {
600 uint32_t *bind;
601 int i;
602
603 /* Might want to calculate nr_surfaces first, to avoid taking up so much
604 * space for the binding table.
605 */
606 bind = brw_state_batch(brw, sizeof(uint32_t) * BRW_WM_MAX_SURF,
607 32, &brw->wm.bind_bo, &brw->wm.bind_bo_offset);
608
609 for (i = 0; i < BRW_WM_MAX_SURF; i++) {
610 /* BRW_NEW_WM_SURFACES */
611 bind[i] = brw->wm.surf_offset[i];
612 if (brw->wm.surf_bo[i]) {
613 bind[i] = brw->wm.surf_offset[i];
614 } else {
615 bind[i] = 0;
616 }
617 }
618
619 brw->state.dirty.brw |= BRW_NEW_BINDING_TABLE;
620 }
621
/* State atom: rebuild the WM binding table when the surface state objects
 * move (new surfaces or a fresh batch).
 */
const struct brw_tracked_state brw_wm_binding_table = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
	      BRW_NEW_WM_SURFACES),
      .cache = 0
   },
   .emit = brw_wm_upload_binding_table,
};