4570af66cd92544218cf0735cf64e716b8a189b1
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/mtypes.h"
34 #include "main/texstore.h"
35 #include "program/prog_parameter.h"
36
37 #include "intel_mipmap_tree.h"
38 #include "intel_batchbuffer.h"
39 #include "intel_tex.h"
40 #include "intel_fbo.h"
41
42 #include "brw_context.h"
43 #include "brw_state.h"
44 #include "brw_defines.h"
45 #include "brw_wm.h"
46
47 static GLuint translate_tex_target( GLenum target )
48 {
49 switch (target) {
50 case GL_TEXTURE_1D:
51 return BRW_SURFACE_1D;
52
53 case GL_TEXTURE_RECTANGLE_NV:
54 return BRW_SURFACE_2D;
55
56 case GL_TEXTURE_2D:
57 return BRW_SURFACE_2D;
58
59 case GL_TEXTURE_3D:
60 return BRW_SURFACE_3D;
61
62 case GL_TEXTURE_CUBE_MAP:
63 return BRW_SURFACE_CUBE;
64
65 default:
66 assert(0);
67 return 0;
68 }
69 }
70
/* Map from Mesa's gl_format enum to the hardware SURFACE_STATE format field.
 * Entries not listed here are zero-initialized; callers compare against 0
 * to detect formats with no direct hardware equivalent (see
 * brw_render_target_supported() and translate_tex_format()).
 */
static uint32_t brw_format_for_mesa_format[MESA_FORMAT_COUNT] =
{
   [MESA_FORMAT_L8] = BRW_SURFACEFORMAT_L8_UNORM,
   [MESA_FORMAT_I8] = BRW_SURFACEFORMAT_I8_UNORM,
   [MESA_FORMAT_A8] = BRW_SURFACEFORMAT_A8_UNORM,
   [MESA_FORMAT_AL88] = BRW_SURFACEFORMAT_L8A8_UNORM,
   [MESA_FORMAT_AL1616] = BRW_SURFACEFORMAT_L16A16_UNORM,
   [MESA_FORMAT_R8] = BRW_SURFACEFORMAT_R8_UNORM,
   [MESA_FORMAT_R16] = BRW_SURFACEFORMAT_R16_UNORM,
   [MESA_FORMAT_RG88] = BRW_SURFACEFORMAT_R8G8_UNORM,
   [MESA_FORMAT_RG1616] = BRW_SURFACEFORMAT_R16G16_UNORM,
   [MESA_FORMAT_ARGB8888] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM,
   [MESA_FORMAT_XRGB8888] = BRW_SURFACEFORMAT_B8G8R8X8_UNORM,
   [MESA_FORMAT_RGB565] = BRW_SURFACEFORMAT_B5G6R5_UNORM,
   [MESA_FORMAT_ARGB1555] = BRW_SURFACEFORMAT_B5G5R5A1_UNORM,
   [MESA_FORMAT_ARGB4444] = BRW_SURFACEFORMAT_B4G4R4A4_UNORM,
   [MESA_FORMAT_YCBCR_REV] = BRW_SURFACEFORMAT_YCRCB_NORMAL,
   [MESA_FORMAT_YCBCR] = BRW_SURFACEFORMAT_YCRCB_SWAPUVY,
   [MESA_FORMAT_RGB_FXT1] = BRW_SURFACEFORMAT_FXT1,
   [MESA_FORMAT_RGBA_FXT1] = BRW_SURFACEFORMAT_FXT1,
   [MESA_FORMAT_RGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB,
   [MESA_FORMAT_RGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM,
   [MESA_FORMAT_RGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM,
   [MESA_FORMAT_RGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM,
   [MESA_FORMAT_SRGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB_SRGB,
   [MESA_FORMAT_SRGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM_SRGB,
   [MESA_FORMAT_SRGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM_SRGB,
   [MESA_FORMAT_SRGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM_SRGB,
   [MESA_FORMAT_SARGB8] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB,
   [MESA_FORMAT_SLA8] = BRW_SURFACEFORMAT_L8A8_UNORM_SRGB,
   [MESA_FORMAT_SL8] = BRW_SURFACEFORMAT_L8_UNORM_SRGB,
   [MESA_FORMAT_DUDV8] = BRW_SURFACEFORMAT_R8G8_SNORM,
   [MESA_FORMAT_SIGNED_RGBA8888_REV] = BRW_SURFACEFORMAT_R8G8B8A8_SNORM,
};
105
106 bool
107 brw_render_target_supported(gl_format format)
108 {
109 if (format == MESA_FORMAT_S8_Z24 ||
110 format == MESA_FORMAT_X8_Z24 ||
111 format == MESA_FORMAT_Z16) {
112 return true;
113 }
114
115 /* Not exactly true, as some of those formats are not renderable.
116 * But at least we know how to translate them.
117 */
118 return brw_format_for_mesa_format[format] != 0;
119 }
120
121 static GLuint translate_tex_format( gl_format mesa_format,
122 GLenum internal_format,
123 GLenum depth_mode )
124 {
125 switch( mesa_format ) {
126
127 case MESA_FORMAT_Z16:
128 if (depth_mode == GL_INTENSITY)
129 return BRW_SURFACEFORMAT_I16_UNORM;
130 else if (depth_mode == GL_ALPHA)
131 return BRW_SURFACEFORMAT_A16_UNORM;
132 else if (depth_mode == GL_RED)
133 return BRW_SURFACEFORMAT_R16_UNORM;
134 else
135 return BRW_SURFACEFORMAT_L16_UNORM;
136
137 case MESA_FORMAT_S8_Z24:
138 /* XXX: these different surface formats don't seem to
139 * make any difference for shadow sampler/compares.
140 */
141 if (depth_mode == GL_INTENSITY)
142 return BRW_SURFACEFORMAT_I24X8_UNORM;
143 else if (depth_mode == GL_ALPHA)
144 return BRW_SURFACEFORMAT_A24X8_UNORM;
145 else if (depth_mode == GL_RED)
146 return BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS;
147 else
148 return BRW_SURFACEFORMAT_L24X8_UNORM;
149
150 default:
151 assert(brw_format_for_mesa_format[mesa_format] != 0);
152 return brw_format_for_mesa_format[mesa_format];
153 }
154 }
155
156 static void
157 brw_set_surface_tiling(struct brw_surface_state *surf, uint32_t tiling)
158 {
159 switch (tiling) {
160 case I915_TILING_NONE:
161 surf->ss3.tiled_surface = 0;
162 surf->ss3.tile_walk = 0;
163 break;
164 case I915_TILING_X:
165 surf->ss3.tiled_surface = 1;
166 surf->ss3.tile_walk = BRW_TILEWALK_XMAJOR;
167 break;
168 case I915_TILING_Y:
169 surf->ss3.tiled_surface = 1;
170 surf->ss3.tile_walk = BRW_TILEWALK_YMAJOR;
171 break;
172 }
173 }
174
/**
 * Build the SURFACE_STATE for the texture currently bound to the given
 * texture unit, emit it into the batchbuffer state space, and record the
 * resulting BO/offset in brw->wm.surf_bo/surf_offset for later use by the
 * binding table.  A relocation is emitted so the GPU sees the final
 * address of the texture's region buffer.
 */
static void
brw_update_texture_surface( struct gl_context *ctx, GLuint unit )
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][tObj->BaseLevel];
   const GLuint surf_index = SURF_INDEX_TEXTURE(unit);
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = translate_tex_target(tObj->Target);
   surf.ss0.surface_format = translate_tex_format(firstImage->TexFormat,
                                                  firstImage->InternalFormat,
                                                  tObj->DepthMode);

   /* This is ok for all textures with channel width 8bit or less:
    */
   /* surf.ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */
   surf.ss1.base_addr = intelObj->mt->region->buffer->offset; /* reloc */

   /* mip_count is #levels - 1 */
   surf.ss2.mip_count = intelObj->_MaxLevel - tObj->BaseLevel;
   /* Hardware width/height/depth/pitch fields hold the value minus one. */
   surf.ss2.width = intelObj->mt->width0 - 1;
   surf.ss2.height = intelObj->mt->height0 - 1;
   brw_set_surface_tiling(&surf, intelObj->mt->region->tiling);
   surf.ss3.pitch = (intelObj->mt->region->pitch * intelObj->mt->cpp) - 1;
   surf.ss3.depth = intelObj->mt->depth0 - 1;
   surf.ss4.min_lod = tObj->BaseLevel;

   /* Cube maps need all six face-enable bits set. */
   if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
      surf.ss0.cube_pos_x = 1;
      surf.ss0.cube_pos_y = 1;
      surf.ss0.cube_pos_z = 1;
      surf.ss0.cube_neg_x = 1;
      surf.ss0.cube_neg_y = 1;
      surf.ss0.cube_neg_z = 1;
   }

   /* Copy the finished surface state into the batch's state space. */
   map = brw_state_batch(brw, sizeof(surf), 32,
                         &brw->wm.surf_bo[surf_index],
                         &brw->wm.surf_offset[surf_index]);
   memcpy(map, &surf, sizeof(surf));

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->wm.surf_bo[surf_index],
                           brw->wm.surf_offset[surf_index] +
                           offsetof(struct brw_surface_state, ss1),
                           intelObj->mt->region->buffer, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}
229
/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 *
 * \param bo          buffer object holding the constant data (must be non-NULL)
 * \param width       number of 16-byte (vec4 float) constant slots in \p bo
 * \param out_bo      receives the BO containing the new surface state
 * \param out_offset  receives the offset of the surface state within *out_bo
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            int width,
                            drm_intel_bo **out_bo,
                            uint32_t *out_offset)
{
   struct intel_context *intel = &brw->intel;
   const GLint w = width - 1;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = BRW_SURFACE_BUFFER;
   surf.ss0.surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   if (intel->gen >= 6)
      surf.ss0.render_cache_read_write = 1;

   assert(bo);
   surf.ss1.base_addr = bo->offset; /* reloc */

   /* BUFFER surfaces encode the element count across three fields. */
   surf.ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
   surf.ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
   surf.ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
   surf.ss3.pitch = (width * 16) - 1;    /* ignored?? */
   brw_set_surface_tiling(&surf, I915_TILING_NONE); /* tiling not allowed for buffers */

   map = brw_state_batch(brw, sizeof(surf), 32, out_bo, out_offset);
   memcpy(map, &surf, sizeof(surf));

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(*out_bo, (*out_offset +
                                     offsetof(struct brw_surface_state, ss1)),
                           bo, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}
276
/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
prepare_wm_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const int size = brw->wm.prog_data->nr_pull_params * sizeof(float);
   float *constants;
   unsigned int i;

   /* Refresh any GL state-dependent parameter values before copying. */
   _mesa_load_state_parameters(ctx, fp->program.Base.Parameters);

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (brw->wm.prog_data->nr_pull_params == 0) {
      /* No pull constants needed; drop any stale buffer and flag the
       * change so dependent state gets re-emitted.
       */
      if (brw->wm.const_bo) {
	 drm_intel_bo_unreference(brw->wm.const_bo);
	 brw->wm.const_bo = NULL;
	 brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
      }
      return;
   }

   /* Replace the old constant BO with a freshly-sized one. */
   drm_intel_bo_unreference(brw->wm.const_bo);
   brw->wm.const_bo = drm_intel_bo_alloc(intel->bufmgr, "WM const bo",
					 size, 64);

   /* _NEW_PROGRAM_CONSTANTS */
   drm_intel_gem_bo_map_gtt(brw->wm.const_bo);
   constants = brw->wm.const_bo->virtual;
   for (i = 0; i < brw->wm.prog_data->nr_pull_params; i++) {
      constants[i] = convert_param(brw->wm.prog_data->pull_param_convert[i],
				   *brw->wm.prog_data->pull_param[i]);
   }
   drm_intel_gem_bo_unmap_gtt(brw->wm.const_bo);

   brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
}
321
/* State atom: regenerates the WM pull-constant buffer whenever program
 * constants or the fragment program change.
 */
const struct brw_tracked_state brw_wm_constants = {
   .dirty = {
      .mesa = (_NEW_PROGRAM_CONSTANTS),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM),
      .cache = 0
   },
   .prepare = prepare_wm_constants,
};
330
331 /**
332 * Updates surface / buffer for fragment shader constant buffer, if
333 * one is required.
334 *
335 * This consumes the state updates for the constant buffer, and produces
336 * BRW_NEW_WM_SURFACES to get picked up by brw_prepare_wm_surfaces for
337 * inclusion in the binding table.
338 */
339 static void upload_wm_constant_surface(struct brw_context *brw )
340 {
341 GLuint surf = SURF_INDEX_FRAG_CONST_BUFFER;
342 struct brw_fragment_program *fp =
343 (struct brw_fragment_program *) brw->fragment_program;
344 const struct gl_program_parameter_list *params =
345 fp->program.Base.Parameters;
346
347 /* If there's no constant buffer, then no surface BO is needed to point at
348 * it.
349 */
350 if (brw->wm.const_bo == 0) {
351 if (brw->wm.surf_bo[surf] != NULL) {
352 drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
353 brw->wm.surf_bo[surf] = NULL;
354 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
355 }
356 return;
357 }
358
359 brw_create_constant_surface(brw, brw->wm.const_bo, params->NumParameters,
360 &brw->wm.surf_bo[surf],
361 &brw->wm.surf_offset[surf]);
362 brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
363 }
364
/* State atom: re-emits the constant-buffer surface when the constant BO
 * changes or a new batchbuffer starts (surface state lives in the batch).
 */
const struct brw_tracked_state brw_wm_constant_surface = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_WM_CONSTBUF |
	      BRW_NEW_BATCH),
      .cache = 0
   },
   .emit = upload_wm_constant_surface,
};
374
/**
 * Emit a null-surface SURFACE_STATE for a draw-buffer slot with no
 * renderbuffer attached, so the binding table entry is still valid.
 * Writes to a BRW_SURFACE_NULL surface are discarded by the hardware.
 */
static void
brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.surface_type = BRW_SURFACE_NULL;
   surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
   surf.ss1.base_addr = 0;

   surf.ss2.width = 0;
   surf.ss2.height = 0;
   brw_set_surface_tiling(&surf, I915_TILING_NONE);
   surf.ss3.pitch = 0;

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      /* Belt-and-suspenders: disable all channel writes on pre-gen6,
       * which carries blend/write-mask state in the surface.
       */
      surf.ss0.color_blend = 0;
      surf.ss0.writedisable_red =   1;
      surf.ss0.writedisable_green = 1;
      surf.ss0.writedisable_blue =  1;
      surf.ss0.writedisable_alpha = 1;
   }

   map = brw_state_batch(brw, sizeof(surf), 32,
			 &brw->wm.surf_bo[unit],
			 &brw->wm.surf_offset[unit]);
   memcpy(map, &surf, sizeof(surf));
}
407
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 */
static void
brw_update_renderbuffer_surface(struct brw_context *brw,
				struct gl_renderbuffer *rb,
				unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_region *region = irb->region;
   struct brw_surface_state surf;
   void *map;

   memset(&surf, 0, sizeof(surf));

   switch (irb->Base.Format) {
   case MESA_FORMAT_XRGB8888:
      /* XRGB is handled as ARGB because the chips in this family
       * cannot render to XRGB targets.  This means that we have to
       * mask writes to alpha (ala glColorMask) and reconfigure the
       * alpha blending hardware to use GL_ONE (or GL_ZERO) for
       * cases where GL_DST_ALPHA (or GL_ONE_MINUS_DST_ALPHA) is
       * used.
       */
      surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   default:
      surf.ss0.surface_format = brw_format_for_mesa_format[irb->Base.Format];
      assert(surf.ss0.surface_format != 0);
   }

   surf.ss0.surface_type = BRW_SURFACE_2D;
   if (region->tiling == I915_TILING_NONE) {
      /* Linear surface: byte offset of the draw origin within the region. */
      surf.ss1.base_addr = (region->draw_x +
			    region->draw_y * region->pitch) * region->cpp;
   } else {
      /* Tiled surface: base_addr must be tile-aligned, so split the draw
       * origin into a tile-aligned base plus an intra-tile (x, y) offset
       * carried in ss5.  X tiles are 512B x 8 rows; Y tiles 128B x 32 rows.
       */
      uint32_t tile_base, tile_x, tile_y;
      uint32_t pitch = region->pitch * region->cpp;

      if (region->tiling == I915_TILING_X) {
	 tile_x = region->draw_x % (512 / region->cpp);
	 tile_y = region->draw_y % 8;
	 tile_base = ((region->draw_y / 8) * (8 * pitch));
	 tile_base += (region->draw_x - tile_x) / (512 / region->cpp) * 4096;
      } else {
	 /* Y */
	 tile_x = region->draw_x % (128 / region->cpp);
	 tile_y = region->draw_y % 32;
	 tile_base = ((region->draw_y / 32) * (32 * pitch));
	 tile_base += (region->draw_x - tile_x) / (128 / region->cpp) * 4096;
      }
      assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
      /* ss5 stores x_offset in units of 4 pixels and y_offset in units of
       * 2 rows, so the intra-tile offset must be aligned accordingly.
       */
      assert(tile_x % 4 == 0);
      assert(tile_y % 2 == 0);
      /* Note that the low bits of these fields are missing, so
       * there's the possibility of getting in trouble.
       */
      surf.ss1.base_addr = tile_base;
      surf.ss5.x_offset = tile_x / 4;
      surf.ss5.y_offset = tile_y / 2;
   }
   surf.ss1.base_addr += region->buffer->offset; /* reloc */

   surf.ss2.width = rb->Width - 1;
   surf.ss2.height = rb->Height - 1;
   brw_set_surface_tiling(&surf, region->tiling);
   surf.ss3.pitch = (region->pitch * region->cpp) - 1;

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      surf.ss0.color_blend = (!ctx->Color._LogicOpEnabled &&
			      (ctx->Color.BlendEnabled & (1 << unit)));
      surf.ss0.writedisable_red =   !ctx->Color.ColorMask[unit][0];
      surf.ss0.writedisable_green = !ctx->Color.ColorMask[unit][1];
      surf.ss0.writedisable_blue =  !ctx->Color.ColorMask[unit][2];
      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0)
	 surf.ss0.writedisable_alpha = 1;
      else
	 surf.ss0.writedisable_alpha = !ctx->Color.ColorMask[unit][3];
   }

   map = brw_state_batch(brw, sizeof(surf), 32,
			 &brw->wm.surf_bo[unit],
			 &brw->wm.surf_offset[unit]);
   memcpy(map, &surf, sizeof(surf));

   /* The relocation delta is the base address minus the buffer offset,
    * i.e. the draw-origin offset computed above.
    */
   drm_intel_bo_emit_reloc(brw->wm.surf_bo[unit],
			   brw->wm.surf_offset[unit] +
			   offsetof(struct brw_surface_state, ss1),
			   region->buffer,
			   surf.ss1.base_addr - region->buffer->offset,
			   I915_GEM_DOMAIN_RENDER,
			   I915_GEM_DOMAIN_RENDER);
}
509
510 static void
511 prepare_wm_surfaces(struct brw_context *brw)
512 {
513 struct gl_context *ctx = &brw->intel.ctx;
514 int i;
515 int nr_surfaces = 0;
516
517 if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
518 for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
519 struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
520 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
521 struct intel_region *region = irb ? irb->region : NULL;
522
523 brw_add_validated_bo(brw, region->buffer);
524 nr_surfaces = SURF_INDEX_DRAW(i) + 1;
525 }
526 }
527
528 if (brw->wm.const_bo) {
529 brw_add_validated_bo(brw, brw->wm.const_bo);
530 nr_surfaces = SURF_INDEX_FRAG_CONST_BUFFER + 1;
531 }
532
533 for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
534 const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
535 struct gl_texture_object *tObj = texUnit->_Current;
536 struct intel_texture_object *intelObj = intel_texture_object(tObj);
537
538 if (texUnit->_ReallyEnabled) {
539 brw_add_validated_bo(brw, intelObj->mt->region->buffer);
540 nr_surfaces = SURF_INDEX_TEXTURE(i) + 1;
541 }
542 }
543
544 /* Have to update this in our prepare, since the unit's prepare
545 * relies on it.
546 */
547 if (brw->wm.nr_surfaces != nr_surfaces) {
548 brw->wm.nr_surfaces = nr_surfaces;
549 brw->state.dirty.brw |= BRW_NEW_NR_WM_SURFACES;
550 }
551 }
552
/**
 * Constructs the set of surface state objects pointed to by the
 * binding table.
 */
static void
upload_wm_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* _NEW_BUFFERS | _NEW_COLOR */
   /* Update surfaces for drawing buffers */
   if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
	 /* Draw buffers without an intel renderbuffer get a null surface
	  * so their binding-table slot is still valid.
	  */
	 if (intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i])) {
	    brw_update_renderbuffer_surface(brw,
					    ctx->DrawBuffer->_ColorDrawBuffers[i],
					    i);
	 } else {
	    brw_update_null_renderbuffer_surface(brw, i);
	 }
      }
   } else {
      /* No color buffers at all: still emit a null surface for slot 0. */
      brw_update_null_renderbuffer_surface(brw, 0);
   }

   /* Update surfaces for textures */
   for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
      const GLuint surf = SURF_INDEX_TEXTURE(i);

      /* _NEW_TEXTURE */
      if (texUnit->_ReallyEnabled) {
	 brw_update_texture_surface(ctx, i);
      } else {
	 /* Unit disabled: release the old surface state, if any. */
	 drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
	 brw->wm.surf_bo[surf] = NULL;
      }
   }

   brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
}
595
/* State atom: validates and re-emits all WM surface state when color,
 * texture, or framebuffer state changes, or on a new batchbuffer.
 */
const struct brw_tracked_state brw_wm_surfaces = {
   .dirty = {
      .mesa = (_NEW_COLOR |
	       _NEW_TEXTURE |
	       _NEW_BUFFERS),
      .brw = (BRW_NEW_BATCH),
      .cache = 0
   },
   .prepare = prepare_wm_surfaces,
   .emit = upload_wm_surfaces,
};
607
608 /**
609 * Constructs the binding table for the WM surface state, which maps unit
610 * numbers to surface state objects.
611 */
612 static void
613 brw_wm_upload_binding_table(struct brw_context *brw)
614 {
615 uint32_t *bind;
616 int i;
617
618 /* Might want to calculate nr_surfaces first, to avoid taking up so much
619 * space for the binding table.
620 */
621 bind = brw_state_batch(brw, sizeof(uint32_t) * BRW_WM_MAX_SURF,
622 32, &brw->wm.bind_bo, &brw->wm.bind_bo_offset);
623
624 for (i = 0; i < BRW_WM_MAX_SURF; i++) {
625 /* BRW_NEW_WM_SURFACES */
626 bind[i] = brw->wm.surf_offset[i];
627 if (brw->wm.surf_bo[i]) {
628 bind[i] = brw->wm.surf_offset[i];
629 } else {
630 bind[i] = 0;
631 }
632 }
633
634 brw->state.dirty.brw |= BRW_NEW_BINDING_TABLE;
635 }
636
/* State atom: rebuilds the WM binding table whenever the surfaces it points
 * at change or a new batchbuffer starts.
 */
const struct brw_tracked_state brw_wm_binding_table = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
	      BRW_NEW_WM_SURFACES),
      .cache = 0
   },
   .emit = brw_wm_upload_binding_table,
};