[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/texstore.h"
#include "program/prog_parameter.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

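/**
 * Convert a GL texture target to the BRW_SURFACE_* type used in
 * SURFACE_STATE.  There is no dedicated rectangle surface type on this
 * hardware, so rectangle textures are set up as ordinary 2D surfaces.
 */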
GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
      return BRW_SURFACE_CUBE;

   default:
      assert(0);
      return 0;
   }
}

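/**
 * Map a mesa texture/renderbuffer format to a hardware surface format.
 *
 * Formats without an entry in the table map to 0.  Since
 * BRW_SURFACEFORMAT_R32G32B32A32_FLOAT also happens to be 0, callers that
 * need to tell "unsupported" apart from that format have to special-case
 * MESA_FORMAT_RGBA_FLOAT32 (see brw_render_target_supported() and
 * translate_tex_format() below).
 */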
uint32_t
brw_format_for_mesa_format(gl_format mesa_format)
{
   static const uint32_t table[MESA_FORMAT_COUNT] =
   {
      [MESA_FORMAT_L8] = BRW_SURFACEFORMAT_L8_UNORM,
      [MESA_FORMAT_I8] = BRW_SURFACEFORMAT_I8_UNORM,
      [MESA_FORMAT_A8] = BRW_SURFACEFORMAT_A8_UNORM,
      [MESA_FORMAT_AL88] = BRW_SURFACEFORMAT_L8A8_UNORM,
      [MESA_FORMAT_AL1616] = BRW_SURFACEFORMAT_L16A16_UNORM,
      [MESA_FORMAT_R8] = BRW_SURFACEFORMAT_R8_UNORM,
      [MESA_FORMAT_R16] = BRW_SURFACEFORMAT_R16_UNORM,
      [MESA_FORMAT_RG88] = BRW_SURFACEFORMAT_R8G8_UNORM,
      [MESA_FORMAT_RG1616] = BRW_SURFACEFORMAT_R16G16_UNORM,
      [MESA_FORMAT_ARGB8888] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM,
      [MESA_FORMAT_XRGB8888] = BRW_SURFACEFORMAT_B8G8R8X8_UNORM,
      [MESA_FORMAT_RGB565] = BRW_SURFACEFORMAT_B5G6R5_UNORM,
      [MESA_FORMAT_ARGB1555] = BRW_SURFACEFORMAT_B5G5R5A1_UNORM,
      [MESA_FORMAT_ARGB4444] = BRW_SURFACEFORMAT_B4G4R4A4_UNORM,
      [MESA_FORMAT_YCBCR_REV] = BRW_SURFACEFORMAT_YCRCB_NORMAL,
      [MESA_FORMAT_YCBCR] = BRW_SURFACEFORMAT_YCRCB_SWAPUVY,
      [MESA_FORMAT_RGB_FXT1] = BRW_SURFACEFORMAT_FXT1,
      [MESA_FORMAT_RGBA_FXT1] = BRW_SURFACEFORMAT_FXT1,
      [MESA_FORMAT_RGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB,
      [MESA_FORMAT_RGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM,
      [MESA_FORMAT_RGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM,
      [MESA_FORMAT_RGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM,
      [MESA_FORMAT_SRGB_DXT1] = BRW_SURFACEFORMAT_DXT1_RGB_SRGB,
      [MESA_FORMAT_SRGBA_DXT1] = BRW_SURFACEFORMAT_BC1_UNORM_SRGB,
      [MESA_FORMAT_SRGBA_DXT3] = BRW_SURFACEFORMAT_BC2_UNORM_SRGB,
      [MESA_FORMAT_SRGBA_DXT5] = BRW_SURFACEFORMAT_BC3_UNORM_SRGB,
      [MESA_FORMAT_SARGB8] = BRW_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB,
      [MESA_FORMAT_SLA8] = BRW_SURFACEFORMAT_L8A8_UNORM_SRGB,
      [MESA_FORMAT_SL8] = BRW_SURFACEFORMAT_L8_UNORM_SRGB,
      [MESA_FORMAT_DUDV8] = BRW_SURFACEFORMAT_R8G8_SNORM,
      [MESA_FORMAT_SIGNED_R8] = BRW_SURFACEFORMAT_R8_SNORM,
      [MESA_FORMAT_SIGNED_RG88_REV] = BRW_SURFACEFORMAT_R8G8_SNORM,
      [MESA_FORMAT_SIGNED_RGBA8888_REV] = BRW_SURFACEFORMAT_R8G8B8A8_SNORM,
      [MESA_FORMAT_SIGNED_R16] = BRW_SURFACEFORMAT_R16_SNORM,
      [MESA_FORMAT_SIGNED_GR1616] = BRW_SURFACEFORMAT_R16G16_SNORM,
      [MESA_FORMAT_RGBA_FLOAT32] = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
      [MESA_FORMAT_RG_FLOAT32] = BRW_SURFACEFORMAT_R32G32_FLOAT,
      [MESA_FORMAT_R_FLOAT32] = BRW_SURFACEFORMAT_R32_FLOAT,
      [MESA_FORMAT_INTENSITY_FLOAT32] = BRW_SURFACEFORMAT_I32_FLOAT,
      [MESA_FORMAT_LUMINANCE_FLOAT32] = BRW_SURFACEFORMAT_L32_FLOAT,
      [MESA_FORMAT_ALPHA_FLOAT32] = BRW_SURFACEFORMAT_A32_FLOAT,
      [MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32] = BRW_SURFACEFORMAT_L32A32_FLOAT,
      [MESA_FORMAT_RED_RGTC1] = BRW_SURFACEFORMAT_BC4_UNORM,
      [MESA_FORMAT_SIGNED_RED_RGTC1] = BRW_SURFACEFORMAT_BC4_SNORM,
      [MESA_FORMAT_RG_RGTC2] = BRW_SURFACEFORMAT_BC5_UNORM,
      [MESA_FORMAT_SIGNED_RG_RGTC2] = BRW_SURFACEFORMAT_BC5_SNORM,
   };
   assert(mesa_format < MESA_FORMAT_COUNT);
   return table[mesa_format];
}

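/**
 * Used when checking FBO completeness: returns true if we know how to set
 * up a surface for rendering to (or at least binding) a buffer of the
 * given format.
 */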
bool
brw_render_target_supported(gl_format format)
{
   /* These are not color render targets like the table holds, but we
    * ask the question for FBO completeness.
    */
   if (format == MESA_FORMAT_S8_Z24 ||
       format == MESA_FORMAT_X8_Z24 ||
       format == MESA_FORMAT_S8 ||
       format == MESA_FORMAT_Z16) {
      return true;
   }

   /* The value of this BRW_SURFACEFORMAT is 0, so hardcode it.
    */
   if (format == MESA_FORMAT_RGBA_FLOAT32)
      return true;

   /* Not exactly true, as some of those formats are not renderable.
    * But at least we know how to translate them.
    */
   return brw_format_for_mesa_format(format) != 0;
}

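/**
 * Pick the hardware surface format for texturing from a gl_format.
 *
 * Depth formats have no direct sampling equivalent, so GL_DEPTH_TEXTURE_MODE
 * (depth_mode) selects whether the depth value shows up as intensity, alpha,
 * red, or luminance.  For sRGB formats, GL_EXT_texture_sRGB_decode
 * (srgb_decode) selects between the sRGB view and its linear equivalent.
 */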
GLuint
translate_tex_format(gl_format mesa_format,
                     GLenum internal_format,
                     GLenum depth_mode,
                     GLenum srgb_decode)
{
   switch (mesa_format) {

   case MESA_FORMAT_Z16:
      if (depth_mode == GL_INTENSITY)
         return BRW_SURFACEFORMAT_I16_UNORM;
      else if (depth_mode == GL_ALPHA)
         return BRW_SURFACEFORMAT_A16_UNORM;
      else if (depth_mode == GL_RED)
         return BRW_SURFACEFORMAT_R16_UNORM;
      else
         return BRW_SURFACEFORMAT_L16_UNORM;

   case MESA_FORMAT_S8_Z24:
   case MESA_FORMAT_X8_Z24:
      /* XXX: these different surface formats don't seem to
       * make any difference for shadow sampler/compares.
       */
      if (depth_mode == GL_INTENSITY)
         return BRW_SURFACEFORMAT_I24X8_UNORM;
      else if (depth_mode == GL_ALPHA)
         return BRW_SURFACEFORMAT_A24X8_UNORM;
      else if (depth_mode == GL_RED)
         return BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS;
      else
         return BRW_SURFACEFORMAT_L24X8_UNORM;

   case MESA_FORMAT_SARGB8:
   case MESA_FORMAT_SLA8:
   case MESA_FORMAT_SL8:
      if (srgb_decode == GL_DECODE_EXT)
         return brw_format_for_mesa_format(mesa_format);
      else if (srgb_decode == GL_SKIP_DECODE_EXT)
         return brw_format_for_mesa_format(_mesa_get_srgb_format_linear(mesa_format));

   case MESA_FORMAT_RGBA_FLOAT32:
      /* The value of this BRW_SURFACEFORMAT is 0, which tricks the
       * assertion below.
       */
      return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   default:
      assert(brw_format_for_mesa_format(mesa_format) != 0);
      return brw_format_for_mesa_format(mesa_format);
   }
}

static uint32_t
brw_get_surface_tiling_bits(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return BRW_SURFACE_TILED;
   case I915_TILING_Y:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

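/**
 * Builds the six-dword SURFACE_STATE for the texture bound to the given
 * texture unit and records its batch offset in brw->wm.surf_offset[].
 *
 * Roughly: DW0 holds the surface type, format and mipmap layout, DW1 the
 * base address (patched through a relocation), DW2 the width/height and
 * mipmap level count, and DW3 the depth, pitch and tiling.  The relocation
 * emitted below asks the kernel to rewrite DW1 with the buffer's real
 * graphics address at execbuffer time, so the presumed bo->offset written
 * here is only a guess.
 */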
static void
brw_update_texture_surface(struct gl_context *ctx, GLuint unit)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][tObj->BaseLevel];
   struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
   const GLuint surf_index = SURF_INDEX_TEXTURE(unit);
   uint32_t *surf;

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          6 * 4, 32, &brw->wm.surf_offset[surf_index]);

   surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
              BRW_SURFACE_CUBEFACE_ENABLES |
              (translate_tex_format(firstImage->TexFormat,
                                    firstImage->InternalFormat,
                                    sampler->DepthMode,
                                    sampler->sRGBDecode) <<
               BRW_SURFACE_FORMAT_SHIFT));

   surf[1] = intelObj->mt->region->buffer->offset; /* reloc */

   surf[2] = ((intelObj->_MaxLevel - tObj->BaseLevel) << BRW_SURFACE_LOD_SHIFT |
              (firstImage->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (firstImage->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(intelObj->mt->region->tiling) |
              (firstImage->Depth - 1) << BRW_SURFACE_DEPTH_SHIFT |
              ((intelObj->mt->region->pitch * intelObj->mt->cpp) - 1) <<
              BRW_SURFACE_PITCH_SHIFT);

   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           brw->wm.surf_offset[surf_index] + 4,
                           intelObj->mt->region->buffer, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            int width,
                            uint32_t *out_offset)
{
   struct intel_context *intel = &brw->intel;
   const GLint w = width - 1;
   uint32_t *surf;

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          6 * 4, 32, out_offset);

   surf[0] = (BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
              BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_SURFACE_FORMAT_SHIFT);

   if (intel->gen >= 6)
      surf[0] |= BRW_SURFACE_RC_READ_WRITE;

   surf[1] = bo->offset; /* reloc */

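   /* The buffer size is expressed as the number of 16-byte (vec4) elements
    * minus one, split across the width (low 7 bits), height (next 13 bits)
    * and depth (next 7 bits) fields of the surface state.  `w` above is
    * already width - 1, so it is only masked and shifted here, not
    * decremented again.  For example, a buffer of 256 vec4s gives w = 255:
    * width field = 0x7f, height field = 0x1, depth field = 0.
    */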
   surf[2] = ((w & 0x7f) << BRW_SURFACE_WIDTH_SHIFT |
              ((w >> 7) & 0x1fff) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (((w >> 20) & 0x7f) << BRW_SURFACE_DEPTH_SHIFT |
              (width * 16 - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           *out_offset + 4,
                           bo, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
prepare_wm_pull_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const int size = brw->wm.prog_data->nr_pull_params * sizeof(float);
   float *constants;
   unsigned int i;

   _mesa_load_state_parameters(ctx, fp->program.Base.Parameters);

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (brw->wm.prog_data->nr_pull_params == 0) {
      if (brw->wm.const_bo) {
         drm_intel_bo_unreference(brw->wm.const_bo);
         brw->wm.const_bo = NULL;
         brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
      }
      return;
   }

   drm_intel_bo_unreference(brw->wm.const_bo);
   brw->wm.const_bo = drm_intel_bo_alloc(intel->bufmgr, "WM const bo",
                                         size, 64);

   /* _NEW_PROGRAM_CONSTANTS */
   drm_intel_gem_bo_map_gtt(brw->wm.const_bo);
   constants = brw->wm.const_bo->virtual;
   for (i = 0; i < brw->wm.prog_data->nr_pull_params; i++) {
      constants[i] = convert_param(brw->wm.prog_data->pull_param_convert[i],
                                   brw->wm.prog_data->pull_param[i]);
   }
   drm_intel_gem_bo_unmap_gtt(brw->wm.const_bo);

   brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
}

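/* The .dirty bits below name the state whose change causes this atom to run:
 * _NEW_PROGRAM_CONSTANTS for updated constant values, and
 * BRW_NEW_FRAGMENT_PROGRAM for a new program (which may change how many
 * constants are pulled rather than pushed through the CURBE).  This is a
 * .prepare atom, so it runs before any state is emitted into the batch.
 */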
const struct brw_tracked_state brw_wm_constants = {
   .dirty = {
      .mesa = (_NEW_PROGRAM_CONSTANTS),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM),
      .cache = 0
   },
   .prepare = prepare_wm_pull_constants,
};

/**
 * Updates surface / buffer for fragment shader constant buffer, if
 * one is required.
 *
 * This consumes the state updates for the constant buffer, and produces
 * BRW_NEW_WM_SURFACES to get picked up by brw_prepare_wm_surfaces for
 * inclusion in the binding table.
 */
static void
upload_wm_constant_surface(struct brw_context *brw)
{
   GLuint surf = SURF_INDEX_FRAG_CONST_BUFFER;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const struct gl_program_parameter_list *params =
      fp->program.Base.Parameters;

   /* If there's no constant buffer, then no surface BO is needed to point at
    * it.
    */
   if (brw->wm.const_bo == 0) {
      if (brw->wm.surf_offset[surf]) {
         brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
         brw->wm.surf_offset[surf] = 0;
      }
      return;
   }

   brw_create_constant_surface(brw, brw->wm.const_bo, params->NumParameters,
                               &brw->wm.surf_offset[surf]);
   brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
}

const struct brw_tracked_state brw_wm_constant_surface = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_WM_CONSTBUF |
              BRW_NEW_BATCH),
      .cache = 0
   },
   .emit = upload_wm_constant_surface,
};

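/**
 * Emits a "null" surface for a color attachment slot that has no
 * renderbuffer bound, so the corresponding binding table entry still points
 * at valid surface state.  Writes to a BRW_SURFACE_NULL surface are ignored
 * by the hardware; on gen4/5 we additionally set all of the write-disable
 * bits, since those chips keep color masking in the surface rather than in
 * blend state.
 */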
static void
brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   uint32_t *surf;

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          6 * 4, 32, &brw->wm.surf_offset[unit]);

   surf[0] = (BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   if (intel->gen < 6) {
      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
   }
   surf[1] = 0;
   surf[2] = 0;
   surf[3] = 0;
   surf[4] = 0;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * It is called for each currently bound color draw buffer, so it handles
 * GL_ARB_draw_buffers as well as the simple front/back buffer case.
 */
static void
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_region *region = irb->region;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   uint32_t format = 0;

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          6 * 4, 32, &brw->wm.surf_offset[unit]);

   switch (irb->Base.Format) {
   case MESA_FORMAT_XRGB8888:
      /* XRGB is handled as ARGB because the chips in this family
       * cannot render to XRGB targets.  This means that we have to
       * mask writes to alpha (ala glColorMask) and reconfigure the
       * alpha blending hardware to use GL_ONE (or GL_ZERO) for
       * cases where GL_DST_ALPHA (or GL_ONE_MINUS_DST_ALPHA) is
       * used.
       */
      format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   case MESA_FORMAT_INTENSITY_FLOAT32:
   case MESA_FORMAT_LUMINANCE_FLOAT32:
      /* For these formats, we just need to read/write the first
       * channel into R, which is to say that we just treat them as
       * GL_RED.
       */
      format = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case MESA_FORMAT_SARGB8:
      /* Without GL_EXT_framebuffer_sRGB we shouldn't bind sRGB
       * surfaces to the blend/update as sRGB.
       */
      if (ctx->Color.sRGBEnabled)
         format = brw_format_for_mesa_format(irb->Base.Format);
      else
         format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   default:
      assert(brw_render_target_supported(irb->Base.Format));
      format = brw_format_for_mesa_format(irb->Base.Format);
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   surf[1] = (intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y) +
              region->buffer->offset);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(region->tiling) |
              ((region->pitch * region->cpp) - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = 0;

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT);

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color._LogicOpEnabled &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           brw->wm.surf_offset[unit] + 4,
                           region->buffer,
                           surf[1] - region->buffer->offset,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);
}

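/**
 * The prepare half of the WM surfaces atom: runs before anything is written
 * to the batchbuffer.  It adds every buffer object the surfaces will
 * reference to the validation list, so the batch can be flushed first if
 * they would not all fit in the aperture, and it computes how many binding
 * table entries are in use.  The binding table is laid out with the color
 * draw buffers first, then the fragment shader constant buffer, then the
 * textures, which is why nr_surfaces is simply "highest used index + 1".
 */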
static void
prepare_wm_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   int i;
   int nr_surfaces = 0;

   for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
      struct intel_renderbuffer *irb = intel_renderbuffer(rb);
      struct intel_region *region = irb ? irb->region : NULL;

      if (region)
         brw_add_validated_bo(brw, region->buffer);
      nr_surfaces = SURF_INDEX_DRAW(i) + 1;
   }

   if (brw->wm.const_bo) {
      brw_add_validated_bo(brw, brw->wm.const_bo);
      nr_surfaces = SURF_INDEX_FRAG_CONST_BUFFER + 1;
   }

   for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];

      if (texUnit->_ReallyEnabled) {
         struct gl_texture_object *tObj = texUnit->_Current;
         struct intel_texture_object *intelObj = intel_texture_object(tObj);

         brw_add_validated_bo(brw, intelObj->mt->region->buffer);
         nr_surfaces = SURF_INDEX_TEXTURE(i) + 1;
      }
   }

   /* Have to update this in our prepare, since the unit's prepare
    * relies on it.
    */
   if (brw->wm.nr_surfaces != nr_surfaces) {
      brw->wm.nr_surfaces = nr_surfaces;
      brw->state.dirty.brw |= BRW_NEW_NR_WM_SURFACES;
   }
}

/**
 * Constructs the set of surface state objects pointed to by the
 * binding table.
 */
static void
upload_wm_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* _NEW_BUFFERS | _NEW_COLOR */
   /* Update surfaces for drawing buffers */
   if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
         if (intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i])) {
            brw_update_renderbuffer_surface(brw,
                                            ctx->DrawBuffer->_ColorDrawBuffers[i],
                                            i);
         } else {
            brw_update_null_renderbuffer_surface(brw, i);
         }
      }
   } else {
      brw_update_null_renderbuffer_surface(brw, 0);
   }

   /* Update surfaces for textures */
   for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
      const GLuint surf = SURF_INDEX_TEXTURE(i);

      /* _NEW_TEXTURE */
      if (texUnit->_ReallyEnabled) {
         brw_update_texture_surface(ctx, i);
      } else {
         brw->wm.surf_offset[surf] = 0;
      }
   }

   brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
}

const struct brw_tracked_state brw_wm_surfaces = {
   .dirty = {
      .mesa = (_NEW_COLOR |
               _NEW_TEXTURE |
               _NEW_BUFFERS),
      .brw = (BRW_NEW_BATCH),
      .cache = 0
   },
   .prepare = prepare_wm_surfaces,
   .emit = upload_wm_surfaces,
};

/**
 * Constructs the binding table for the WM surface state, which maps unit
 * numbers to surface state objects.
 */
static void
brw_wm_upload_binding_table(struct brw_context *brw)
{
   uint32_t *bind;
   int i;

   /* Might want to calculate nr_surfaces first, to avoid taking up so much
    * space for the binding table.
    */
   bind = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
                          sizeof(uint32_t) * BRW_WM_MAX_SURF,
                          32, &brw->wm.bind_bo_offset);

   for (i = 0; i < BRW_WM_MAX_SURF; i++) {
      /* BRW_NEW_WM_SURFACES */
      bind[i] = brw->wm.surf_offset[i];
   }

   brw->state.dirty.brw |= BRW_NEW_PS_BINDING_TABLE;
}

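/* Each binding table entry is just the batch offset of the corresponding
 * SURFACE_STATE.  The table is rebuilt whenever a new batch is started
 * (the old table's storage went away with the old batch) or any surface
 * offset changed, and BRW_NEW_PS_BINDING_TABLE then tells the atom that
 * emits the hardware binding-table pointer command to re-point at the new
 * table.
 */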
const struct brw_tracked_state brw_wm_binding_table = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
              BRW_NEW_WM_SURFACES),
      .cache = 0
   },
   .emit = brw_wm_upload_binding_table,
};