/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "main/mtypes.h"
#include "main/texstore.h"
#include "shader/prog_parameter.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"


static GLuint translate_tex_target( GLenum target )
{
   switch (target) {
   case GL_TEXTURE_1D:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
      return BRW_SURFACE_CUBE;

   default:
      assert(0);
      return 0;
   }
}


static GLuint translate_tex_format( gl_format mesa_format,
                                    GLenum internal_format,
                                    GLenum depth_mode )
{
   switch( mesa_format ) {
   case MESA_FORMAT_L8:
      return BRW_SURFACEFORMAT_L8_UNORM;

   case MESA_FORMAT_I8:
      return BRW_SURFACEFORMAT_I8_UNORM;

   case MESA_FORMAT_A8:
      return BRW_SURFACEFORMAT_A8_UNORM;

   case MESA_FORMAT_AL88:
      return BRW_SURFACEFORMAT_L8A8_UNORM;

   case MESA_FORMAT_AL1616:
      return BRW_SURFACEFORMAT_L16A16_UNORM;

   case MESA_FORMAT_RGB888:
      assert(0); /* not supported for sampling */
      return BRW_SURFACEFORMAT_R8G8B8_UNORM;

   case MESA_FORMAT_ARGB8888:
      return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;

   case MESA_FORMAT_XRGB8888:
      return BRW_SURFACEFORMAT_B8G8R8X8_UNORM;

   case MESA_FORMAT_RGBA8888_REV:
      _mesa_problem(NULL, "unexpected format in i965:translate_tex_format()");
      return BRW_SURFACEFORMAT_R8G8B8A8_UNORM;

   case MESA_FORMAT_RGB565:
      return BRW_SURFACEFORMAT_B5G6R5_UNORM;

   case MESA_FORMAT_ARGB1555:
      return BRW_SURFACEFORMAT_B5G5R5A1_UNORM;

   case MESA_FORMAT_ARGB4444:
      return BRW_SURFACEFORMAT_B4G4R4A4_UNORM;

   case MESA_FORMAT_YCBCR_REV:
      return BRW_SURFACEFORMAT_YCRCB_NORMAL;

   case MESA_FORMAT_YCBCR:
      return BRW_SURFACEFORMAT_YCRCB_SWAPUVY;

   case MESA_FORMAT_RGB_FXT1:
   case MESA_FORMAT_RGBA_FXT1:
      return BRW_SURFACEFORMAT_FXT1;

   case MESA_FORMAT_Z16:
      if (depth_mode == GL_INTENSITY)
         return BRW_SURFACEFORMAT_I16_UNORM;
      else if (depth_mode == GL_ALPHA)
         return BRW_SURFACEFORMAT_A16_UNORM;
      else
         return BRW_SURFACEFORMAT_L16_UNORM;

   case MESA_FORMAT_RGB_DXT1:
      return BRW_SURFACEFORMAT_DXT1_RGB;

   case MESA_FORMAT_RGBA_DXT1:
      return BRW_SURFACEFORMAT_BC1_UNORM;

   case MESA_FORMAT_RGBA_DXT3:
      return BRW_SURFACEFORMAT_BC2_UNORM;

   case MESA_FORMAT_RGBA_DXT5:
      return BRW_SURFACEFORMAT_BC3_UNORM;

   case MESA_FORMAT_SARGB8:
      return BRW_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB;

   case MESA_FORMAT_SLA8:
      return BRW_SURFACEFORMAT_L8A8_UNORM_SRGB;

   case MESA_FORMAT_SL8:
      return BRW_SURFACEFORMAT_L8_UNORM_SRGB;

   case MESA_FORMAT_SRGB_DXT1:
      return BRW_SURFACEFORMAT_BC1_UNORM_SRGB;

   case MESA_FORMAT_S8_Z24:
      /* XXX: these different surface formats don't seem to
       * make any difference for shadow sampler/compares.
       */
      if (depth_mode == GL_INTENSITY)
         return BRW_SURFACEFORMAT_I24X8_UNORM;
      else if (depth_mode == GL_ALPHA)
         return BRW_SURFACEFORMAT_A24X8_UNORM;
      else
         return BRW_SURFACEFORMAT_L24X8_UNORM;

   case MESA_FORMAT_DUDV8:
      return BRW_SURFACEFORMAT_R8G8_SNORM;

   case MESA_FORMAT_SIGNED_RGBA8888_REV:
      return BRW_SURFACEFORMAT_R8G8B8A8_SNORM;

   default:
      assert(0);
      return 0;
   }
}

static void
brw_set_surface_tiling(struct brw_surface_state *surf, uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_NONE:
      surf->ss3.tiled_surface = 0;
      surf->ss3.tile_walk = 0;
      break;
   case I915_TILING_X:
      surf->ss3.tiled_surface = 1;
      surf->ss3.tile_walk = BRW_TILEWALK_XMAJOR;
      break;
   case I915_TILING_Y:
      surf->ss3.tiled_surface = 1;
      surf->ss3.tile_walk = BRW_TILEWALK_YMAJOR;
      break;
   }
}

static drm_intel_bo *
brw_create_texture_surface( struct brw_context *brw,
                            struct brw_surface_key *key )
{
   struct brw_surface_state surf;
   drm_intel_bo *bo;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = translate_tex_target(key->target);
   surf.ss0.surface_format = translate_tex_format(key->format,
                                                  key->internal_format,
                                                  key->depthmode);

   /* This is OK for all textures with a channel width of 8 bits or less:
    */
   /* surf.ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */
   surf.ss1.base_addr = key->bo->offset; /* reloc */

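   /* SURFACE_STATE encodes dimensions, pitch and mip count as (value - 1),
    * which is why the fields below subtract one from the key values.
    */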
   surf.ss2.mip_count = key->last_level - key->first_level;
   surf.ss2.width = key->width - 1;
   surf.ss2.height = key->height - 1;
   brw_set_surface_tiling(&surf, key->tiling);
   surf.ss3.pitch = (key->pitch * key->cpp) - 1;
   surf.ss3.depth = key->depth - 1;

   surf.ss4.min_lod = 0;

   if (key->target == GL_TEXTURE_CUBE_MAP) {
      surf.ss0.cube_pos_x = 1;
      surf.ss0.cube_pos_y = 1;
      surf.ss0.cube_pos_z = 1;
      surf.ss0.cube_neg_x = 1;
      surf.ss0.cube_neg_y = 1;
      surf.ss0.cube_neg_z = 1;
   }

   bo = brw_upload_cache(&brw->surface_cache, BRW_SS_SURFACE,
                         key, sizeof(*key),
                         &key->bo, 1,
                         &surf, sizeof(surf));

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(bo, offsetof(struct brw_surface_state, ss1),
                           key->bo, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   return bo;
}

static void
brw_update_texture_surface( GLcontext *ctx, GLuint unit )
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][intelObj->firstLevel];
   struct brw_surface_key key;
   const GLuint surf = SURF_INDEX_TEXTURE(unit);

   memset(&key, 0, sizeof(key));

   key.format = firstImage->TexFormat;
   key.internal_format = firstImage->InternalFormat;
   key.pitch = intelObj->mt->region->pitch;
   key.depth = firstImage->Depth;
   key.bo = intelObj->mt->region->buffer;
   key.offset = 0;

   key.target = tObj->Target;
   key.depthmode = tObj->DepthMode;
   key.first_level = intelObj->firstLevel;
   key.last_level = intelObj->lastLevel;
   key.width = firstImage->Width;
   key.height = firstImage->Height;
   key.cpp = intelObj->mt->cpp;
   key.tiling = intelObj->mt->region->tiling;

   drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
   brw->wm.surf_bo[surf] = brw_search_cache(&brw->surface_cache,
                                            BRW_SS_SURFACE,
                                            &key, sizeof(key),
                                            &key.bo, 1,
                                            NULL);
   if (brw->wm.surf_bo[surf] == NULL) {
      brw->wm.surf_bo[surf] = brw_create_texture_surface(brw, &key);
   }
}



/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 */
drm_intel_bo *
brw_create_constant_surface( struct brw_context *brw,
                             struct brw_surface_key *key )
{
   const GLint w = key->width - 1;
   struct brw_surface_state surf;
   drm_intel_bo *bo;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = BRW_SURFACE_BUFFER;
   surf.ss0.surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   assert(key->bo);
   surf.ss1.base_addr = key->bo->offset; /* reloc */

   surf.ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
   surf.ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
   surf.ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
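   /* For example, a buffer of 256 elements has w = 255 = 0xff, which packs
    * as width = 0x7f, height = 0x1 and depth = 0x0: the element count minus
    * one, split across the three fields.
    */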
   surf.ss3.pitch = (key->pitch * key->cpp) - 1; /* ignored?? */
   brw_set_surface_tiling(&surf, key->tiling); /* tiling now allowed */

   bo = brw_upload_cache(&brw->surface_cache, BRW_SS_SURFACE,
                         key, sizeof(*key),
                         &key->bo, 1,
                         &surf, sizeof(surf));

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(bo, offsetof(struct brw_surface_state, ss1),
                           key->bo, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   return bo;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
prepare_wm_constants(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const struct gl_program_parameter_list *params = fp->program.Base.Parameters;
   const int size = params->NumParameters * 4 * sizeof(GLfloat);

   _mesa_load_state_parameters(ctx, fp->program.Base.Parameters);

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (!fp->use_const_buffer) {
      if (brw->wm.const_bo) {
         drm_intel_bo_unreference(brw->wm.const_bo);
         brw->wm.const_bo = NULL;
         brw->state.dirty.brw |= BRW_NEW_WM_CONSTBUF;
      }
      return;
   }

   drm_intel_bo_unreference(brw->wm.const_bo);
   brw->wm.const_bo = drm_intel_bo_alloc(intel->bufmgr, "vp_const_buffer",
                                         size, 64);

   /* _NEW_PROGRAM_CONSTANTS */
   drm_intel_bo_subdata(brw->wm.const_bo, 0, size, params->ParameterValues);
}

const struct brw_tracked_state brw_wm_constants = {
   .dirty = {
      .mesa = (_NEW_PROGRAM_CONSTANTS),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM),
      .cache = 0
   },
   .prepare = prepare_wm_constants,
};

/**
 * Update the surface state for a WM constant buffer.
 * The constant buffer will be (re)allocated here if needed.
 */
static void
brw_update_wm_constant_surface( GLcontext *ctx,
                                GLuint surf)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_surface_key key;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const struct gl_program_parameter_list *params =
      fp->program.Base.Parameters;

   /* If there's no constant buffer, then no surface BO is needed to point at
    * it.
    */
   if (brw->wm.const_bo == NULL) {
      drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
      brw->wm.surf_bo[surf] = NULL;
      return;
   }

   memset(&key, 0, sizeof(key));

   key.format = MESA_FORMAT_RGBA_FLOAT32;
   key.internal_format = GL_RGBA;
   key.bo = brw->wm.const_bo;
   key.depthmode = GL_NONE;
   key.pitch = params->NumParameters;
   key.width = params->NumParameters;
   key.height = 1;
   key.depth = 1;
   key.cpp = 16;
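   /* Each constant slot is one RGBA32F texel: 4 floats, 16 bytes, matching
    * the buffer size computed in prepare_wm_constants() above.
    */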

   /*
   printf("%s:\n", __FUNCTION__);
   printf(" width %d height %d depth %d cpp %d pitch %d\n",
          key.width, key.height, key.depth, key.cpp, key.pitch);
   */

   drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
   brw->wm.surf_bo[surf] = brw_search_cache(&brw->surface_cache,
                                            BRW_SS_SURFACE,
                                            &key, sizeof(key),
                                            &key.bo, 1,
                                            NULL);
   if (brw->wm.surf_bo[surf] == NULL) {
      brw->wm.surf_bo[surf] = brw_create_constant_surface(brw, &key);
   }
   brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
}

/**
 * Updates surface / buffer for fragment shader constant buffer, if
 * one is required.
 *
 * This consumes the state updates for the constant buffer, and produces
 * BRW_NEW_WM_SURFACES to get picked up by brw_prepare_wm_surfaces for
 * inclusion in the binding table.
 */
static void prepare_wm_constant_surface(struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   GLuint surf = SURF_INDEX_FRAG_CONST_BUFFER;

   /* If there's no constant buffer, then no surface BO is needed to point at
    * it.
    */
   if (brw->wm.const_bo == 0) {
      if (brw->wm.surf_bo[surf] != NULL) {
         drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
         brw->wm.surf_bo[surf] = NULL;
         brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
      }
      return;
   }

   brw_update_wm_constant_surface(ctx, surf);
}

const struct brw_tracked_state brw_wm_constant_surface = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_WM_CONSTBUF),
      .cache = 0
   },
   .prepare = prepare_wm_constant_surface,
};


/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static void
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;
   drm_intel_bo *region_bo = NULL;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_region *region = irb ? irb->region : NULL;
   struct {
      unsigned int surface_type;
      unsigned int surface_format;
      unsigned int width, height, pitch, cpp;
      GLubyte color_mask[4];
      GLboolean color_blend;
      uint32_t tiling;
      uint32_t draw_x;
      uint32_t draw_y;
   } key;

   memset(&key, 0, sizeof(key));

   if (region != NULL) {
      region_bo = region->buffer;

      key.surface_type = BRW_SURFACE_2D;
      switch (irb->Base.Format) {
      /* XRGB and ARGB are treated the same here because the chips in this
       * family cannot render to XRGB targets.  This means that we have to
       * mask writes to alpha (ala glColorMask) and reconfigure the alpha
       * blending hardware to use GL_ONE (or GL_ZERO) for cases where
       * GL_DST_ALPHA (or GL_ONE_MINUS_DST_ALPHA) is used.
       */
      case MESA_FORMAT_ARGB8888:
      case MESA_FORMAT_XRGB8888:
         key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         break;
      case MESA_FORMAT_RGB565:
         key.surface_format = BRW_SURFACEFORMAT_B5G6R5_UNORM;
         break;
      case MESA_FORMAT_ARGB1555:
         key.surface_format = BRW_SURFACEFORMAT_B5G5R5A1_UNORM;
         break;
      case MESA_FORMAT_ARGB4444:
         key.surface_format = BRW_SURFACEFORMAT_B4G4R4A4_UNORM;
         break;
      case MESA_FORMAT_A8:
         key.surface_format = BRW_SURFACEFORMAT_A8_UNORM;
         break;
      default:
         _mesa_problem(ctx, "Bad renderbuffer format: %d\n", irb->Base.Format);
      }
      key.tiling = region->tiling;
      key.width = rb->Width;
      key.height = rb->Height;
      key.pitch = region->pitch;
      key.cpp = region->cpp;
      key.draw_x = region->draw_x;
      key.draw_y = region->draw_y;
   } else {
      key.surface_type = BRW_SURFACE_NULL;
      key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      key.tiling = I915_TILING_X;
      key.width = 1;
      key.height = 1;
      key.cpp = 4;
      key.draw_x = 0;
      key.draw_y = 0;
   }

   if (intel->gen < 6) {
      /* _NEW_COLOR */
      memcpy(key.color_mask, ctx->Color.ColorMask[unit],
             sizeof(key.color_mask));

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0)
         key.color_mask[3] = GL_FALSE;

      key.color_blend = (!ctx->Color._LogicOpEnabled &&
                         (ctx->Color.BlendEnabled & (1 << unit)));
   }

   drm_intel_bo_unreference(brw->wm.surf_bo[unit]);
   brw->wm.surf_bo[unit] = brw_search_cache(&brw->surface_cache,
                                            BRW_SS_SURFACE,
                                            &key, sizeof(key),
                                            &region_bo, 1,
                                            NULL);

   if (brw->wm.surf_bo[unit] == NULL) {
      struct brw_surface_state surf;

      memset(&surf, 0, sizeof(surf));

      surf.ss0.surface_format = key.surface_format;
      surf.ss0.surface_type = key.surface_type;
      if (key.tiling == I915_TILING_NONE) {
         surf.ss1.base_addr = (key.draw_x + key.draw_y * key.pitch) * key.cpp;
      } else {
         uint32_t tile_base, tile_x, tile_y;
         uint32_t pitch = key.pitch * key.cpp;

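         /* Decompose the draw offset into a whole-tile base offset plus an
          * (x, y) offset within the tile.  An X tile is 512 bytes wide by
          * 8 rows and a Y tile is 128 bytes wide by 32 rows, so either way
          * a tile covers 4096 bytes, which is where the constants below
          * come from.
          */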
         if (key.tiling == I915_TILING_X) {
            tile_x = key.draw_x % (512 / key.cpp);
            tile_y = key.draw_y % 8;
            tile_base = ((key.draw_y / 8) * (8 * pitch));
            tile_base += (key.draw_x - tile_x) / (512 / key.cpp) * 4096;
         } else {
            /* Y */
            tile_x = key.draw_x % (128 / key.cpp);
            tile_y = key.draw_y % 32;
            tile_base = ((key.draw_y / 32) * (32 * pitch));
            tile_base += (key.draw_x - tile_x) / (128 / key.cpp) * 4096;
         }
         assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
         assert(tile_x % 4 == 0);
         assert(tile_y % 2 == 0);
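         /* ss5.x_offset is stored in units of 4 pixels and ss5.y_offset in
          * units of 2 rows, which is what the alignment asserts above
          * guarantee.
          */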
         /* Note that the low bits of these fields are missing, so
          * there's the possibility of getting in trouble.
          */
         surf.ss1.base_addr = tile_base;
         surf.ss5.x_offset = tile_x / 4;
         surf.ss5.y_offset = tile_y / 2;
      }
      if (region_bo != NULL)
         surf.ss1.base_addr += region_bo->offset; /* reloc */

      surf.ss2.width = key.width - 1;
      surf.ss2.height = key.height - 1;
      brw_set_surface_tiling(&surf, key.tiling);
      surf.ss3.pitch = (key.pitch * key.cpp) - 1;

      if (intel->gen < 6) {
         /* _NEW_COLOR */
         surf.ss0.color_blend = key.color_blend;
         surf.ss0.writedisable_red = !key.color_mask[0];
         surf.ss0.writedisable_green = !key.color_mask[1];
         surf.ss0.writedisable_blue = !key.color_mask[2];
         surf.ss0.writedisable_alpha = !key.color_mask[3];
      }

      /* The key size here will never match the key size used for texture
       * surfaces, so the two can't collide in the cache.
       */
      brw->wm.surf_bo[unit] = brw_upload_cache(&brw->surface_cache,
                                               BRW_SS_SURFACE,
                                               &key, sizeof(key),
                                               &region_bo, 1,
                                               &surf, sizeof(surf));
      if (region_bo != NULL) {
         /* We might sample from it, and we might render to it, so flag
          * them both.  We might be able to figure out from other state
          * a more restrictive relocation to emit.
          */
         drm_intel_bo_emit_reloc(brw->wm.surf_bo[unit],
                                 offsetof(struct brw_surface_state, ss1),
                                 region_bo,
                                 surf.ss1.base_addr - region_bo->offset,
                                 I915_GEM_DOMAIN_RENDER,
                                 I915_GEM_DOMAIN_RENDER);
      }
   }
}


/**
 * Constructs the binding table for the WM surface state, which maps unit
 * numbers to surface state objects.
 */
static drm_intel_bo *
brw_wm_get_binding_table(struct brw_context *brw)
{
   drm_intel_bo *bind_bo;

   assert(brw->wm.nr_surfaces <= BRW_WM_MAX_SURF);

   bind_bo = brw_search_cache(&brw->surface_cache, BRW_SS_SURF_BIND,
                              NULL, 0,
                              brw->wm.surf_bo, brw->wm.nr_surfaces,
                              NULL);

   if (bind_bo == NULL) {
      GLuint data_size = brw->wm.nr_surfaces * sizeof(GLuint);
      uint32_t data[BRW_WM_MAX_SURF];
      int i;

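      /* The binding table itself is just an array of 32-bit pointers to
       * surface state objects, indexed by binding table index; unused
       * entries are left as zero.
       */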
      for (i = 0; i < brw->wm.nr_surfaces; i++)
         if (brw->wm.surf_bo[i])
            data[i] = brw->wm.surf_bo[i]->offset;
         else
            data[i] = 0;

      bind_bo = brw_upload_cache( &brw->surface_cache, BRW_SS_SURF_BIND,
                                  NULL, 0,
                                  brw->wm.surf_bo, brw->wm.nr_surfaces,
                                  data, data_size);

      /* Emit binding table relocations to surface state */
      for (i = 0; i < BRW_WM_MAX_SURF; i++) {
         if (brw->wm.surf_bo[i] != NULL) {
            drm_intel_bo_emit_reloc(bind_bo, i * sizeof(GLuint),
                                    brw->wm.surf_bo[i], 0,
                                    I915_GEM_DOMAIN_INSTRUCTION, 0);
         }
      }
   }

   return bind_bo;
}

static void prepare_wm_surfaces(struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   GLuint i;
   int old_nr_surfaces;

   /* _NEW_BUFFERS | _NEW_COLOR */
   /* Update surfaces for drawing buffers */
   if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
         brw_update_renderbuffer_surface(brw,
                                         ctx->DrawBuffer->_ColorDrawBuffers[i],
                                         i);
      }
   } else {
      brw_update_renderbuffer_surface(brw, NULL, 0);
   }

   old_nr_surfaces = brw->wm.nr_surfaces;
   brw->wm.nr_surfaces = BRW_MAX_DRAW_BUFFERS;

   if (brw->wm.surf_bo[SURF_INDEX_FRAG_CONST_BUFFER] != NULL)
      brw->wm.nr_surfaces = SURF_INDEX_FRAG_CONST_BUFFER + 1;

   /* Update surfaces for textures */
   for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
      const GLuint surf = SURF_INDEX_TEXTURE(i);

      /* _NEW_TEXTURE, BRW_NEW_TEXDATA */
      if (texUnit->_ReallyEnabled) {
         brw_update_texture_surface(ctx, i);
         brw->wm.nr_surfaces = surf + 1;
      } else {
         drm_intel_bo_unreference(brw->wm.surf_bo[surf]);
         brw->wm.surf_bo[surf] = NULL;
      }
   }

   drm_intel_bo_unreference(brw->wm.bind_bo);
   brw->wm.bind_bo = brw_wm_get_binding_table(brw);

   if (brw->wm.nr_surfaces != old_nr_surfaces)
      brw->state.dirty.brw |= BRW_NEW_NR_WM_SURFACES;
}

const struct brw_tracked_state brw_wm_surfaces = {
   .dirty = {
      .mesa = (_NEW_COLOR |
               _NEW_TEXTURE |
               _NEW_BUFFERS),
      .brw = (BRW_NEW_CONTEXT |
              BRW_NEW_WM_SURFACES),
      .cache = 0
   },
   .prepare = prepare_wm_surfaces,
};