i965/formats: Update the three-channel DXT1 mappings
[mesa.git] src/mesa/drivers/dri/i965/brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
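/* MOCS (memory object control state) values, indexed by hardware
 * generation: the code below reads e.g. tex_mocs[brw->gen], so only
 * gen7 through gen9 have meaningful entries and other indices read 0.
 */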
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96 * actual layout of the miptree structure in memory -- you're entering
97 * dangerous territory: this can only possibly work if you only intend
98 * to access a single level and slice of the texture, and the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
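/* (minify(x, l) is effectively MAX2(x >> l, 1); e.g. a 256-texel
 * dimension minified by 3 levels becomes 32.)
 */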
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 struct brw_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 aux_bo = mt->mcs_buf->bo;
147 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
148 } else {
149 aux_bo = mt->hiz_buf->aux_base.bo;
150 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
151 }
152
153 /* We only really need a clear color if we also have an auxiliary
154 * surface. Without one, it does nothing.
155 */
156 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
157 }
158
159 void *state = brw_state_batch(brw,
160 brw->isl_dev.ss.size,
161 brw->isl_dev.ss.align,
162 surf_offset);
163
164 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
165 .address = mt->bo->offset64 + offset,
166 .aux_surf = aux_surf, .aux_usage = aux_usage,
167 .aux_address = aux_offset,
168 .mocs = mocs, .clear_color = clear_color,
169 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
170
171 brw_emit_reloc(&brw->batch, *surf_offset + brw->isl_dev.ss.addr_offset,
172 mt->bo, offset, read_domains, write_domains);
173
174 if (aux_surf) {
175 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
176 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
177 * contain other control information. Since buffer addresses are always
178 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
179 * an ordinary reloc to do the necessary address translation.
180 */
181 assert((aux_offset & 0xfff) == 0);
182 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
183 brw_emit_reloc(&brw->batch,
184 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
185 aux_bo, *aux_addr - aux_bo->offset64,
186 read_domains, write_domains);
187 }
188 }
189
190 uint32_t
191 brw_update_renderbuffer_surface(struct brw_context *brw,
192 struct gl_renderbuffer *rb,
193 uint32_t flags, unsigned unit /* unused */,
194 uint32_t surf_index)
195 {
196 struct gl_context *ctx = &brw->ctx;
197 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
198 struct intel_mipmap_tree *mt = irb->mt;
199
200 if (brw->gen < 9) {
201 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
202 }
203
204 assert(brw_render_target_supported(brw, rb));
205
206 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
207 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
208 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
209 __func__, _mesa_get_format_name(rb_format));
210 }
211
212 const unsigned layer_multiplier =
213 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
214 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
215 MAX2(irb->mt->num_samples, 1) : 1;
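/* (For the UMS and CMS layouts irb->mt_layer is expressed in sample
 * units rather than whole logical layers, so dividing by the sample
 * count below recovers the logical layer index.)
 */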
216
217 struct isl_view view = {
218 .format = brw->render_target_format[rb_format],
219 .base_level = irb->mt_level - irb->mt->first_level,
220 .levels = 1,
221 .base_array_layer = irb->mt_layer / layer_multiplier,
222 .array_len = MAX2(irb->layer_count, 1),
223 .swizzle = ISL_SWIZZLE_IDENTITY,
224 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
225 };
226
227 uint32_t offset;
228 brw_emit_surface_state(brw, mt, flags, mt->target, view,
229 rb_mocs[brw->gen],
230 &offset, surf_index,
231 I915_GEM_DOMAIN_RENDER,
232 I915_GEM_DOMAIN_RENDER);
233 return offset;
234 }
235
236 GLuint
237 translate_tex_target(GLenum target)
238 {
239 switch (target) {
240 case GL_TEXTURE_1D:
241 case GL_TEXTURE_1D_ARRAY_EXT:
242 return BRW_SURFACE_1D;
243
244 case GL_TEXTURE_RECTANGLE_NV:
245 return BRW_SURFACE_2D;
246
247 case GL_TEXTURE_2D:
248 case GL_TEXTURE_2D_ARRAY_EXT:
249 case GL_TEXTURE_EXTERNAL_OES:
250 case GL_TEXTURE_2D_MULTISAMPLE:
251 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
252 return BRW_SURFACE_2D;
253
254 case GL_TEXTURE_3D:
255 return BRW_SURFACE_3D;
256
257 case GL_TEXTURE_CUBE_MAP:
258 case GL_TEXTURE_CUBE_MAP_ARRAY:
259 return BRW_SURFACE_CUBE;
260
261 default:
262 unreachable("not reached");
263 }
264 }
265
266 uint32_t
267 brw_get_surface_tiling_bits(uint32_t tiling)
268 {
269 switch (tiling) {
270 case I915_TILING_X:
271 return BRW_SURFACE_TILED;
272 case I915_TILING_Y:
273 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
274 default:
275 return 0;
276 }
277 }
278
279
280 uint32_t
281 brw_get_surface_num_multisamples(unsigned num_samples)
282 {
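/* The legacy SURFACE_STATE packing used by the gens that call this only
 * distinguishes 1x from 4x, and 4x is the only multisampled count those
 * generations support.
 */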
283 if (num_samples > 1)
284 return BRW_SURFACE_MULTISAMPLECOUNT_4;
285 else
286 return BRW_SURFACE_MULTISAMPLECOUNT_1;
287 }
288
289 /**
290 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
291 * swizzling.
292 */
293 int
294 brw_get_texture_swizzle(const struct gl_context *ctx,
295 const struct gl_texture_object *t)
296 {
297 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
298
299 int swizzles[SWIZZLE_NIL + 1] = {
300 SWIZZLE_X,
301 SWIZZLE_Y,
302 SWIZZLE_Z,
303 SWIZZLE_W,
304 SWIZZLE_ZERO,
305 SWIZZLE_ONE,
306 SWIZZLE_NIL
307 };
308
309 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
310 img->_BaseFormat == GL_DEPTH_STENCIL) {
311 GLenum depth_mode = t->DepthMode;
312
313 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
314 * with depth component data specified with a sized internal format.
315 * Otherwise, it's left at the old default, GL_LUMINANCE.
316 */
317 if (_mesa_is_gles3(ctx) &&
318 img->InternalFormat != GL_DEPTH_COMPONENT &&
319 img->InternalFormat != GL_DEPTH_STENCIL) {
320 depth_mode = GL_RED;
321 }
322
323 switch (depth_mode) {
324 case GL_ALPHA:
325 swizzles[0] = SWIZZLE_ZERO;
326 swizzles[1] = SWIZZLE_ZERO;
327 swizzles[2] = SWIZZLE_ZERO;
328 swizzles[3] = SWIZZLE_X;
329 break;
330 case GL_LUMINANCE:
331 swizzles[0] = SWIZZLE_X;
332 swizzles[1] = SWIZZLE_X;
333 swizzles[2] = SWIZZLE_X;
334 swizzles[3] = SWIZZLE_ONE;
335 break;
336 case GL_INTENSITY:
337 swizzles[0] = SWIZZLE_X;
338 swizzles[1] = SWIZZLE_X;
339 swizzles[2] = SWIZZLE_X;
340 swizzles[3] = SWIZZLE_X;
341 break;
342 case GL_RED:
343 swizzles[0] = SWIZZLE_X;
344 swizzles[1] = SWIZZLE_ZERO;
345 swizzles[2] = SWIZZLE_ZERO;
346 swizzles[3] = SWIZZLE_ONE;
347 break;
348 }
349 }
350
351 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
352
353 /* If the texture's format is alpha-only, force R, G, and B to
354 * 0.0. Similarly, if the texture's format has no alpha channel,
355 * force the alpha value read to 1.0. This allows for the
356 * implementation to use an RGBA texture for any of these formats
357 * without leaking any unexpected values.
358 */
359 switch (img->_BaseFormat) {
360 case GL_ALPHA:
361 swizzles[0] = SWIZZLE_ZERO;
362 swizzles[1] = SWIZZLE_ZERO;
363 swizzles[2] = SWIZZLE_ZERO;
364 break;
365 case GL_LUMINANCE:
366 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
367 swizzles[0] = SWIZZLE_X;
368 swizzles[1] = SWIZZLE_X;
369 swizzles[2] = SWIZZLE_X;
370 swizzles[3] = SWIZZLE_ONE;
371 }
372 break;
373 case GL_LUMINANCE_ALPHA:
374 if (datatype == GL_SIGNED_NORMALIZED) {
375 swizzles[0] = SWIZZLE_X;
376 swizzles[1] = SWIZZLE_X;
377 swizzles[2] = SWIZZLE_X;
378 swizzles[3] = SWIZZLE_W;
379 }
380 break;
381 case GL_INTENSITY:
382 if (datatype == GL_SIGNED_NORMALIZED) {
383 swizzles[0] = SWIZZLE_X;
384 swizzles[1] = SWIZZLE_X;
385 swizzles[2] = SWIZZLE_X;
386 swizzles[3] = SWIZZLE_X;
387 }
388 break;
389 case GL_RED:
390 case GL_RG:
391 case GL_RGB:
392 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
393 img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
394 img->TexFormat == MESA_FORMAT_SRGB_DXT1)
395 swizzles[3] = SWIZZLE_ONE;
396 break;
397 }
398
399 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
400 swizzles[GET_SWZ(t->_Swizzle, 1)],
401 swizzles[GET_SWZ(t->_Swizzle, 2)],
402 swizzles[GET_SWZ(t->_Swizzle, 3)]);
403 }
404
405 /**
406 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
407 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
408 *
409 *   SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
410 *           0          1          2          3            4            5
411 *           4          5          6          7            0            1
412 *     SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,   SCS_ZERO,     SCS_ONE
413 *
414 * which is simply adding 4 then modding by 8 (or ANDing with 7).
415 *
416 * We may then need to apply workarounds for textureGather hardware bugs.
417 */
418 static unsigned
419 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
420 {
421 unsigned scs = (swizzle + 4) & 7;
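/* e.g. SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 = HSW_SCS_RED, and
 * SWIZZLE_ZERO (4) becomes (4 + 4) & 7 = 0 = HSW_SCS_ZERO.
 */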
422
423 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
424 }
425
426 static unsigned
427 brw_find_matching_rb(const struct gl_framebuffer *fb,
428 const struct intel_mipmap_tree *mt)
429 {
430 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
431 const struct intel_renderbuffer *irb =
432 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
433
434 if (irb && irb->mt == mt)
435 return i;
436 }
437
438 return fb->_NumColorDrawBuffers;
439 }
440
441 static inline bool
442 brw_texture_view_sane(const struct brw_context *brw,
443 const struct intel_mipmap_tree *mt,
444 const struct isl_view *view)
445 {
446 /* There are special cases only for lossless compression. */
447 if (!intel_miptree_is_lossless_compressed(brw, mt))
448 return true;
449
450 if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
451 return true;
452
453 /* Logic elsewhere needs to take care to resolve the color buffer prior
454 * to sampling it as non-compressed.
455 */
456 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
457 view->base_array_layer,
458 view->array_len))
459 return false;
460
461 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
462 const unsigned rb_index = brw_find_matching_rb(fb, mt);
463
464 if (rb_index == fb->_NumColorDrawBuffers)
465 return true;
466
467 /* Underlying surface is compressed but it is sampled using a format that
468 * the sampling engine doesn't support as compressed. Compression must be
469 * disabled for both the sampling engine and the data port in case the same
470 * surface is also used as a render target.
471 */
472 return brw->draw_aux_buffer_disabled[rb_index];
473 }
474
475 static bool
476 brw_disable_aux_surface(const struct brw_context *brw,
477 const struct intel_mipmap_tree *mt,
478 const struct isl_view *view)
479 {
480 /* Nothing to disable. */
481 if (!mt->mcs_buf)
482 return false;
483
484 const bool is_unresolved = intel_miptree_has_color_unresolved(
485 mt, view->base_level, view->levels,
486 view->base_array_layer, view->array_len);
487
488 /* There are special cases only for lossless compression. */
489 if (!intel_miptree_is_lossless_compressed(brw, mt))
490 return !is_unresolved;
491
492 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
493 const unsigned rb_index = brw_find_matching_rb(fb, mt);
494
495 /* If we are drawing into this with compression enabled, then we must also
496 * enable compression when texturing from it regardless of
497 * fast_clear_state. If we don't, then after the first draw call with
498 * this setup, there will be data in the CCS which won't get picked up by
499 * subsequent texturing operations as required by ARB_texture_barrier.
500 * Since we don't want to re-emit the binding table or do a resolve
501 * operation every draw call, the easiest thing to do is just enable
502 * compression on the texturing side. This is completely safe to do
503 * since, if compressed texturing weren't allowed, we would have disabled
504 * compression of render targets in whatever_that_function_is_called().
505 */
506 if (rb_index < fb->_NumColorDrawBuffers) {
507 if (brw->draw_aux_buffer_disabled[rb_index]) {
508 assert(!is_unresolved);
509 }
510
511 return brw->draw_aux_buffer_disabled[rb_index];
512 }
513
514 return !is_unresolved;
515 }
516
517 void
518 brw_update_texture_surface(struct gl_context *ctx,
519 unsigned unit,
520 uint32_t *surf_offset,
521 bool for_gather,
522 uint32_t plane)
523 {
524 struct brw_context *brw = brw_context(ctx);
525 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
526
527 if (obj->Target == GL_TEXTURE_BUFFER) {
528 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
529
530 } else {
531 struct intel_texture_object *intel_obj = intel_texture_object(obj);
532 struct intel_mipmap_tree *mt = intel_obj->mt;
533
534 if (plane > 0) {
535 if (mt->plane[plane - 1] == NULL)
536 return;
537 mt = mt->plane[plane - 1];
538 }
539
540 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
541 /* If this is a view with restricted NumLayers, then our effective depth
542 * is not just the miptree depth.
543 */
544 const unsigned view_num_layers =
545 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
546 mt->logical_depth0;
547
548 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
549 * texturing functions that return a float, as our code generation always
550 * selects the .x channel (which would always be 0).
551 */
552 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
553 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
554 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
555 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
556 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
557 brw_get_texture_swizzle(&brw->ctx, obj));
558
559 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
560 unsigned format = translate_tex_format(brw, mesa_fmt,
561 sampler->sRGBDecode);
562
563 /* Implement gen6 and gen7 gather work-around */
564 bool need_green_to_blue = false;
565 if (for_gather) {
566 if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
567 format == ISL_FORMAT_R32G32_SINT ||
568 format == ISL_FORMAT_R32G32_UINT)) {
569 format = ISL_FORMAT_R32G32_FLOAT_LD;
570 need_green_to_blue = brw->is_haswell;
571 } else if (brw->gen == 6) {
572 /* Sandybridge's gather4 message is broken for integer formats.
573 * To work around this, we pretend the surface is UNORM for
574 * 8 or 16-bit formats, and emit shader instructions to recover
575 * the real INT/UINT value. For 32-bit formats, we pretend
576 * the surface is FLOAT, and simply reinterpret the resulting
577 * bits.
578 */
579 switch (format) {
580 case ISL_FORMAT_R8_SINT:
581 case ISL_FORMAT_R8_UINT:
582 format = ISL_FORMAT_R8_UNORM;
583 break;
584
585 case ISL_FORMAT_R16_SINT:
586 case ISL_FORMAT_R16_UINT:
587 format = ISL_FORMAT_R16_UNORM;
588 break;
589
590 case ISL_FORMAT_R32_SINT:
591 case ISL_FORMAT_R32_UINT:
592 format = ISL_FORMAT_R32_FLOAT;
593 break;
594
595 default:
596 break;
597 }
598 }
599 }
600
601 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
602 if (brw->gen <= 7) {
603 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
604 mt = mt->r8stencil_mt;
605 } else {
606 mt = mt->stencil_mt;
607 }
608 format = ISL_FORMAT_R8_UINT;
609 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
610 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
611 mt = mt->r8stencil_mt;
612 format = ISL_FORMAT_R8_UINT;
613 }
614
615 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
616
617 struct isl_view view = {
618 .format = format,
619 .base_level = obj->MinLevel + obj->BaseLevel,
620 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
621 .base_array_layer = obj->MinLayer,
622 .array_len = view_num_layers,
623 .swizzle = {
624 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
625 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
626 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
627 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
628 },
629 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
630 };
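/* (MinLevel/MinLayer are non-zero only for ARB_texture_view style views
 * created with glTextureView(); for ordinary textures they are 0 and the
 * view starts at BaseLevel.)
 */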
631
632 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
633 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
634 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
635
636 assert(brw_texture_view_sane(brw, mt, &view));
637
638 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
639 INTEL_AUX_BUFFER_DISABLED : 0;
640 brw_emit_surface_state(brw, mt, flags, mt->target, view,
641 tex_mocs[brw->gen],
642 surf_offset, surf_index,
643 I915_GEM_DOMAIN_SAMPLER, 0);
644 }
645 }
646
647 void
648 brw_emit_buffer_surface_state(struct brw_context *brw,
649 uint32_t *out_offset,
650 struct brw_bo *bo,
651 unsigned buffer_offset,
652 unsigned surface_format,
653 unsigned buffer_size,
654 unsigned pitch,
655 bool rw)
656 {
657 uint32_t *dw = brw_state_batch(brw,
658 brw->isl_dev.ss.size,
659 brw->isl_dev.ss.align,
660 out_offset);
661
662 isl_buffer_fill_state(&brw->isl_dev, dw,
663 .address = (bo ? bo->offset64 : 0) + buffer_offset,
664 .size = buffer_size,
665 .format = surface_format,
666 .stride = pitch,
667 .mocs = tex_mocs[brw->gen]);
668
669 if (bo) {
670 brw_emit_reloc(&brw->batch, *out_offset + brw->isl_dev.ss.addr_offset,
671 bo, buffer_offset,
672 I915_GEM_DOMAIN_SAMPLER,
673 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
674 }
675 }
676
677 void
678 brw_update_buffer_texture_surface(struct gl_context *ctx,
679 unsigned unit,
680 uint32_t *surf_offset)
681 {
682 struct brw_context *brw = brw_context(ctx);
683 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
684 struct intel_buffer_object *intel_obj =
685 intel_buffer_object(tObj->BufferObject);
686 uint32_t size = tObj->BufferSize;
687 struct brw_bo *bo = NULL;
688 mesa_format format = tObj->_BufferObjectFormat;
689 uint32_t brw_format = brw_isl_format_for_mesa_format(format);
690 int texel_size = _mesa_get_format_bytes(format);
691
692 if (intel_obj) {
693 size = MIN2(size, intel_obj->Base.Size);
694 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
695 }
696
697 /* The ARB_texture_buffer_specification says:
698 *
699 * "The number of texels in the buffer texture's texel array is given by
700 *
701 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
702 *
703 * where <buffer_size> is the size of the buffer object, in basic
704 * machine units and <components> and <base_type> are the element count
705 * and base data type for elements, as specified in Table X.1. The
706 * number of texels in the texel array is then clamped to the
707 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
708 *
709 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
710 * so that when ISL divides by stride to obtain the number of texels, that
711 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
712 */
713 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
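/* (As a hypothetical example: if MAX_TEXTURE_BUFFER_SIZE were 2^27 texels
 * and the format is 16-byte RGBA32F, the byte size would be capped at
 * 2^31 bytes.)
 */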
714
715 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
716 _mesa_problem(NULL, "bad format %s for texture buffer\n",
717 _mesa_get_format_name(format));
718 }
719
720 brw_emit_buffer_surface_state(brw, surf_offset, bo,
721 tObj->BufferOffset,
722 brw_format,
723 size,
724 texel_size,
725 false /* rw */);
726 }
727
728 /**
729 * Create the constant buffer surface. Vertex/fragment shader constants will be
730 * read from this buffer with Data Port Read instructions/messages.
731 */
732 void
733 brw_create_constant_surface(struct brw_context *brw,
734 struct brw_bo *bo,
735 uint32_t offset,
736 uint32_t size,
737 uint32_t *out_offset)
738 {
739 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
740 ISL_FORMAT_R32G32B32A32_FLOAT,
741 size, 1, false);
742 }
743
744 /**
745 * Create the buffer surface. Shader buffer variables will be
746 * read from / written to this buffer with Data Port Read/Write
747 * instructions/messages.
748 */
749 void
750 brw_create_buffer_surface(struct brw_context *brw,
751 struct brw_bo *bo,
752 uint32_t offset,
753 uint32_t size,
754 uint32_t *out_offset)
755 {
756 /* Use a raw surface so we can reuse existing untyped read/write/atomic
757 * messages. We need these specifically for the fragment shader since they
758 * include a pixel mask header that we need to ensure correct behavior
759 * with helper invocations, which cannot write to the buffer.
760 */
761 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
762 ISL_FORMAT_RAW,
763 size, 1, true);
764 }
765
766 /**
767 * Set up a binding table entry for use by stream output logic (transform
768 * feedback).
769 *
770 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
771 */
772 void
773 brw_update_sol_surface(struct brw_context *brw,
774 struct gl_buffer_object *buffer_obj,
775 uint32_t *out_offset, unsigned num_vector_components,
776 unsigned stride_dwords, unsigned offset_dwords)
777 {
778 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
779 uint32_t offset_bytes = 4 * offset_dwords;
780 struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
781 offset_bytes,
782 buffer_obj->Size - offset_bytes);
783 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
784 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
785 size_t size_dwords = buffer_obj->Size / 4;
786 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
787
788 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
789 * too big to map using a single binding table entry?
790 */
791 assert((size_dwords - offset_dwords) / stride_dwords
792 <= BRW_MAX_NUM_BUFFER_ENTRIES);
793
794 if (size_dwords > offset_dwords + num_vector_components) {
795 /* There is room for at least 1 transform feedback output in the buffer.
796 * Compute the number of additional transform feedback outputs the
797 * buffer has room for.
798 */
799 buffer_size_minus_1 =
800 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
801 } else {
802 /* There isn't even room for a single transform feedback output in the
803 * buffer. We can't configure the binding table entry to prevent output
804 * entirely; we'll have to rely on the geometry shader to detect
805 * overflow. But to minimize the damage in case of a bug, set up the
806 * binding table entry to just allow a single output.
807 */
808 buffer_size_minus_1 = 0;
809 }
810 width = buffer_size_minus_1 & 0x7f;
811 height = (buffer_size_minus_1 & 0xfff80) >> 7;
812 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
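/* buffer_size_minus_1 is split across the legacy SURFACE_STATE size
 * fields: bits 0..6 go in Width, bits 7..19 in Height and bits 20..26 in
 * Depth, giving a 27-bit entry count.
 */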
813
814 switch (num_vector_components) {
815 case 1:
816 surface_format = ISL_FORMAT_R32_FLOAT;
817 break;
818 case 2:
819 surface_format = ISL_FORMAT_R32G32_FLOAT;
820 break;
821 case 3:
822 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
823 break;
824 case 4:
825 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
826 break;
827 default:
828 unreachable("Invalid vector size for transform feedback output");
829 }
830
831 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
832 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
833 surface_format << BRW_SURFACE_FORMAT_SHIFT |
834 BRW_SURFACE_RC_READ_WRITE;
835 surf[1] = bo->offset64 + offset_bytes; /* reloc */
836 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
837 height << BRW_SURFACE_HEIGHT_SHIFT);
838 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
839 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
840 surf[4] = 0;
841 surf[5] = 0;
842
843 /* Emit relocation to surface contents. */
844 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, offset_bytes,
845 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
846 }
847
848 /* Creates a new WM constant buffer reflecting the current fragment program's
849 * constants, if needed by the fragment program.
850 *
851 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
852 * state atom.
853 */
854 static void
855 brw_upload_wm_pull_constants(struct brw_context *brw)
856 {
857 struct brw_stage_state *stage_state = &brw->wm.base;
858 /* BRW_NEW_FRAGMENT_PROGRAM */
859 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
860 /* BRW_NEW_FS_PROG_DATA */
861 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
862
863 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
864 /* _NEW_PROGRAM_CONSTANTS */
865 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
866 stage_state, prog_data);
867 }
868
869 const struct brw_tracked_state brw_wm_pull_constants = {
870 .dirty = {
871 .mesa = _NEW_PROGRAM_CONSTANTS,
872 .brw = BRW_NEW_BATCH |
873 BRW_NEW_BLORP |
874 BRW_NEW_FRAGMENT_PROGRAM |
875 BRW_NEW_FS_PROG_DATA,
876 },
877 .emit = brw_upload_wm_pull_constants,
878 };
879
880 /**
881 * Creates a null renderbuffer surface.
882 *
883 * This is used when the shader doesn't write to any color output. An FB
884 * write to target 0 will still be emitted, because that's how the thread is
885 * terminated (and computed depth is returned), so we need to have the
886 * hardware discard the target 0 color output.
887 */
888 static void
889 brw_emit_null_surface_state(struct brw_context *brw,
890 unsigned width,
891 unsigned height,
892 unsigned samples,
893 uint32_t *out_offset)
894 {
895 /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
896 * Notes):
897 *
898 * A null surface will be used in instances where an actual surface is
899 * not bound. When a write message is generated to a null surface, no
900 * actual surface is written to. When a read message (including any
901 * sampling engine message) is generated to a null surface, the result
902 * is all zeros. Note that a null surface type is allowed to be used
903 * with all messages, even if it is not specifically indicated as
904 * supported. All of the remaining fields in surface state are ignored
905 * for null surfaces, with the following exceptions:
906 *
907 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
908 * depth buffer’s corresponding state for all render target surfaces,
909 * including null.
910 *
911 * - Surface Format must be R8G8B8A8_UNORM.
912 */
913 unsigned surface_type = BRW_SURFACE_NULL;
914 struct brw_bo *bo = NULL;
915 unsigned pitch_minus_1 = 0;
916 uint32_t multisampling_state = 0;
917 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
918
919 if (samples > 1) {
920 /* On Gen6, null render targets seem to cause GPU hangs when
921 * multisampling. So work around this problem by rendering into a dummy
922 * color buffer.
923 *
924 * To decrease the amount of memory needed by the workaround buffer, we
925 * set its pitch to 128 bytes (the width of a Y tile). This means that
926 * the amount of memory needed for the workaround buffer is
927 * (width_in_tiles + height_in_tiles - 1) tiles.
928 *
929 * Note that since the workaround buffer will be interpreted by the
930 * hardware as an interleaved multisampled buffer, we need to compute
931 * width_in_tiles and height_in_tiles by dividing the width and height
932 * by 16 rather than the normal Y-tile size of 32.
933 */
934 unsigned width_in_tiles = ALIGN(width, 16) / 16;
935 unsigned height_in_tiles = ALIGN(height, 16) / 16;
936 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
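/* (For example, a 1920x1080 framebuffer gives 120 + 68 - 1 = 187 tiles,
 * i.e. about 748 KiB for the dummy buffer.)
 */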
937 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
938 size_needed);
939 bo = brw->wm.multisampled_null_render_target_bo;
940 surface_type = BRW_SURFACE_2D;
941 pitch_minus_1 = 127;
942 multisampling_state = brw_get_surface_num_multisamples(samples);
943 }
944
945 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
946 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
947 if (brw->gen < 6) {
948 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
949 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
950 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
951 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
952 }
953 surf[1] = bo ? bo->offset64 : 0;
954 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
955 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
956
957 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
958 * Notes):
959 *
960 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
961 */
962 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
963 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
964 surf[4] = multisampling_state;
965 surf[5] = 0;
966
967 if (bo) {
968 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, 0,
969 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
970 }
971 }
972
973 /**
974 * Sets up a surface state structure to point at the given region.
975 * While it is only used for the front/back buffer currently, it should be
976 * usable for further buffers when doing ARB_draw_buffers support.
977 */
978 static uint32_t
979 gen4_update_renderbuffer_surface(struct brw_context *brw,
980 struct gl_renderbuffer *rb,
981 uint32_t flags, unsigned unit,
982 uint32_t surf_index)
983 {
984 struct gl_context *ctx = &brw->ctx;
985 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
986 struct intel_mipmap_tree *mt = irb->mt;
987 uint32_t *surf;
988 uint32_t tile_x, tile_y;
989 uint32_t format = 0;
990 uint32_t offset;
991 /* _NEW_BUFFERS */
992 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
993 /* BRW_NEW_FS_PROG_DATA */
994
995 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
996 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
997
998 if (rb->TexImage && !brw->has_surface_tile_offset) {
999 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
1000
1001 if (tile_x != 0 || tile_y != 0) {
1002 /* Original gen4 hardware couldn't draw to a non-tile-aligned
1003 * destination in a miptree unless you actually set up your renderbuffer
1004 * as a miptree and used the fragile lod/array_index/etc. controls to
1005 * select the image. So, instead, we just make a new single-level
1006 * miptree and render into that.
1007 */
1008 intel_renderbuffer_move_to_temp(brw, irb, false);
1009 mt = irb->mt;
1010 }
1011 }
1012
1013 surf = brw_state_batch(brw, 6 * 4, 32, &offset);
1014
1015 format = brw->render_target_format[rb_format];
1016 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1017 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1018 __func__, _mesa_get_format_name(rb_format));
1019 }
1020
1021 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1022 format << BRW_SURFACE_FORMAT_SHIFT);
1023
1024 /* reloc */
1025 assert(mt->offset % mt->cpp == 0);
1026 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1027 mt->bo->offset64 + mt->offset);
1028
1029 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1030 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1031
1032 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1033 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1034
1035 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1036
1037 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1038 /* Note that the low bits of these fields are missing, so
1039 * there's the possibility of getting in trouble.
1040 */
1041 assert(tile_x % 4 == 0);
1042 assert(tile_y % 2 == 0);
1043 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1044 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1045 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1046
1047 if (brw->gen < 6) {
1048 /* _NEW_COLOR */
1049 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1050 (ctx->Color.BlendEnabled & (1 << unit)))
1051 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1052
1053 if (!ctx->Color.ColorMask[unit][0])
1054 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1055 if (!ctx->Color.ColorMask[unit][1])
1056 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1057 if (!ctx->Color.ColorMask[unit][2])
1058 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1059
1060 /* As mentioned above, disable writes to the alpha component when the
1061 * renderbuffer is XRGB.
1062 */
1063 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1064 !ctx->Color.ColorMask[unit][3]) {
1065 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1066 }
1067 }
1068
1069 brw_emit_reloc(&brw->batch, offset + 4, mt->bo, surf[1] - mt->bo->offset64,
1070 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
1071
1072 return offset;
1073 }
1074
1075 /**
1076 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1077 */
1078 void
1079 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1080 const struct gl_framebuffer *fb,
1081 uint32_t render_target_start,
1082 uint32_t *surf_offset)
1083 {
1084 GLuint i;
1085 const unsigned int w = _mesa_geometric_width(fb);
1086 const unsigned int h = _mesa_geometric_height(fb);
1087 const unsigned int s = _mesa_geometric_samples(fb);
1088
1089 /* Update surfaces for drawing buffers */
1090 if (fb->_NumColorDrawBuffers >= 1) {
1091 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1092 const uint32_t surf_index = render_target_start + i;
1093 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1094 INTEL_RENDERBUFFER_LAYERED : 0) |
1095 (brw->draw_aux_buffer_disabled[i] ?
1096 INTEL_AUX_BUFFER_DISABLED : 0);
1097
1098 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1099 surf_offset[surf_index] =
1100 brw->vtbl.update_renderbuffer_surface(
1101 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1102 } else {
1103 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1104 &surf_offset[surf_index]);
1105 }
1106 }
1107 } else {
1108 const uint32_t surf_index = render_target_start;
1109 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1110 &surf_offset[surf_index]);
1111 }
1112 }
1113
1114 static void
1115 update_renderbuffer_surfaces(struct brw_context *brw)
1116 {
1117 const struct gl_context *ctx = &brw->ctx;
1118
1119 /* BRW_NEW_FS_PROG_DATA */
1120 const struct brw_wm_prog_data *wm_prog_data =
1121 brw_wm_prog_data(brw->wm.base.prog_data);
1122
1123 /* _NEW_BUFFERS | _NEW_COLOR */
1124 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1125 brw_update_renderbuffer_surfaces(
1126 brw, fb,
1127 wm_prog_data->binding_table.render_target_start,
1128 brw->wm.base.surf_offset);
1129 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1130 }
1131
1132 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1133 .dirty = {
1134 .mesa = _NEW_BUFFERS |
1135 _NEW_COLOR,
1136 .brw = BRW_NEW_BATCH |
1137 BRW_NEW_BLORP |
1138 BRW_NEW_FS_PROG_DATA,
1139 },
1140 .emit = update_renderbuffer_surfaces,
1141 };
1142
1143 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1144 .dirty = {
1145 .mesa = _NEW_BUFFERS,
1146 .brw = BRW_NEW_BATCH |
1147 BRW_NEW_BLORP,
1148 },
1149 .emit = update_renderbuffer_surfaces,
1150 };
1151
1152 static void
1153 update_renderbuffer_read_surfaces(struct brw_context *brw)
1154 {
1155 const struct gl_context *ctx = &brw->ctx;
1156
1157 /* BRW_NEW_FS_PROG_DATA */
1158 const struct brw_wm_prog_data *wm_prog_data =
1159 brw_wm_prog_data(brw->wm.base.prog_data);
1160
1161 /* BRW_NEW_FRAGMENT_PROGRAM */
1162 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1163 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1164 /* _NEW_BUFFERS */
1165 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1166
1167 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1168 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1169 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1170 const unsigned surf_index =
1171 wm_prog_data->binding_table.render_target_read_start + i;
1172 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1173
1174 if (irb) {
1175 const unsigned format = brw->render_target_format[
1176 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1177 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1178 format));
1179
1180 /* Override the target of the texture if the render buffer is a
1181 * single slice of a 3D texture (since the minimum array element
1182 * field of the surface state structure is ignored by the sampler
1183 * unit for 3D textures on some hardware), or if the render buffer
1184 * is a 1D array (since shaders always provide the array index
1185 * coordinate at the Z component to avoid state-dependent
1186 * recompiles when changing the texture target of the
1187 * framebuffer).
1188 */
1189 const GLenum target =
1190 (irb->mt->target == GL_TEXTURE_3D &&
1191 irb->layer_count == 1) ? GL_TEXTURE_2D :
1192 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1193 irb->mt->target;
1194
1195 /* intel_renderbuffer::mt_layer is expressed in sample units for
1196 * the UMS and CMS multisample layouts, but
1197 * intel_renderbuffer::layer_count is expressed in units of whole
1198 * logical layers regardless of the multisample layout.
1199 */
1200 const unsigned mt_layer_unit =
1201 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1202 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1203 MAX2(irb->mt->num_samples, 1) : 1;
1204
1205 const struct isl_view view = {
1206 .format = format,
1207 .base_level = irb->mt_level - irb->mt->first_level,
1208 .levels = 1,
1209 .base_array_layer = irb->mt_layer / mt_layer_unit,
1210 .array_len = irb->layer_count,
1211 .swizzle = ISL_SWIZZLE_IDENTITY,
1212 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1213 };
1214
1215 const int flags = brw->draw_aux_buffer_disabled[i] ?
1216 INTEL_AUX_BUFFER_DISABLED : 0;
1217 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1218 tex_mocs[brw->gen],
1219 surf_offset, surf_index,
1220 I915_GEM_DOMAIN_SAMPLER, 0);
1221
1222 } else {
1223 brw->vtbl.emit_null_surface_state(
1224 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1225 _mesa_geometric_samples(fb), surf_offset);
1226 }
1227 }
1228
1229 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1230 }
1231 }
1232
1233 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1234 .dirty = {
1235 .mesa = _NEW_BUFFERS,
1236 .brw = BRW_NEW_BATCH |
1237 BRW_NEW_FRAGMENT_PROGRAM |
1238 BRW_NEW_FS_PROG_DATA,
1239 },
1240 .emit = update_renderbuffer_read_surfaces,
1241 };
1242
1243 static void
1244 update_stage_texture_surfaces(struct brw_context *brw,
1245 const struct gl_program *prog,
1246 struct brw_stage_state *stage_state,
1247 bool for_gather, uint32_t plane)
1248 {
1249 if (!prog)
1250 return;
1251
1252 struct gl_context *ctx = &brw->ctx;
1253
1254 uint32_t *surf_offset = stage_state->surf_offset;
1255
1256 /* BRW_NEW_*_PROG_DATA */
1257 if (for_gather)
1258 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1259 else
1260 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1261
1262 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1263 for (unsigned s = 0; s < num_samplers; s++) {
1264 surf_offset[s] = 0;
1265
1266 if (prog->SamplersUsed & (1 << s)) {
1267 const unsigned unit = prog->SamplerUnits[s];
1268
1269 /* _NEW_TEXTURE */
1270 if (ctx->Texture.Unit[unit]._Current) {
1271 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1272 }
1273 }
1274 }
1275 }
1276
1277
1278 /**
1279 * Construct SURFACE_STATE objects for enabled textures.
1280 */
1281 static void
1282 brw_update_texture_surfaces(struct brw_context *brw)
1283 {
1284 /* BRW_NEW_VERTEX_PROGRAM */
1285 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1286
1287 /* BRW_NEW_TESS_PROGRAMS */
1288 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1289 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1290
1291 /* BRW_NEW_GEOMETRY_PROGRAM */
1292 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1293
1294 /* BRW_NEW_FRAGMENT_PROGRAM */
1295 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1296
1297 /* _NEW_TEXTURE */
1298 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1299 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1300 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1301 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1302 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1303
1304 /* Emit an alternate set of surface state for gather. This
1305 * allows the surface format to be overridden for only the
1306 * gather4 messages. */
1307 if (brw->gen < 8) {
1308 if (vs && vs->nir->info.uses_texture_gather)
1309 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1310 if (tcs && tcs->nir->info.uses_texture_gather)
1311 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1312 if (tes && tes->nir->info.uses_texture_gather)
1313 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1314 if (gs && gs->nir->info.uses_texture_gather)
1315 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1316 if (fs && fs->nir->info.uses_texture_gather)
1317 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1318 }
1319
1320 if (fs) {
1321 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1322 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1323 }
1324
1325 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1326 }
1327
1328 const struct brw_tracked_state brw_texture_surfaces = {
1329 .dirty = {
1330 .mesa = _NEW_TEXTURE,
1331 .brw = BRW_NEW_BATCH |
1332 BRW_NEW_BLORP |
1333 BRW_NEW_FRAGMENT_PROGRAM |
1334 BRW_NEW_FS_PROG_DATA |
1335 BRW_NEW_GEOMETRY_PROGRAM |
1336 BRW_NEW_GS_PROG_DATA |
1337 BRW_NEW_TESS_PROGRAMS |
1338 BRW_NEW_TCS_PROG_DATA |
1339 BRW_NEW_TES_PROG_DATA |
1340 BRW_NEW_TEXTURE_BUFFER |
1341 BRW_NEW_VERTEX_PROGRAM |
1342 BRW_NEW_VS_PROG_DATA,
1343 },
1344 .emit = brw_update_texture_surfaces,
1345 };
1346
1347 static void
1348 brw_update_cs_texture_surfaces(struct brw_context *brw)
1349 {
1350 /* BRW_NEW_COMPUTE_PROGRAM */
1351 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1352
1353 /* _NEW_TEXTURE */
1354 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1355
1356 /* Emit an alternate set of surface state for gather. This
1357 * allows the surface format to be overridden for only the
1358 * gather4 messages.
1359 */
1360 if (brw->gen < 8) {
1361 if (cs && cs->nir->info.uses_texture_gather)
1362 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1363 }
1364
1365 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1366 }
1367
1368 const struct brw_tracked_state brw_cs_texture_surfaces = {
1369 .dirty = {
1370 .mesa = _NEW_TEXTURE,
1371 .brw = BRW_NEW_BATCH |
1372 BRW_NEW_BLORP |
1373 BRW_NEW_COMPUTE_PROGRAM,
1374 },
1375 .emit = brw_update_cs_texture_surfaces,
1376 };
1377
1378
1379 void
1380 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1381 struct brw_stage_state *stage_state,
1382 struct brw_stage_prog_data *prog_data)
1383 {
1384 struct gl_context *ctx = &brw->ctx;
1385
1386 if (!prog)
1387 return;
1388
1389 uint32_t *ubo_surf_offsets =
1390 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1391
1392 for (int i = 0; i < prog->info.num_ubos; i++) {
1393 struct gl_uniform_buffer_binding *binding =
1394 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1395
1396 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1397 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1398 } else {
1399 struct intel_buffer_object *intel_bo =
1400 intel_buffer_object(binding->BufferObject);
1401 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1402 if (!binding->AutomaticSize)
1403 size = MIN2(size, binding->Size);
1404 struct brw_bo *bo =
1405 intel_bufferobj_buffer(brw, intel_bo,
1406 binding->Offset,
1407 size);
1408 brw_create_constant_surface(brw, bo, binding->Offset,
1409 size,
1410 &ubo_surf_offsets[i]);
1411 }
1412 }
1413
1414 uint32_t *ssbo_surf_offsets =
1415 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1416
1417 for (int i = 0; i < prog->info.num_ssbos; i++) {
1418 struct gl_shader_storage_buffer_binding *binding =
1419 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1420
1421 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1422 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1423 } else {
1424 struct intel_buffer_object *intel_bo =
1425 intel_buffer_object(binding->BufferObject);
1426 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1427 if (!binding->AutomaticSize)
1428 size = MIN2(size, binding->Size);
1429 struct brw_bo *bo =
1430 intel_bufferobj_buffer(brw, intel_bo,
1431 binding->Offset,
1432 size);
1433 brw_create_buffer_surface(brw, bo, binding->Offset,
1434 size,
1435 &ssbo_surf_offsets[i]);
1436 }
1437 }
1438
1439 if (prog->info.num_ubos || prog->info.num_ssbos)
1440 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1441 }
1442
1443 static void
1444 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1445 {
1446 struct gl_context *ctx = &brw->ctx;
1447 /* _NEW_PROGRAM */
1448 struct gl_program *prog = ctx->FragmentProgram._Current;
1449
1450 /* BRW_NEW_FS_PROG_DATA */
1451 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1452 }
1453
1454 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1455 .dirty = {
1456 .mesa = _NEW_PROGRAM,
1457 .brw = BRW_NEW_BATCH |
1458 BRW_NEW_BLORP |
1459 BRW_NEW_FS_PROG_DATA |
1460 BRW_NEW_UNIFORM_BUFFER,
1461 },
1462 .emit = brw_upload_wm_ubo_surfaces,
1463 };
1464
1465 static void
1466 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1467 {
1468 struct gl_context *ctx = &brw->ctx;
1469 /* _NEW_PROGRAM */
1470 struct gl_program *prog =
1471 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1472
1473 /* BRW_NEW_CS_PROG_DATA */
1474 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1475 }
1476
1477 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1478 .dirty = {
1479 .mesa = _NEW_PROGRAM,
1480 .brw = BRW_NEW_BATCH |
1481 BRW_NEW_BLORP |
1482 BRW_NEW_CS_PROG_DATA |
1483 BRW_NEW_UNIFORM_BUFFER,
1484 },
1485 .emit = brw_upload_cs_ubo_surfaces,
1486 };
1487
1488 void
1489 brw_upload_abo_surfaces(struct brw_context *brw,
1490 const struct gl_program *prog,
1491 struct brw_stage_state *stage_state,
1492 struct brw_stage_prog_data *prog_data)
1493 {
1494 struct gl_context *ctx = &brw->ctx;
1495 uint32_t *surf_offsets =
1496 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1497
1498 if (prog->info.num_abos) {
1499 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1500 struct gl_atomic_buffer_binding *binding =
1501 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1502 struct intel_buffer_object *intel_bo =
1503 intel_buffer_object(binding->BufferObject);
1504 struct brw_bo *bo = intel_bufferobj_buffer(
1505 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1506
1507 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1508 binding->Offset, ISL_FORMAT_RAW,
1509 bo->size - binding->Offset, 1, true);
1510 }
1511
1512 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1513 }
1514 }
1515
1516 static void
1517 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1518 {
1519 /* _NEW_PROGRAM */
1520 const struct gl_program *wm = brw->fragment_program;
1521
1522 if (wm) {
1523 /* BRW_NEW_FS_PROG_DATA */
1524 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1525 }
1526 }
1527
1528 const struct brw_tracked_state brw_wm_abo_surfaces = {
1529 .dirty = {
1530 .mesa = _NEW_PROGRAM,
1531 .brw = BRW_NEW_ATOMIC_BUFFER |
1532 BRW_NEW_BLORP |
1533 BRW_NEW_BATCH |
1534 BRW_NEW_FS_PROG_DATA,
1535 },
1536 .emit = brw_upload_wm_abo_surfaces,
1537 };
1538
1539 static void
1540 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1541 {
1542 /* _NEW_PROGRAM */
1543 const struct gl_program *cp = brw->compute_program;
1544
1545 if (cp) {
1546 /* BRW_NEW_CS_PROG_DATA */
1547 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1548 }
1549 }
1550
1551 const struct brw_tracked_state brw_cs_abo_surfaces = {
1552 .dirty = {
1553 .mesa = _NEW_PROGRAM,
1554 .brw = BRW_NEW_ATOMIC_BUFFER |
1555 BRW_NEW_BLORP |
1556 BRW_NEW_BATCH |
1557 BRW_NEW_CS_PROG_DATA,
1558 },
1559 .emit = brw_upload_cs_abo_surfaces,
1560 };
1561
1562 static void
1563 brw_upload_cs_image_surfaces(struct brw_context *brw)
1564 {
1565 /* _NEW_PROGRAM */
1566 const struct gl_program *cp = brw->compute_program;
1567
1568 if (cp) {
1569 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1570 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1571 brw->cs.base.prog_data);
1572 }
1573 }
1574
1575 const struct brw_tracked_state brw_cs_image_surfaces = {
1576 .dirty = {
1577 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1578 .brw = BRW_NEW_BATCH |
1579 BRW_NEW_BLORP |
1580 BRW_NEW_CS_PROG_DATA |
1581 BRW_NEW_IMAGE_UNITS
1582 },
1583 .emit = brw_upload_cs_image_surfaces,
1584 };
1585
1586 static uint32_t
1587 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1588 {
1589 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1590 uint32_t hw_format = brw_isl_format_for_mesa_format(format);
1591 if (access == GL_WRITE_ONLY) {
1592 return hw_format;
1593 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1594 /* Typed surface reads support a very limited subset of the shader
1595 * image formats. Translate it into the closest format the
1596 * hardware supports.
1597 */
1598 return isl_lower_storage_image_format(devinfo, hw_format);
1599 } else {
1600 /* The hardware doesn't actually support a typed format that we can use
1601 * so we have to fall back to untyped read/write messages.
1602 */
1603 return ISL_FORMAT_RAW;
1604 }
1605 }
1606
1607 static void
1608 update_default_image_param(struct brw_context *brw,
1609 struct gl_image_unit *u,
1610 unsigned surface_idx,
1611 struct brw_image_param *param)
1612 {
1613 memset(param, 0, sizeof(*param));
1614 param->surface_idx = surface_idx;
1615 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1616 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1617 * detailed explanation of these parameters.
1618 */
1619 param->swizzling[0] = 0xff;
1620 param->swizzling[1] = 0xff;
1621 }
1622
1623 static void
1624 update_buffer_image_param(struct brw_context *brw,
1625 struct gl_image_unit *u,
1626 unsigned surface_idx,
1627 struct brw_image_param *param)
1628 {
1629 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1630 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1631 update_default_image_param(brw, u, surface_idx, param);
1632
1633 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1634 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1635 }
1636
1637 static void
1638 update_texture_image_param(struct brw_context *brw,
1639 struct gl_image_unit *u,
1640 unsigned surface_idx,
1641 struct brw_image_param *param)
1642 {
1643 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1644
1645 update_default_image_param(brw, u, surface_idx, param);
1646
1647 param->size[0] = minify(mt->logical_width0, u->Level);
1648 param->size[1] = minify(mt->logical_height0, u->Level);
1649 param->size[2] = (!u->Layered ? 1 :
1650 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1651 u->TexObj->Target == GL_TEXTURE_3D ?
1652 minify(mt->logical_depth0, u->Level) :
1653 mt->logical_depth0);
1654
1655 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1656 &param->offset[0],
1657 &param->offset[1]);
1658
1659 param->stride[0] = mt->cpp;
1660 param->stride[1] = mt->pitch / mt->cpp;
1661 param->stride[2] =
1662 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1663 param->stride[3] =
1664 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1665
1666 if (mt->tiling == I915_TILING_X) {
1667 /* An X tile is a rectangular block of 512 bytes x 8 rows. */
1668 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1669 param->tiling[1] = _mesa_logbase2(8);
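/* (For a 4-byte-per-texel format this works out to tiling[0] =
 * log2(512 / 4) = 7 and tiling[1] = log2(8) = 3, i.e. 128x8-texel
 * tiles.)
 */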
1670
1671 if (brw->has_swizzling) {
1672 /* Right shifts required to swizzle bits 9 and 10 of the memory
1673 * address with bit 6.
1674 */
1675 param->swizzling[0] = 3;
1676 param->swizzling[1] = 4;
1677 }
1678 } else if (mt->tiling == I915_TILING_Y) {
1679 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1680 * different from the layout of an X-tiled surface; we simply pretend that
1681 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1682 * one arranged in X-major order just as is the case for X-tiling.
1683 */
1684 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1685 param->tiling[1] = _mesa_logbase2(32);
1686
1687 if (brw->has_swizzling) {
1688 /* Right shift required to swizzle bit 9 of the memory address with
1689 * bit 6.
1690 */
1691 param->swizzling[0] = 3;
1692 }
1693 }
1694
1695 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1696 * address calculation algorithm (emit_address_calculation() in
1697 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1698 * modulus equal to the LOD.
1699 */
1700 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1701 0);
1702 }
1703
1704 static void
1705 update_image_surface(struct brw_context *brw,
1706 struct gl_image_unit *u,
1707 GLenum access,
1708 unsigned surface_idx,
1709 uint32_t *surf_offset,
1710 struct brw_image_param *param)
1711 {
1712 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1713 struct gl_texture_object *obj = u->TexObj;
1714 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1715
1716 if (obj->Target == GL_TEXTURE_BUFFER) {
1717 struct intel_buffer_object *intel_obj =
1718 intel_buffer_object(obj->BufferObject);
1719 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1720 _mesa_get_format_bytes(u->_ActualFormat));
1721
1722 brw_emit_buffer_surface_state(
1723 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1724 format, intel_obj->Base.Size, texel_size,
1725 access != GL_READ_ONLY);
1726
1727 update_buffer_image_param(brw, u, surface_idx, param);
1728
1729 } else {
1730 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1731 struct intel_mipmap_tree *mt = intel_obj->mt;
1732
1733 if (format == ISL_FORMAT_RAW) {
1734 brw_emit_buffer_surface_state(
1735 brw, surf_offset, mt->bo, mt->offset,
1736 format, mt->bo->size - mt->offset, 1 /* pitch */,
1737 access != GL_READ_ONLY);
1738
1739 } else {
1740 const unsigned num_layers = (!u->Layered ? 1 :
1741 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1742 mt->logical_depth0);
1743
1744 struct isl_view view = {
1745 .format = format,
1746 .base_level = obj->MinLevel + u->Level,
1747 .levels = 1,
1748 .base_array_layer = obj->MinLayer + u->_Layer,
1749 .array_len = num_layers,
1750 .swizzle = ISL_SWIZZLE_IDENTITY,
1751 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1752 };
1753
1754 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1755 const bool unresolved = intel_miptree_has_color_unresolved(
1756 mt, view.base_level, view.levels,
1757 view.base_array_layer, view.array_len);
1758 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1759 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1760 tex_mocs[brw->gen],
1761 surf_offset, surf_index,
1762 I915_GEM_DOMAIN_SAMPLER,
1763 access == GL_READ_ONLY ? 0 :
1764 I915_GEM_DOMAIN_SAMPLER);
1765 }
1766
1767 update_texture_image_param(brw, u, surface_idx, param);
1768 }
1769
1770 } else {
1771 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1772 update_default_image_param(brw, u, surface_idx, param);
1773 }
1774 }
1775
1776 void
1777 brw_upload_image_surfaces(struct brw_context *brw,
1778 const struct gl_program *prog,
1779 struct brw_stage_state *stage_state,
1780 struct brw_stage_prog_data *prog_data)
1781 {
1782 assert(prog);
1783 struct gl_context *ctx = &brw->ctx;
1784
1785 if (prog->info.num_images) {
1786 for (unsigned i = 0; i < prog->info.num_images; i++) {
1787 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1788 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1789
1790 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1791 surf_idx,
1792 &stage_state->surf_offset[surf_idx],
1793 &prog_data->image_param[i]);
1794 }
1795
1796 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1797 /* This may have changed the image metadata dependent on the context
1798 * image unit state and passed to the program as uniforms, so make sure
1799 * that push and pull constants are reuploaded.
1800 */
1801 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1802 }
1803 }
1804
1805 static void
1806 brw_upload_wm_image_surfaces(struct brw_context *brw)
1807 {
1808 /* BRW_NEW_FRAGMENT_PROGRAM */
1809 const struct gl_program *wm = brw->fragment_program;
1810
1811 if (wm) {
1812 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1813 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1814 brw->wm.base.prog_data);
1815 }
1816 }
1817
1818 const struct brw_tracked_state brw_wm_image_surfaces = {
1819 .dirty = {
1820 .mesa = _NEW_TEXTURE,
1821 .brw = BRW_NEW_BATCH |
1822 BRW_NEW_BLORP |
1823 BRW_NEW_FRAGMENT_PROGRAM |
1824 BRW_NEW_FS_PROG_DATA |
1825 BRW_NEW_IMAGE_UNITS
1826 },
1827 .emit = brw_upload_wm_image_surfaces,
1828 };
1829
1830 void
1831 gen4_init_vtable_surface_functions(struct brw_context *brw)
1832 {
1833 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1834 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1835 }
1836
1837 void
1838 gen6_init_vtable_surface_functions(struct brw_context *brw)
1839 {
1840 gen4_init_vtable_surface_functions(brw);
1841 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1842 }
1843
1844 static void
1845 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1846 {
1847 struct gl_context *ctx = &brw->ctx;
1848 /* _NEW_PROGRAM */
1849 struct gl_program *prog =
1850 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1851 /* BRW_NEW_CS_PROG_DATA */
1852 const struct brw_cs_prog_data *cs_prog_data =
1853 brw_cs_prog_data(brw->cs.base.prog_data);
1854
1855 if (prog && cs_prog_data->uses_num_work_groups) {
1856 const unsigned surf_idx =
1857 cs_prog_data->binding_table.work_groups_start;
1858 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1859 struct brw_bo *bo;
1860 uint32_t bo_offset;
1861
1862 if (brw->compute.num_work_groups_bo == NULL) {
1863 bo = NULL;
1864 intel_upload_data(brw,
1865 (void *)brw->compute.num_work_groups,
1866 3 * sizeof(GLuint),
1867 sizeof(GLuint),
1868 &bo,
1869 &bo_offset);
1870 } else {
1871 bo = brw->compute.num_work_groups_bo;
1872 bo_offset = brw->compute.num_work_groups_offset;
1873 }
1874
1875 brw_emit_buffer_surface_state(brw, surf_offset,
1876 bo, bo_offset,
1877 ISL_FORMAT_RAW,
1878 3 * sizeof(GLuint), 1, true);
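/* (The surface exposes the three GLuint work-group counts so the shader
 * can read gl_NumWorkGroups, whether they came from the indirect
 * dispatch BO or were uploaded just above.)
 */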
1879 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1880 }
1881 }
1882
1883 const struct brw_tracked_state brw_cs_work_groups_surface = {
1884 .dirty = {
1885 .brw = BRW_NEW_BLORP |
1886 BRW_NEW_CS_PROG_DATA |
1887 BRW_NEW_CS_WORK_GROUPS
1888 },
1889 .emit = brw_upload_cs_work_groups_surface,
1890 };