i965: Rename brw_format_for_mesa_format() to brw_isl_format_for_mesa_format()
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96 * actual layout of the miptree structure in memory -- you're entering
97 * dangerous territory: this can only work if you only intended
98 * to access a single level and slice of the texture, and the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
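/* minify(x, l) is MAX2(1, x >> l), so e.g. selecting a level three levels
 * below the miptree's first level turns a 256x64 surface into a 32x8
 * level-0 surface here.
 */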
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 drm_intel_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 aux_bo = mt->mcs_buf->bo;
147 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
148 } else {
149 aux_bo = mt->hiz_buf->aux_base.bo;
150 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
151 }
152
153 /* We only really need a clear color if we also have an auxiliary
154 * surface. Without one, it does nothing.
155 */
156 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
157 }
158
159 void *state = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
160 brw->isl_dev.ss.size,
161 brw->isl_dev.ss.align,
162 surf_index, surf_offset);
163
164 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
165 .address = mt->bo->offset64 + offset,
166 .aux_surf = aux_surf, .aux_usage = aux_usage,
167 .aux_address = aux_offset,
168 .mocs = mocs, .clear_color = clear_color,
169 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
170
171 drm_intel_bo_emit_reloc(brw->batch.bo,
172 *surf_offset + brw->isl_dev.ss.addr_offset,
173 mt->bo, offset,
174 read_domains, write_domains);
175
176 if (aux_surf) {
177 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
178 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
179 * contain other control information. Since buffer addresses are always
180 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
181 * an ordinary reloc to do the necessary address translation.
182 */
183 assert((aux_offset & 0xfff) == 0);
184 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
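/* *aux_addr was written by isl_surf_fill_state() above and holds the aux
 * address with the extra control bits packed into its low 12 bits.
 * Subtracting aux_bo->offset64 leaves the aux data's offset within the BO
 * plus those control bits as the reloc delta, so adding the 4k-aligned BO
 * address back in leaves the control bits untouched.
 */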
185 drm_intel_bo_emit_reloc(brw->batch.bo,
186 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
187 aux_bo, *aux_addr - aux_bo->offset64,
188 read_domains, write_domains);
189 }
190 }
191
192 uint32_t
193 brw_update_renderbuffer_surface(struct brw_context *brw,
194 struct gl_renderbuffer *rb,
195 uint32_t flags, unsigned unit /* unused */,
196 uint32_t surf_index)
197 {
198 struct gl_context *ctx = &brw->ctx;
199 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
200 struct intel_mipmap_tree *mt = irb->mt;
201
202 if (brw->gen < 9) {
203 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
204 }
205
206 assert(brw_render_target_supported(brw, rb));
207
208 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
209 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
210 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
211 __func__, _mesa_get_format_name(rb_format));
212 }
213
214 const unsigned layer_multiplier =
215 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
216 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
217 MAX2(irb->mt->num_samples, 1) : 1;
218
219 struct isl_view view = {
220 .format = brw->render_target_format[rb_format],
221 .base_level = irb->mt_level - irb->mt->first_level,
222 .levels = 1,
223 .base_array_layer = irb->mt_layer / layer_multiplier,
224 .array_len = MAX2(irb->layer_count, 1),
225 .swizzle = ISL_SWIZZLE_IDENTITY,
226 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
227 };
228
229 uint32_t offset;
230 brw_emit_surface_state(brw, mt, flags, mt->target, view,
231 rb_mocs[brw->gen],
232 &offset, surf_index,
233 I915_GEM_DOMAIN_RENDER,
234 I915_GEM_DOMAIN_RENDER);
235 return offset;
236 }
237
238 GLuint
239 translate_tex_target(GLenum target)
240 {
241 switch (target) {
242 case GL_TEXTURE_1D:
243 case GL_TEXTURE_1D_ARRAY_EXT:
244 return BRW_SURFACE_1D;
245
246 case GL_TEXTURE_RECTANGLE_NV:
247 return BRW_SURFACE_2D;
248
249 case GL_TEXTURE_2D:
250 case GL_TEXTURE_2D_ARRAY_EXT:
251 case GL_TEXTURE_EXTERNAL_OES:
252 case GL_TEXTURE_2D_MULTISAMPLE:
253 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
254 return BRW_SURFACE_2D;
255
256 case GL_TEXTURE_3D:
257 return BRW_SURFACE_3D;
258
259 case GL_TEXTURE_CUBE_MAP:
260 case GL_TEXTURE_CUBE_MAP_ARRAY:
261 return BRW_SURFACE_CUBE;
262
263 default:
264 unreachable("not reached");
265 }
266 }
267
268 uint32_t
269 brw_get_surface_tiling_bits(uint32_t tiling)
270 {
271 switch (tiling) {
272 case I915_TILING_X:
273 return BRW_SURFACE_TILED;
274 case I915_TILING_Y:
275 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
276 default:
277 return 0;
278 }
279 }
280
281
282 uint32_t
283 brw_get_surface_num_multisamples(unsigned num_samples)
284 {
285 if (num_samples > 1)
286 return BRW_SURFACE_MULTISAMPLECOUNT_4;
287 else
288 return BRW_SURFACE_MULTISAMPLECOUNT_1;
289 }
290
291 /**
292 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
293 * swizzling.
294 */
295 int
296 brw_get_texture_swizzle(const struct gl_context *ctx,
297 const struct gl_texture_object *t)
298 {
299 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
300
301 int swizzles[SWIZZLE_NIL + 1] = {
302 SWIZZLE_X,
303 SWIZZLE_Y,
304 SWIZZLE_Z,
305 SWIZZLE_W,
306 SWIZZLE_ZERO,
307 SWIZZLE_ONE,
308 SWIZZLE_NIL
309 };
310
311 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
312 img->_BaseFormat == GL_DEPTH_STENCIL) {
313 GLenum depth_mode = t->DepthMode;
314
315 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
316 * with depth component data specified with a sized internal format.
317 * Otherwise, it's left at the old default, GL_LUMINANCE.
318 */
319 if (_mesa_is_gles3(ctx) &&
320 img->InternalFormat != GL_DEPTH_COMPONENT &&
321 img->InternalFormat != GL_DEPTH_STENCIL) {
322 depth_mode = GL_RED;
323 }
324
325 switch (depth_mode) {
326 case GL_ALPHA:
327 swizzles[0] = SWIZZLE_ZERO;
328 swizzles[1] = SWIZZLE_ZERO;
329 swizzles[2] = SWIZZLE_ZERO;
330 swizzles[3] = SWIZZLE_X;
331 break;
332 case GL_LUMINANCE:
333 swizzles[0] = SWIZZLE_X;
334 swizzles[1] = SWIZZLE_X;
335 swizzles[2] = SWIZZLE_X;
336 swizzles[3] = SWIZZLE_ONE;
337 break;
338 case GL_INTENSITY:
339 swizzles[0] = SWIZZLE_X;
340 swizzles[1] = SWIZZLE_X;
341 swizzles[2] = SWIZZLE_X;
342 swizzles[3] = SWIZZLE_X;
343 break;
344 case GL_RED:
345 swizzles[0] = SWIZZLE_X;
346 swizzles[1] = SWIZZLE_ZERO;
347 swizzles[2] = SWIZZLE_ZERO;
348 swizzles[3] = SWIZZLE_ONE;
349 break;
350 }
351 }
352
353 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
354
355 /* If the texture's format is alpha-only, force R, G, and B to
356 * 0.0. Similarly, if the texture's format has no alpha channel,
357 * force the alpha value read to 1.0. This allows the
358 * implementation to use an RGBA texture for any of these formats
359 * without leaking any unexpected values.
360 */
361 switch (img->_BaseFormat) {
362 case GL_ALPHA:
363 swizzles[0] = SWIZZLE_ZERO;
364 swizzles[1] = SWIZZLE_ZERO;
365 swizzles[2] = SWIZZLE_ZERO;
366 break;
367 case GL_LUMINANCE:
368 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
369 swizzles[0] = SWIZZLE_X;
370 swizzles[1] = SWIZZLE_X;
371 swizzles[2] = SWIZZLE_X;
372 swizzles[3] = SWIZZLE_ONE;
373 }
374 break;
375 case GL_LUMINANCE_ALPHA:
376 if (datatype == GL_SIGNED_NORMALIZED) {
377 swizzles[0] = SWIZZLE_X;
378 swizzles[1] = SWIZZLE_X;
379 swizzles[2] = SWIZZLE_X;
380 swizzles[3] = SWIZZLE_W;
381 }
382 break;
383 case GL_INTENSITY:
384 if (datatype == GL_SIGNED_NORMALIZED) {
385 swizzles[0] = SWIZZLE_X;
386 swizzles[1] = SWIZZLE_X;
387 swizzles[2] = SWIZZLE_X;
388 swizzles[3] = SWIZZLE_X;
389 }
390 break;
391 case GL_RED:
392 case GL_RG:
393 case GL_RGB:
394 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
395 swizzles[3] = SWIZZLE_ONE;
396 break;
397 }
398
399 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
400 swizzles[GET_SWZ(t->_Swizzle, 1)],
401 swizzles[GET_SWZ(t->_Swizzle, 2)],
402 swizzles[GET_SWZ(t->_Swizzle, 3)]);
403 }
404
405 /**
406 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
407 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
408 *
409 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
410 * 0 1 2 3 4 5
411 * 4 5 6 7 0 1
412 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
413 *
414 * which is simply adding 4 then modding by 8 (or anding with 7).
415 *
416 * We then may need to apply workarounds for textureGather hardware bugs.
417 */
418 static unsigned
419 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
420 {
421 unsigned scs = (swizzle + 4) & 7;
422
423 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
424 }
425
426 static unsigned
427 brw_find_matching_rb(const struct gl_framebuffer *fb,
428 const struct intel_mipmap_tree *mt)
429 {
430 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
431 const struct intel_renderbuffer *irb =
432 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
433
434 if (irb && irb->mt == mt)
435 return i;
436 }
437
438 return fb->_NumColorDrawBuffers;
439 }
440
441 static inline bool
442 brw_texture_view_sane(const struct brw_context *brw,
443 const struct intel_mipmap_tree *mt,
444 const struct isl_view *view)
445 {
446 /* There are special cases only for lossless compression. */
447 if (!intel_miptree_is_lossless_compressed(brw, mt))
448 return true;
449
450 if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
451 return true;
452
453 /* Logic elsewhere needs to take care to resolve the color buffer prior
454 * to sampling it as non-compressed.
455 */
456 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
457 view->base_array_layer,
458 view->array_len))
459 return false;
460
461 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
462 const unsigned rb_index = brw_find_matching_rb(fb, mt);
463
464 if (rb_index == fb->_NumColorDrawBuffers)
465 return true;
466
467 /* Underlying surface is compressed but it is sampled using a format that
468 * the sampling engine doesn't support as compressed. Compression must be
469 * disabled for both sampling engine and data port in case the same surface
470 * is used also as render target.
471 * is also used as a render target.
472 return brw->draw_aux_buffer_disabled[rb_index];
473 }
474
475 static bool
476 brw_disable_aux_surface(const struct brw_context *brw,
477 const struct intel_mipmap_tree *mt,
478 const struct isl_view *view)
479 {
480 /* Nothing to disable. */
481 if (!mt->mcs_buf)
482 return false;
483
484 const bool is_unresolved = intel_miptree_has_color_unresolved(
485 mt, view->base_level, view->levels,
486 view->base_array_layer, view->array_len);
487
488 /* There are special cases only for lossless compression. */
489 if (!intel_miptree_is_lossless_compressed(brw, mt))
490 return !is_unresolved;
491
492 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
493 const unsigned rb_index = brw_find_matching_rb(fb, mt);
494
495 /* If we are drawing into this with compression enabled, then we must also
496 * enable compression when texturing from it regardless of
497 * fast_clear_state. If we don't, then after the first draw call with
498 * this setup, there will be data in the CCS which won't get picked up by
499 * subsequent texturing operations as required by ARB_texture_barrier.
500 * Since we don't want to re-emit the binding table or do a resolve
501 * operation every draw call, the easiest thing to do is just enable
502 * compression on the texturing side. This is completely safe to do
503 * since, if compressed texturing weren't allowed, we would have disabled
504 * compression of render targets in whatever_that_function_is_called().
505 */
506 if (rb_index < fb->_NumColorDrawBuffers) {
507 if (brw->draw_aux_buffer_disabled[rb_index]) {
508 assert(!is_unresolved);
509 }
510
511 return brw->draw_aux_buffer_disabled[rb_index];
512 }
513
514 return !is_unresolved;
515 }
516
517 void
518 brw_update_texture_surface(struct gl_context *ctx,
519 unsigned unit,
520 uint32_t *surf_offset,
521 bool for_gather,
522 uint32_t plane)
523 {
524 struct brw_context *brw = brw_context(ctx);
525 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
526
527 if (obj->Target == GL_TEXTURE_BUFFER) {
528 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
529
530 } else {
531 struct intel_texture_object *intel_obj = intel_texture_object(obj);
532 struct intel_mipmap_tree *mt = intel_obj->mt;
533
534 if (plane > 0) {
535 if (mt->plane[plane - 1] == NULL)
536 return;
537 mt = mt->plane[plane - 1];
538 }
539
540 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
541 /* If this is a view with restricted NumLayers, then our effective depth
542 * is not just the miptree depth.
543 */
544 const unsigned view_num_layers =
545 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
546 mt->logical_depth0;
547
548 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
549 * texturing functions that return a float, as our code generation always
550 * selects the .x channel (which would always be 0).
551 */
552 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
553 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
554 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
555 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
556 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
557 brw_get_texture_swizzle(&brw->ctx, obj));
558
559 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
560 unsigned format = translate_tex_format(brw, mesa_fmt,
561 sampler->sRGBDecode);
562
563 /* Implement gen6 and gen7 gather work-around */
564 bool need_green_to_blue = false;
565 if (for_gather) {
566 if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
567 format == ISL_FORMAT_R32G32_SINT ||
568 format == ISL_FORMAT_R32G32_UINT)) {
569 format = ISL_FORMAT_R32G32_FLOAT_LD;
570 need_green_to_blue = brw->is_haswell;
571 } else if (brw->gen == 6) {
572 /* Sandybridge's gather4 message is broken for integer formats.
573 * To work around this, we pretend the surface is UNORM for
574 * 8 or 16-bit formats, and emit shader instructions to recover
575 * the real INT/UINT value. For 32-bit formats, we pretend
576 * the surface is FLOAT, and simply reinterpret the resulting
577 * bits.
578 */
579 switch (format) {
580 case ISL_FORMAT_R8_SINT:
581 case ISL_FORMAT_R8_UINT:
582 format = ISL_FORMAT_R8_UNORM;
583 break;
584
585 case ISL_FORMAT_R16_SINT:
586 case ISL_FORMAT_R16_UINT:
587 format = ISL_FORMAT_R16_UNORM;
588 break;
589
590 case ISL_FORMAT_R32_SINT:
591 case ISL_FORMAT_R32_UINT:
592 format = ISL_FORMAT_R32_FLOAT;
593 break;
594
595 default:
596 break;
597 }
598 }
599 }
600
601 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
602 if (brw->gen <= 7) {
603 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
604 mt = mt->r8stencil_mt;
605 } else {
606 mt = mt->stencil_mt;
607 }
608 format = ISL_FORMAT_R8_UINT;
609 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
610 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
611 mt = mt->r8stencil_mt;
612 format = ISL_FORMAT_R8_UINT;
613 }
614
615 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
616
617 struct isl_view view = {
618 .format = format,
619 .base_level = obj->MinLevel + obj->BaseLevel,
620 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
621 .base_array_layer = obj->MinLayer,
622 .array_len = view_num_layers,
623 .swizzle = {
624 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
625 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
626 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
627 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
628 },
629 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
630 };
631
632 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
633 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
634 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
635
636 assert(brw_texture_view_sane(brw, mt, &view));
637
638 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
639 INTEL_AUX_BUFFER_DISABLED : 0;
640 brw_emit_surface_state(brw, mt, flags, mt->target, view,
641 tex_mocs[brw->gen],
642 surf_offset, surf_index,
643 I915_GEM_DOMAIN_SAMPLER, 0);
644 }
645 }
646
647 void
648 brw_emit_buffer_surface_state(struct brw_context *brw,
649 uint32_t *out_offset,
650 drm_intel_bo *bo,
651 unsigned buffer_offset,
652 unsigned surface_format,
653 unsigned buffer_size,
654 unsigned pitch,
655 bool rw)
656 {
657 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
658 brw->isl_dev.ss.size,
659 brw->isl_dev.ss.align,
660 out_offset);
661
662 isl_buffer_fill_state(&brw->isl_dev, dw,
663 .address = (bo ? bo->offset64 : 0) + buffer_offset,
664 .size = buffer_size,
665 .format = surface_format,
666 .stride = pitch,
667 .mocs = tex_mocs[brw->gen]);
668
669 if (bo) {
670 drm_intel_bo_emit_reloc(brw->batch.bo,
671 *out_offset + brw->isl_dev.ss.addr_offset,
672 bo, buffer_offset,
673 I915_GEM_DOMAIN_SAMPLER,
674 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
675 }
676 }
677
678 void
679 brw_update_buffer_texture_surface(struct gl_context *ctx,
680 unsigned unit,
681 uint32_t *surf_offset)
682 {
683 struct brw_context *brw = brw_context(ctx);
684 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
685 struct intel_buffer_object *intel_obj =
686 intel_buffer_object(tObj->BufferObject);
687 uint32_t size = tObj->BufferSize;
688 drm_intel_bo *bo = NULL;
689 mesa_format format = tObj->_BufferObjectFormat;
690 uint32_t brw_format = brw_isl_format_for_mesa_format(format);
691 int texel_size = _mesa_get_format_bytes(format);
692
693 if (intel_obj) {
694 size = MIN2(size, intel_obj->Base.Size);
695 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
696 }
697
698 /* The ARB_texture_buffer_object specification says:
699 *
700 * "The number of texels in the buffer texture's texel array is given by
701 *
702 * floor(<buffer_size> / (<components> * sizeof(<base_type>))),
703 *
704 * where <buffer_size> is the size of the buffer object, in basic
705 * machine units and <components> and <base_type> are the element count
706 * and base data type for elements, as specified in Table X.1. The
707 * number of texels in the texel array is then clamped to the
708 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
709 *
710 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
711 * so that when ISL divides by stride to obtain the number of texels, that
712 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
713 */
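/* For example (purely illustrative numbers): with a MAX_TEXTURE_BUFFER_SIZE
 * of 2^27 texels and a 16-byte RGBA32F texel, the clamp below caps the byte
 * size at 2^31, so ISL's division by the 16-byte stride can never report
 * more than 2^27 texels.
 */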
714 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
715
716 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
717 _mesa_problem(NULL, "bad format %s for texture buffer\n",
718 _mesa_get_format_name(format));
719 }
720
721 brw_emit_buffer_surface_state(brw, surf_offset, bo,
722 tObj->BufferOffset,
723 brw_format,
724 size,
725 texel_size,
726 false /* rw */);
727 }
728
729 /**
730 * Create the constant buffer surface. Vertex/fragment shader constants will be
731 * read from this buffer with Data Port Read instructions/messages.
732 */
733 void
734 brw_create_constant_surface(struct brw_context *brw,
735 drm_intel_bo *bo,
736 uint32_t offset,
737 uint32_t size,
738 uint32_t *out_offset)
739 {
740 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
741 ISL_FORMAT_R32G32B32A32_FLOAT,
742 size, 1, false);
743 }
744
745 /**
746 * Create the buffer surface. Shader buffer variables will be
747 * read from / written to this buffer with Data Port Read/Write
748 * instructions/messages.
749 */
750 void
751 brw_create_buffer_surface(struct brw_context *brw,
752 drm_intel_bo *bo,
753 uint32_t offset,
754 uint32_t size,
755 uint32_t *out_offset)
756 {
757 /* Use a raw surface so we can reuse existing untyped read/write/atomic
758 * messages. We need these specifically for the fragment shader since they
759 * include a pixel mask header that we need in order to ensure correct
760 * behavior with helper invocations, which cannot write to the buffer.
761 */
762 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
763 ISL_FORMAT_RAW,
764 size, 1, true);
765 }
766
767 /**
768 * Set up a binding table entry for use by stream output logic (transform
769 * feedback).
770 *
771 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
772 */
773 void
774 brw_update_sol_surface(struct brw_context *brw,
775 struct gl_buffer_object *buffer_obj,
776 uint32_t *out_offset, unsigned num_vector_components,
777 unsigned stride_dwords, unsigned offset_dwords)
778 {
779 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
780 uint32_t offset_bytes = 4 * offset_dwords;
781 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
782 offset_bytes,
783 buffer_obj->Size - offset_bytes);
784 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
785 out_offset);
786 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
787 size_t size_dwords = buffer_obj->Size / 4;
788 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
789
790 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
791 * too big to map using a single binding table entry?
792 */
793 assert((size_dwords - offset_dwords) / stride_dwords
794 <= BRW_MAX_NUM_BUFFER_ENTRIES);
795
796 if (size_dwords > offset_dwords + num_vector_components) {
797 /* There is room for at least 1 transform feedback output in the buffer.
798 * Compute the number of additional transform feedback outputs the
799 * buffer has room for.
800 */
801 buffer_size_minus_1 =
802 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
803 } else {
804 /* There isn't even room for a single transform feedback output in the
805 * buffer. We can't configure the binding table entry to prevent output
806 * entirely; we'll have to rely on the geometry shader to detect
807 * overflow. But to minimize the damage in case of a bug, set up the
808 * binding table entry to just allow a single output.
809 */
810 buffer_size_minus_1 = 0;
811 }
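/* buffer_size_minus_1 is split below across the Width (bits 6:0), Height
 * (bits 19:7) and Depth (bits 26:20) fields of the BUFFER surface state.
 * For example, a value of 1000 becomes width = 104, height = 7, depth = 0,
 * and 104 + (7 << 7) reassembles to 1000.
 */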
812 width = buffer_size_minus_1 & 0x7f;
813 height = (buffer_size_minus_1 & 0xfff80) >> 7;
814 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
815
816 switch (num_vector_components) {
817 case 1:
818 surface_format = ISL_FORMAT_R32_FLOAT;
819 break;
820 case 2:
821 surface_format = ISL_FORMAT_R32G32_FLOAT;
822 break;
823 case 3:
824 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
825 break;
826 case 4:
827 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
828 break;
829 default:
830 unreachable("Invalid vector size for transform feedback output");
831 }
832
833 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
834 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
835 surface_format << BRW_SURFACE_FORMAT_SHIFT |
836 BRW_SURFACE_RC_READ_WRITE;
837 surf[1] = bo->offset64 + offset_bytes; /* reloc */
838 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
839 height << BRW_SURFACE_HEIGHT_SHIFT);
840 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
841 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
842 surf[4] = 0;
843 surf[5] = 0;
844
845 /* Emit relocation to surface contents. */
846 drm_intel_bo_emit_reloc(brw->batch.bo,
847 *out_offset + 4,
848 bo, offset_bytes,
849 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
850 }
851
852 /* Creates a new WM constant buffer reflecting the current fragment program's
853 * constants, if needed by the fragment program.
854 *
855 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
856 * state atom.
857 */
858 static void
859 brw_upload_wm_pull_constants(struct brw_context *brw)
860 {
861 struct brw_stage_state *stage_state = &brw->wm.base;
862 /* BRW_NEW_FRAGMENT_PROGRAM */
863 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
864 /* BRW_NEW_FS_PROG_DATA */
865 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
866
867 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
868 /* _NEW_PROGRAM_CONSTANTS */
869 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
870 stage_state, prog_data);
871 }
872
873 const struct brw_tracked_state brw_wm_pull_constants = {
874 .dirty = {
875 .mesa = _NEW_PROGRAM_CONSTANTS,
876 .brw = BRW_NEW_BATCH |
877 BRW_NEW_BLORP |
878 BRW_NEW_FRAGMENT_PROGRAM |
879 BRW_NEW_FS_PROG_DATA,
880 },
881 .emit = brw_upload_wm_pull_constants,
882 };
883
884 /**
885 * Creates a null renderbuffer surface.
886 *
887 * This is used when the shader doesn't write to any color output. An FB
888 * write to target 0 will still be emitted, because that's how the thread is
889 * terminated (and computed depth is returned), so we need to have the
890 * hardware discard the target 0 color output.
891 */
892 static void
893 brw_emit_null_surface_state(struct brw_context *brw,
894 unsigned width,
895 unsigned height,
896 unsigned samples,
897 uint32_t *out_offset)
898 {
899 /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
900 * Notes):
901 *
902 * A null surface will be used in instances where an actual surface is
903 * not bound. When a write message is generated to a null surface, no
904 * actual surface is written to. When a read message (including any
905 * sampling engine message) is generated to a null surface, the result
906 * is all zeros. Note that a null surface type is allowed to be used
907 * with all messages, even if it is not specifically indicated as
908 * supported. All of the remaining fields in surface state are ignored
909 * for null surfaces, with the following exceptions:
910 *
911 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
912 * depth buffer’s corresponding state for all render target surfaces,
913 * including null.
914 *
915 * - Surface Format must be R8G8B8A8_UNORM.
916 */
917 unsigned surface_type = BRW_SURFACE_NULL;
918 drm_intel_bo *bo = NULL;
919 unsigned pitch_minus_1 = 0;
920 uint32_t multisampling_state = 0;
921 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
922 out_offset);
923
924 if (samples > 1) {
925 /* On Gen6, null render targets seem to cause GPU hangs when
926 * multisampling. So work around this problem by rendering into a
927 * dummy color buffer.
928 *
929 * To decrease the amount of memory needed by the workaround buffer, we
930 * set its pitch to 128 bytes (the width of a Y tile). This means that
931 * the amount of memory needed for the workaround buffer is
932 * (width_in_tiles + height_in_tiles - 1) tiles.
933 *
934 * Note that since the workaround buffer will be interpreted by the
935 * hardware as an interleaved multisampled buffer, we need to compute
936 * width_in_tiles and height_in_tiles by dividing the width and height
937 * by 16 rather than the normal Y-tile size of 32.
938 */
939 unsigned width_in_tiles = ALIGN(width, 16) / 16;
940 unsigned height_in_tiles = ALIGN(height, 16) / 16;
941 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
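/* As a rough illustration, a 1920x1080 multisampled framebuffer would use
 * width_in_tiles = 120 and height_in_tiles = 68, i.e. a 187 * 4096 byte
 * (~748 kB) scratch allocation.
 */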
942 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
943 size_needed);
944 bo = brw->wm.multisampled_null_render_target_bo;
945 surface_type = BRW_SURFACE_2D;
946 pitch_minus_1 = 127;
947 multisampling_state = brw_get_surface_num_multisamples(samples);
948 }
949
950 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
951 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
952 if (brw->gen < 6) {
953 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
954 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
955 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
956 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
957 }
958 surf[1] = bo ? bo->offset64 : 0;
959 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
960 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
961
962 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
963 * Notes):
964 *
965 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
966 */
967 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
968 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
969 surf[4] = multisampling_state;
970 surf[5] = 0;
971
972 if (bo) {
973 drm_intel_bo_emit_reloc(brw->batch.bo,
974 *out_offset + 4,
975 bo, 0,
976 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
977 }
978 }
979
980 /**
981 * Sets up a surface state structure to point at the given region.
982 * While it is only used for the front/back buffer currently, it should be
983 * usable for further buffers when doing ARB_draw_buffers support.
984 */
985 static uint32_t
986 gen4_update_renderbuffer_surface(struct brw_context *brw,
987 struct gl_renderbuffer *rb,
988 uint32_t flags, unsigned unit,
989 uint32_t surf_index)
990 {
991 struct gl_context *ctx = &brw->ctx;
992 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
993 struct intel_mipmap_tree *mt = irb->mt;
994 uint32_t *surf;
995 uint32_t tile_x, tile_y;
996 uint32_t format = 0;
997 uint32_t offset;
998 /* _NEW_BUFFERS */
999 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
1000 /* BRW_NEW_FS_PROG_DATA */
1001
1002 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
1003 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
1004
1005 if (rb->TexImage && !brw->has_surface_tile_offset) {
1006 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
1007
1008 if (tile_x != 0 || tile_y != 0) {
1009 /* Original gen4 hardware couldn't draw to a non-tile-aligned
1010 * destination in a miptree unless you actually set up your renderbuffer
1011 * as a miptree and used the fragile lod/array_index/etc. controls to
1012 * select the image. So, instead, we just make a new single-level
1013 * miptree and render into that.
1014 */
1015 intel_renderbuffer_move_to_temp(brw, irb, false);
1016 mt = irb->mt;
1017 }
1018 }
1019
1020 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
1021
1022 format = brw->render_target_format[rb_format];
1023 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1024 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1025 __func__, _mesa_get_format_name(rb_format));
1026 }
1027
1028 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1029 format << BRW_SURFACE_FORMAT_SHIFT);
1030
1031 /* reloc */
1032 assert(mt->offset % mt->cpp == 0);
1033 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1034 mt->bo->offset64 + mt->offset);
1035
1036 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1037 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1038
1039 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1040 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1041
1042 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1043
1044 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1045 /* Note that the low bits of these fields are missing, so
1046 * there's the possibility of getting in trouble.
1047 */
1048 assert(tile_x % 4 == 0);
1049 assert(tile_y % 2 == 0);
1050 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1051 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1052 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1053
1054 if (brw->gen < 6) {
1055 /* _NEW_COLOR */
1056 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1057 (ctx->Color.BlendEnabled & (1 << unit)))
1058 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1059
1060 if (!ctx->Color.ColorMask[unit][0])
1061 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1062 if (!ctx->Color.ColorMask[unit][1])
1063 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1064 if (!ctx->Color.ColorMask[unit][2])
1065 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1066
1067 /* As mentioned above, disable writes to the alpha component when the
1068 * renderbuffer is XRGB.
1069 */
1070 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1071 !ctx->Color.ColorMask[unit][3]) {
1072 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1073 }
1074 }
1075
1076 drm_intel_bo_emit_reloc(brw->batch.bo,
1077 offset + 4,
1078 mt->bo,
1079 surf[1] - mt->bo->offset64,
1080 I915_GEM_DOMAIN_RENDER,
1081 I915_GEM_DOMAIN_RENDER);
1082
1083 return offset;
1084 }
1085
1086 /**
1087 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1088 */
1089 void
1090 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1091 const struct gl_framebuffer *fb,
1092 uint32_t render_target_start,
1093 uint32_t *surf_offset)
1094 {
1095 GLuint i;
1096 const unsigned int w = _mesa_geometric_width(fb);
1097 const unsigned int h = _mesa_geometric_height(fb);
1098 const unsigned int s = _mesa_geometric_samples(fb);
1099
1100 /* Update surfaces for drawing buffers */
1101 if (fb->_NumColorDrawBuffers >= 1) {
1102 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1103 const uint32_t surf_index = render_target_start + i;
1104 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1105 INTEL_RENDERBUFFER_LAYERED : 0) |
1106 (brw->draw_aux_buffer_disabled[i] ?
1107 INTEL_AUX_BUFFER_DISABLED : 0);
1108
1109 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1110 surf_offset[surf_index] =
1111 brw->vtbl.update_renderbuffer_surface(
1112 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1113 } else {
1114 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1115 &surf_offset[surf_index]);
1116 }
1117 }
1118 } else {
1119 const uint32_t surf_index = render_target_start;
1120 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1121 &surf_offset[surf_index]);
1122 }
1123 }
1124
1125 static void
1126 update_renderbuffer_surfaces(struct brw_context *brw)
1127 {
1128 const struct gl_context *ctx = &brw->ctx;
1129
1130 /* BRW_NEW_FS_PROG_DATA */
1131 const struct brw_wm_prog_data *wm_prog_data =
1132 brw_wm_prog_data(brw->wm.base.prog_data);
1133
1134 /* _NEW_BUFFERS | _NEW_COLOR */
1135 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1136 brw_update_renderbuffer_surfaces(
1137 brw, fb,
1138 wm_prog_data->binding_table.render_target_start,
1139 brw->wm.base.surf_offset);
1140 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1141 }
1142
1143 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1144 .dirty = {
1145 .mesa = _NEW_BUFFERS |
1146 _NEW_COLOR,
1147 .brw = BRW_NEW_BATCH |
1148 BRW_NEW_BLORP |
1149 BRW_NEW_FS_PROG_DATA,
1150 },
1151 .emit = update_renderbuffer_surfaces,
1152 };
1153
1154 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1155 .dirty = {
1156 .mesa = _NEW_BUFFERS,
1157 .brw = BRW_NEW_BATCH |
1158 BRW_NEW_BLORP,
1159 },
1160 .emit = update_renderbuffer_surfaces,
1161 };
1162
1163 static void
1164 update_renderbuffer_read_surfaces(struct brw_context *brw)
1165 {
1166 const struct gl_context *ctx = &brw->ctx;
1167
1168 /* BRW_NEW_FS_PROG_DATA */
1169 const struct brw_wm_prog_data *wm_prog_data =
1170 brw_wm_prog_data(brw->wm.base.prog_data);
1171
1172 /* BRW_NEW_FRAGMENT_PROGRAM */
1173 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1174 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1175 /* _NEW_BUFFERS */
1176 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1177
1178 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1179 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1180 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1181 const unsigned surf_index =
1182 wm_prog_data->binding_table.render_target_read_start + i;
1183 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1184
1185 if (irb) {
1186 const unsigned format = brw->render_target_format[
1187 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1188 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1189 format));
1190
1191 /* Override the target of the texture if the render buffer is a
1192 * single slice of a 3D texture (since the minimum array element
1193 * field of the surface state structure is ignored by the sampler
1194 * unit for 3D textures on some hardware), or if the render buffer
1195 * is a 1D array (since shaders always provide the array index
1196 * coordinate at the Z component to avoid state-dependent
1197 * recompiles when changing the texture target of the
1198 * framebuffer).
1199 */
1200 const GLenum target =
1201 (irb->mt->target == GL_TEXTURE_3D &&
1202 irb->layer_count == 1) ? GL_TEXTURE_2D :
1203 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1204 irb->mt->target;
1205
1206 /* intel_renderbuffer::mt_layer is expressed in sample units for
1207 * the UMS and CMS multisample layouts, but
1208 * intel_renderbuffer::layer_count is expressed in units of whole
1209 * logical layers regardless of the multisample layout.
1210 */
1211 const unsigned mt_layer_unit =
1212 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1213 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1214 MAX2(irb->mt->num_samples, 1) : 1;
1215
1216 const struct isl_view view = {
1217 .format = format,
1218 .base_level = irb->mt_level - irb->mt->first_level,
1219 .levels = 1,
1220 .base_array_layer = irb->mt_layer / mt_layer_unit,
1221 .array_len = irb->layer_count,
1222 .swizzle = ISL_SWIZZLE_IDENTITY,
1223 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1224 };
1225
1226 const int flags = brw->draw_aux_buffer_disabled[i] ?
1227 INTEL_AUX_BUFFER_DISABLED : 0;
1228 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1229 tex_mocs[brw->gen],
1230 surf_offset, surf_index,
1231 I915_GEM_DOMAIN_SAMPLER, 0);
1232
1233 } else {
1234 brw->vtbl.emit_null_surface_state(
1235 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1236 _mesa_geometric_samples(fb), surf_offset);
1237 }
1238 }
1239
1240 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1241 }
1242 }
1243
1244 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1245 .dirty = {
1246 .mesa = _NEW_BUFFERS,
1247 .brw = BRW_NEW_BATCH |
1248 BRW_NEW_FRAGMENT_PROGRAM |
1249 BRW_NEW_FS_PROG_DATA,
1250 },
1251 .emit = update_renderbuffer_read_surfaces,
1252 };
1253
1254 static void
1255 update_stage_texture_surfaces(struct brw_context *brw,
1256 const struct gl_program *prog,
1257 struct brw_stage_state *stage_state,
1258 bool for_gather, uint32_t plane)
1259 {
1260 if (!prog)
1261 return;
1262
1263 struct gl_context *ctx = &brw->ctx;
1264
1265 uint32_t *surf_offset = stage_state->surf_offset;
1266
1267 /* BRW_NEW_*_PROG_DATA */
1268 if (for_gather)
1269 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1270 else
1271 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1272
1273 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1274 for (unsigned s = 0; s < num_samplers; s++) {
1275 surf_offset[s] = 0;
1276
1277 if (prog->SamplersUsed & (1 << s)) {
1278 const unsigned unit = prog->SamplerUnits[s];
1279
1280 /* _NEW_TEXTURE */
1281 if (ctx->Texture.Unit[unit]._Current) {
1282 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1283 }
1284 }
1285 }
1286 }
1287
1288
1289 /**
1290 * Construct SURFACE_STATE objects for enabled textures.
1291 */
1292 static void
1293 brw_update_texture_surfaces(struct brw_context *brw)
1294 {
1295 /* BRW_NEW_VERTEX_PROGRAM */
1296 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1297
1298 /* BRW_NEW_TESS_PROGRAMS */
1299 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1300 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1301
1302 /* BRW_NEW_GEOMETRY_PROGRAM */
1303 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1304
1305 /* BRW_NEW_FRAGMENT_PROGRAM */
1306 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1307
1308 /* _NEW_TEXTURE */
1309 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1310 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1311 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1312 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1313 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1314
1315 /* Emit an alternate set of surface state for gather. This
1316 * allows the surface format to be overridden for only the
1317 * gather4 messages. */
1318 if (brw->gen < 8) {
1319 if (vs && vs->nir->info->uses_texture_gather)
1320 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1321 if (tcs && tcs->nir->info->uses_texture_gather)
1322 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1323 if (tes && tes->nir->info->uses_texture_gather)
1324 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1325 if (gs && gs->nir->info->uses_texture_gather)
1326 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1327 if (fs && fs->nir->info->uses_texture_gather)
1328 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1329 }
1330
1331 if (fs) {
1332 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1333 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1334 }
1335
1336 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1337 }
1338
1339 const struct brw_tracked_state brw_texture_surfaces = {
1340 .dirty = {
1341 .mesa = _NEW_TEXTURE,
1342 .brw = BRW_NEW_BATCH |
1343 BRW_NEW_BLORP |
1344 BRW_NEW_FRAGMENT_PROGRAM |
1345 BRW_NEW_FS_PROG_DATA |
1346 BRW_NEW_GEOMETRY_PROGRAM |
1347 BRW_NEW_GS_PROG_DATA |
1348 BRW_NEW_TESS_PROGRAMS |
1349 BRW_NEW_TCS_PROG_DATA |
1350 BRW_NEW_TES_PROG_DATA |
1351 BRW_NEW_TEXTURE_BUFFER |
1352 BRW_NEW_VERTEX_PROGRAM |
1353 BRW_NEW_VS_PROG_DATA,
1354 },
1355 .emit = brw_update_texture_surfaces,
1356 };
1357
1358 static void
1359 brw_update_cs_texture_surfaces(struct brw_context *brw)
1360 {
1361 /* BRW_NEW_COMPUTE_PROGRAM */
1362 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1363
1364 /* _NEW_TEXTURE */
1365 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1366
1367 /* Emit an alternate set of surface state for gather. This
1368 * allows the surface format to be overridden for only the
1369 * gather4 messages.
1370 */
1371 if (brw->gen < 8) {
1372 if (cs && cs->nir->info->uses_texture_gather)
1373 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1374 }
1375
1376 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1377 }
1378
1379 const struct brw_tracked_state brw_cs_texture_surfaces = {
1380 .dirty = {
1381 .mesa = _NEW_TEXTURE,
1382 .brw = BRW_NEW_BATCH |
1383 BRW_NEW_BLORP |
1384 BRW_NEW_COMPUTE_PROGRAM,
1385 },
1386 .emit = brw_update_cs_texture_surfaces,
1387 };
1388
1389
1390 void
1391 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1392 struct brw_stage_state *stage_state,
1393 struct brw_stage_prog_data *prog_data)
1394 {
1395 struct gl_context *ctx = &brw->ctx;
1396
1397 if (!prog)
1398 return;
1399
1400 uint32_t *ubo_surf_offsets =
1401 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1402
1403 for (int i = 0; i < prog->info.num_ubos; i++) {
1404 struct gl_uniform_buffer_binding *binding =
1405 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1406
1407 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1408 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1409 } else {
1410 struct intel_buffer_object *intel_bo =
1411 intel_buffer_object(binding->BufferObject);
1412 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1413 if (!binding->AutomaticSize)
1414 size = MIN2(size, binding->Size);
1415 drm_intel_bo *bo =
1416 intel_bufferobj_buffer(brw, intel_bo,
1417 binding->Offset,
1418 size);
1419 brw_create_constant_surface(brw, bo, binding->Offset,
1420 size,
1421 &ubo_surf_offsets[i]);
1422 }
1423 }
1424
1425 uint32_t *ssbo_surf_offsets =
1426 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1427
1428 for (int i = 0; i < prog->info.num_ssbos; i++) {
1429 struct gl_shader_storage_buffer_binding *binding =
1430 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1431
1432 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1433 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1434 } else {
1435 struct intel_buffer_object *intel_bo =
1436 intel_buffer_object(binding->BufferObject);
1437 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1438 if (!binding->AutomaticSize)
1439 size = MIN2(size, binding->Size);
1440 drm_intel_bo *bo =
1441 intel_bufferobj_buffer(brw, intel_bo,
1442 binding->Offset,
1443 size);
1444 brw_create_buffer_surface(brw, bo, binding->Offset,
1445 size,
1446 &ssbo_surf_offsets[i]);
1447 }
1448 }
1449
1450 if (prog->info.num_ubos || prog->info.num_ssbos)
1451 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1452 }
1453
1454 static void
1455 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1456 {
1457 struct gl_context *ctx = &brw->ctx;
1458 /* _NEW_PROGRAM */
1459 struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1460
1461 /* BRW_NEW_FS_PROG_DATA */
1462 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1463 }
1464
1465 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1466 .dirty = {
1467 .mesa = _NEW_PROGRAM,
1468 .brw = BRW_NEW_BATCH |
1469 BRW_NEW_BLORP |
1470 BRW_NEW_FS_PROG_DATA |
1471 BRW_NEW_UNIFORM_BUFFER,
1472 },
1473 .emit = brw_upload_wm_ubo_surfaces,
1474 };
1475
1476 static void
1477 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1478 {
1479 struct gl_context *ctx = &brw->ctx;
1480 /* _NEW_PROGRAM */
1481 struct gl_program *prog =
1482 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1483
1484 /* BRW_NEW_CS_PROG_DATA */
1485 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1486 }
1487
1488 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1489 .dirty = {
1490 .mesa = _NEW_PROGRAM,
1491 .brw = BRW_NEW_BATCH |
1492 BRW_NEW_BLORP |
1493 BRW_NEW_CS_PROG_DATA |
1494 BRW_NEW_UNIFORM_BUFFER,
1495 },
1496 .emit = brw_upload_cs_ubo_surfaces,
1497 };
1498
1499 void
1500 brw_upload_abo_surfaces(struct brw_context *brw,
1501 const struct gl_program *prog,
1502 struct brw_stage_state *stage_state,
1503 struct brw_stage_prog_data *prog_data)
1504 {
1505 struct gl_context *ctx = &brw->ctx;
1506 uint32_t *surf_offsets =
1507 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1508
1509 if (prog->info.num_abos) {
1510 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1511 struct gl_atomic_buffer_binding *binding =
1512 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1513 struct intel_buffer_object *intel_bo =
1514 intel_buffer_object(binding->BufferObject);
1515 drm_intel_bo *bo = intel_bufferobj_buffer(
1516 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1517
1518 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1519 binding->Offset, ISL_FORMAT_RAW,
1520 bo->size - binding->Offset, 1, true);
1521 }
1522
1523 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1524 }
1525 }
1526
1527 static void
1528 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1529 {
1530 /* _NEW_PROGRAM */
1531 const struct gl_program *wm = brw->fragment_program;
1532
1533 if (wm) {
1534 /* BRW_NEW_FS_PROG_DATA */
1535 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1536 }
1537 }
1538
1539 const struct brw_tracked_state brw_wm_abo_surfaces = {
1540 .dirty = {
1541 .mesa = _NEW_PROGRAM,
1542 .brw = BRW_NEW_ATOMIC_BUFFER |
1543 BRW_NEW_BLORP |
1544 BRW_NEW_BATCH |
1545 BRW_NEW_FS_PROG_DATA,
1546 },
1547 .emit = brw_upload_wm_abo_surfaces,
1548 };
1549
1550 static void
1551 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1552 {
1553 /* _NEW_PROGRAM */
1554 const struct gl_program *cp = brw->compute_program;
1555
1556 if (cp) {
1557 /* BRW_NEW_CS_PROG_DATA */
1558 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1559 }
1560 }
1561
1562 const struct brw_tracked_state brw_cs_abo_surfaces = {
1563 .dirty = {
1564 .mesa = _NEW_PROGRAM,
1565 .brw = BRW_NEW_ATOMIC_BUFFER |
1566 BRW_NEW_BLORP |
1567 BRW_NEW_BATCH |
1568 BRW_NEW_CS_PROG_DATA,
1569 },
1570 .emit = brw_upload_cs_abo_surfaces,
1571 };
1572
1573 static void
1574 brw_upload_cs_image_surfaces(struct brw_context *brw)
1575 {
1576 /* _NEW_PROGRAM */
1577 const struct gl_program *cp = brw->compute_program;
1578
1579 if (cp) {
1580 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1581 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1582 brw->cs.base.prog_data);
1583 }
1584 }
1585
1586 const struct brw_tracked_state brw_cs_image_surfaces = {
1587 .dirty = {
1588 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1589 .brw = BRW_NEW_BATCH |
1590 BRW_NEW_BLORP |
1591 BRW_NEW_CS_PROG_DATA |
1592 BRW_NEW_IMAGE_UNITS
1593 },
1594 .emit = brw_upload_cs_image_surfaces,
1595 };
1596
1597 static uint32_t
1598 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1599 {
1600 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1601 uint32_t hw_format = brw_isl_format_for_mesa_format(format);
1602 if (access == GL_WRITE_ONLY) {
1603 return hw_format;
1604 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1605 /* Typed surface reads support a very limited subset of the shader
1606 * image formats. Translate it into the closest format the
1607 * hardware supports.
1608 */
1609 return isl_lower_storage_image_format(devinfo, hw_format);
1610 } else {
1611 /* The hardware doesn't actually support a typed format that we can use
1612 * so we have to fall back to untyped read/write messages.
1613 */
1614 return ISL_FORMAT_RAW;
1615 }
1616 }
1617
1618 static void
1619 update_default_image_param(struct brw_context *brw,
1620 struct gl_image_unit *u,
1621 unsigned surface_idx,
1622 struct brw_image_param *param)
1623 {
1624 memset(param, 0, sizeof(*param));
1625 param->surface_idx = surface_idx;
1626 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1627 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1628 * detailed explanation of these parameters.
1629 */
1630 param->swizzling[0] = 0xff;
1631 param->swizzling[1] = 0xff;
1632 }
1633
1634 static void
1635 update_buffer_image_param(struct brw_context *brw,
1636 struct gl_image_unit *u,
1637 unsigned surface_idx,
1638 struct brw_image_param *param)
1639 {
1640 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1641 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1642 update_default_image_param(brw, u, surface_idx, param);
1643
1644 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1645 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1646 }
1647
1648 static void
1649 update_texture_image_param(struct brw_context *brw,
1650 struct gl_image_unit *u,
1651 unsigned surface_idx,
1652 struct brw_image_param *param)
1653 {
1654 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1655
1656 update_default_image_param(brw, u, surface_idx, param);
1657
1658 param->size[0] = minify(mt->logical_width0, u->Level);
1659 param->size[1] = minify(mt->logical_height0, u->Level);
1660 param->size[2] = (!u->Layered ? 1 :
1661 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1662 u->TexObj->Target == GL_TEXTURE_3D ?
1663 minify(mt->logical_depth0, u->Level) :
1664 mt->logical_depth0);
1665
1666 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1667 &param->offset[0],
1668 &param->offset[1]);
1669
1670 param->stride[0] = mt->cpp;
1671 param->stride[1] = mt->pitch / mt->cpp;
1672 param->stride[2] =
1673 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1674 param->stride[3] =
1675 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1676
1677 if (mt->tiling == I915_TILING_X) {
1678 /* An X tile is a rectangular block of 512x8 bytes. */
1679 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1680 param->tiling[1] = _mesa_logbase2(8);
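/* For example, with a 4-byte texel (cpp == 4) this gives
 * tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3, i.e. the
 * address calculation treats a tile as 128 texels wide by 8 rows tall.
 */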
1681
1682 if (brw->has_swizzling) {
1683 /* Right shifts required to swizzle bits 9 and 10 of the memory
1684 * address with bit 6.
1685 */
1686 param->swizzling[0] = 3;
1687 param->swizzling[1] = 4;
1688 }
1689 } else if (mt->tiling == I915_TILING_Y) {
1690 * The layout of a Y-tiled surface in memory isn't really fundamentally
1691 * different from the layout of an X-tiled surface; we simply pretend that
1692 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1693 * one arranged in X-major order just as is the case for X-tiling.
1694 */
1695 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1696 param->tiling[1] = _mesa_logbase2(32);
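/* With the same 4-byte texel example, tiling[0] = log2(16 / 4) = 2 and
 * tiling[1] = log2(32) = 5: pseudo-tiles of 4 texels wide by 32 rows tall.
 */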
1697
1698 if (brw->has_swizzling) {
1699 /* Right shift required to swizzle bit 9 of the memory address with
1700 * bit 6.
1701 */
1702 param->swizzling[0] = 3;
1703 }
1704 }
1705
1706 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1707 * address calculation algorithm (emit_address_calculation() in
1708 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1709 * modulus equal to the LOD.
1710 */
1711 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1712 0);
1713 }
1714
1715 static void
1716 update_image_surface(struct brw_context *brw,
1717 struct gl_image_unit *u,
1718 GLenum access,
1719 unsigned surface_idx,
1720 uint32_t *surf_offset,
1721 struct brw_image_param *param)
1722 {
1723 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1724 struct gl_texture_object *obj = u->TexObj;
1725 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1726
1727 if (obj->Target == GL_TEXTURE_BUFFER) {
1728 struct intel_buffer_object *intel_obj =
1729 intel_buffer_object(obj->BufferObject);
1730 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1731 _mesa_get_format_bytes(u->_ActualFormat));
1732
1733 brw_emit_buffer_surface_state(
1734 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1735 format, intel_obj->Base.Size, texel_size,
1736 access != GL_READ_ONLY);
1737
1738 update_buffer_image_param(brw, u, surface_idx, param);
1739
1740 } else {
1741 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1742 struct intel_mipmap_tree *mt = intel_obj->mt;
1743
1744 if (format == ISL_FORMAT_RAW) {
1745 brw_emit_buffer_surface_state(
1746 brw, surf_offset, mt->bo, mt->offset,
1747 format, mt->bo->size - mt->offset, 1 /* pitch */,
1748 access != GL_READ_ONLY);
1749
1750 } else {
1751 const unsigned num_layers = (!u->Layered ? 1 :
1752 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1753 mt->logical_depth0);
1754
1755 struct isl_view view = {
1756 .format = format,
1757 .base_level = obj->MinLevel + u->Level,
1758 .levels = 1,
1759 .base_array_layer = obj->MinLayer + u->_Layer,
1760 .array_len = num_layers,
1761 .swizzle = ISL_SWIZZLE_IDENTITY,
1762 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1763 };
1764
1765 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1766 const bool unresolved = intel_miptree_has_color_unresolved(
1767 mt, view.base_level, view.levels,
1768 view.base_array_layer, view.array_len);
1769 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1770 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1771 tex_mocs[brw->gen],
1772 surf_offset, surf_index,
1773 I915_GEM_DOMAIN_SAMPLER,
1774 access == GL_READ_ONLY ? 0 :
1775 I915_GEM_DOMAIN_SAMPLER);
1776 }
1777
1778 update_texture_image_param(brw, u, surface_idx, param);
1779 }
1780
1781 } else {
1782 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1783 update_default_image_param(brw, u, surface_idx, param);
1784 }
1785 }
1786
1787 void
1788 brw_upload_image_surfaces(struct brw_context *brw,
1789 const struct gl_program *prog,
1790 struct brw_stage_state *stage_state,
1791 struct brw_stage_prog_data *prog_data)
1792 {
1793 assert(prog);
1794 struct gl_context *ctx = &brw->ctx;
1795
1796 if (prog->info.num_images) {
1797 for (unsigned i = 0; i < prog->info.num_images; i++) {
1798 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1799 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1800
1801 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1802 surf_idx,
1803 &stage_state->surf_offset[surf_idx],
1804 &prog_data->image_param[i]);
1805 }
1806
1807 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1808 /* This may have changed the image metadata dependent on the context
1809 * image unit state and passed to the program as uniforms, so make sure
1810 * that push and pull constants are reuploaded.
1811 */
1812 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1813 }
1814 }
1815
1816 static void
1817 brw_upload_wm_image_surfaces(struct brw_context *brw)
1818 {
1819 /* BRW_NEW_FRAGMENT_PROGRAM */
1820 const struct gl_program *wm = brw->fragment_program;
1821
1822 if (wm) {
1823 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1824 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1825 brw->wm.base.prog_data);
1826 }
1827 }
1828
1829 const struct brw_tracked_state brw_wm_image_surfaces = {
1830 .dirty = {
1831 .mesa = _NEW_TEXTURE,
1832 .brw = BRW_NEW_BATCH |
1833 BRW_NEW_BLORP |
1834 BRW_NEW_FRAGMENT_PROGRAM |
1835 BRW_NEW_FS_PROG_DATA |
1836 BRW_NEW_IMAGE_UNITS
1837 },
1838 .emit = brw_upload_wm_image_surfaces,
1839 };
1840
1841 void
1842 gen4_init_vtable_surface_functions(struct brw_context *brw)
1843 {
1844 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1845 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1846 }
1847
1848 void
1849 gen6_init_vtable_surface_functions(struct brw_context *brw)
1850 {
1851 gen4_init_vtable_surface_functions(brw);
1852 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1853 }
1854
1855 static void
1856 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1857 {
1858 struct gl_context *ctx = &brw->ctx;
1859 /* _NEW_PROGRAM */
1860 struct gl_program *prog =
1861 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1862 /* BRW_NEW_CS_PROG_DATA */
1863 const struct brw_cs_prog_data *cs_prog_data =
1864 brw_cs_prog_data(brw->cs.base.prog_data);
1865
1866 if (prog && cs_prog_data->uses_num_work_groups) {
1867 const unsigned surf_idx =
1868 cs_prog_data->binding_table.work_groups_start;
1869 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1870 drm_intel_bo *bo;
1871 uint32_t bo_offset;
1872
1873 if (brw->compute.num_work_groups_bo == NULL) {
1874 bo = NULL;
1875 intel_upload_data(brw,
1876 (void *)brw->compute.num_work_groups,
1877 3 * sizeof(GLuint),
1878 sizeof(GLuint),
1879 &bo,
1880 &bo_offset);
1881 } else {
1882 bo = brw->compute.num_work_groups_bo;
1883 bo_offset = brw->compute.num_work_groups_offset;
1884 }
1885
1886 brw_emit_buffer_surface_state(brw, surf_offset,
1887 bo, bo_offset,
1888 ISL_FORMAT_RAW,
1889 3 * sizeof(GLuint), 1, true);
1890 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1891 }
1892 }
1893
1894 const struct brw_tracked_state brw_cs_work_groups_surface = {
1895 .dirty = {
1896 .brw = BRW_NEW_BLORP |
1897 BRW_NEW_CS_PROG_DATA |
1898 BRW_NEW_CS_WORK_GROUPS
1899 },
1900 .emit = brw_upload_cs_work_groups_surface,
1901 };