1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
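/* Memory Object Control State (MOCS, i.e. cacheability) values indexed by
 * hardware generation: tex_mocs is used below for texture and buffer
 * surfaces, rb_mocs for render targets.
 */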
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target,
93 mt->array_layout);
94
95 if (surf.dim_layout != dim_layout) {
96 /* The layout of the specified texture target is not compatible with the
97 * actual layout of the miptree structure in memory -- you're entering
98 * dangerous territory. This can only work if you only intend to access
99 * a single level and slice of the texture, and the hardware supports
100 * the tile offset feature (to allow non-tile-aligned base offsets),
101 * since we'll have to point the hardware at the first texel of the
102 * level instead of relying on the usual base level/layer controls.
103 */
105 assert(brw->has_surface_tile_offset);
106 assert(view.levels == 1 && view.array_len == 1);
107 assert(tile_x == 0 && tile_y == 0);
108
109 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
110 view.base_array_layer,
111 &tile_x, &tile_y);
112
113 /* Minify the logical dimensions of the texture. */
114 const unsigned l = view.base_level - mt->first_level;
115 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
116 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
117 minify(surf.logical_level0_px.height, l);
118 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
119 minify(surf.logical_level0_px.depth, l);
120
121 /* Only the base level and layer can be addressed with the overridden
122 * layout.
123 */
124 surf.logical_level0_px.array_len = 1;
125 surf.levels = 1;
126 surf.dim_layout = dim_layout;
127
128 /* The requested slice of the texture is now at the base level and
129 * layer.
130 */
131 view.base_level = 0;
132 view.base_array_layer = 0;
133 }
134
135 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
136
137 struct brw_bo *aux_bo;
138 struct isl_surf *aux_surf = NULL, aux_surf_s;
139 uint64_t aux_offset = 0;
140 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
141 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
142 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
143 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
144 aux_surf = &aux_surf_s;
145
146 if (mt->mcs_buf) {
147 aux_bo = mt->mcs_buf->bo;
148 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
149 } else {
150 aux_bo = mt->hiz_buf->aux_base.bo;
151 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
152 }
153
154 /* We only really need a clear color if we also have an auxiliary
155 * surface. Without one, it does nothing.
156 */
157 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
158 }
159
160 void *state = brw_state_batch(brw,
161 brw->isl_dev.ss.size,
162 brw->isl_dev.ss.align,
163 surf_offset);
164
165 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
166 .address = mt->bo->offset64 + offset,
167 .aux_surf = aux_surf, .aux_usage = aux_usage,
168 .aux_address = aux_offset,
169 .mocs = mocs, .clear_color = clear_color,
170 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
171
172 brw_emit_reloc(&brw->batch, *surf_offset + brw->isl_dev.ss.addr_offset,
173 mt->bo, offset, read_domains, write_domains);
174
175 if (aux_surf) {
176 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
177 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
178 * contain other control information. Since buffer addresses are always
179 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
180 * an ordinary reloc to do the necessary address translation.
181 */
182 assert((aux_offset & 0xfff) == 0);
183 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
184 brw_emit_reloc(&brw->batch,
185 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
186 aux_bo, *aux_addr - aux_bo->offset64,
187 read_domains, write_domains);
188 }
189 }
190
191 uint32_t
192 brw_update_renderbuffer_surface(struct brw_context *brw,
193 struct gl_renderbuffer *rb,
194 uint32_t flags, unsigned unit /* unused */,
195 uint32_t surf_index)
196 {
197 struct gl_context *ctx = &brw->ctx;
198 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
199 struct intel_mipmap_tree *mt = irb->mt;
200
201 if (brw->gen < 9) {
202 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
203 }
204
205 assert(brw_render_target_supported(brw, rb));
206
207 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
208 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
209 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
210 __func__, _mesa_get_format_name(rb_format));
211 }
212
213 const unsigned layer_multiplier =
214 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
215 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
216 MAX2(irb->mt->num_samples, 1) : 1;
217
218 struct isl_view view = {
219 .format = brw->render_target_format[rb_format],
220 .base_level = irb->mt_level - irb->mt->first_level,
221 .levels = 1,
222 .base_array_layer = irb->mt_layer / layer_multiplier,
223 .array_len = MAX2(irb->layer_count, 1),
224 .swizzle = ISL_SWIZZLE_IDENTITY,
225 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
226 };
227
228 uint32_t offset;
229 brw_emit_surface_state(brw, mt, flags, mt->target, view,
230 rb_mocs[brw->gen],
231 &offset, surf_index,
232 I915_GEM_DOMAIN_RENDER,
233 I915_GEM_DOMAIN_RENDER);
234 return offset;
235 }
236
237 GLuint
238 translate_tex_target(GLenum target)
239 {
240 switch (target) {
241 case GL_TEXTURE_1D:
242 case GL_TEXTURE_1D_ARRAY_EXT:
243 return BRW_SURFACE_1D;
244
245 case GL_TEXTURE_RECTANGLE_NV:
246 return BRW_SURFACE_2D;
247
248 case GL_TEXTURE_2D:
249 case GL_TEXTURE_2D_ARRAY_EXT:
250 case GL_TEXTURE_EXTERNAL_OES:
251 case GL_TEXTURE_2D_MULTISAMPLE:
252 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
253 return BRW_SURFACE_2D;
254
255 case GL_TEXTURE_3D:
256 return BRW_SURFACE_3D;
257
258 case GL_TEXTURE_CUBE_MAP:
259 case GL_TEXTURE_CUBE_MAP_ARRAY:
260 return BRW_SURFACE_CUBE;
261
262 default:
263 unreachable("not reached");
264 }
265 }
266
267 uint32_t
268 brw_get_surface_tiling_bits(uint32_t tiling)
269 {
270 switch (tiling) {
271 case I915_TILING_X:
272 return BRW_SURFACE_TILED;
273 case I915_TILING_Y:
274 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
275 default:
276 return 0;
277 }
278 }
279
280
281 uint32_t
282 brw_get_surface_num_multisamples(unsigned num_samples)
283 {
284 if (num_samples > 1)
285 return BRW_SURFACE_MULTISAMPLECOUNT_4;
286 else
287 return BRW_SURFACE_MULTISAMPLECOUNT_1;
288 }
289
290 /**
291 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
292 * swizzling.
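*
* For example, a depth texture with DepthMode GL_LUMINANCE yields base
* swizzles (X, X, X, ONE), so a texture swizzle of GL_GREEN on any channel
* ends up reading the X (depth) component.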
293 */
294 int
295 brw_get_texture_swizzle(const struct gl_context *ctx,
296 const struct gl_texture_object *t)
297 {
298 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
299
300 int swizzles[SWIZZLE_NIL + 1] = {
301 SWIZZLE_X,
302 SWIZZLE_Y,
303 SWIZZLE_Z,
304 SWIZZLE_W,
305 SWIZZLE_ZERO,
306 SWIZZLE_ONE,
307 SWIZZLE_NIL
308 };
309
310 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
311 img->_BaseFormat == GL_DEPTH_STENCIL) {
312 GLenum depth_mode = t->DepthMode;
313
314 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
315 * with depth component data specified with a sized internal format.
316 * Otherwise, it's left at the old default, GL_LUMINANCE.
317 */
318 if (_mesa_is_gles3(ctx) &&
319 img->InternalFormat != GL_DEPTH_COMPONENT &&
320 img->InternalFormat != GL_DEPTH_STENCIL) {
321 depth_mode = GL_RED;
322 }
323
324 switch (depth_mode) {
325 case GL_ALPHA:
326 swizzles[0] = SWIZZLE_ZERO;
327 swizzles[1] = SWIZZLE_ZERO;
328 swizzles[2] = SWIZZLE_ZERO;
329 swizzles[3] = SWIZZLE_X;
330 break;
331 case GL_LUMINANCE:
332 swizzles[0] = SWIZZLE_X;
333 swizzles[1] = SWIZZLE_X;
334 swizzles[2] = SWIZZLE_X;
335 swizzles[3] = SWIZZLE_ONE;
336 break;
337 case GL_INTENSITY:
338 swizzles[0] = SWIZZLE_X;
339 swizzles[1] = SWIZZLE_X;
340 swizzles[2] = SWIZZLE_X;
341 swizzles[3] = SWIZZLE_X;
342 break;
343 case GL_RED:
344 swizzles[0] = SWIZZLE_X;
345 swizzles[1] = SWIZZLE_ZERO;
346 swizzles[2] = SWIZZLE_ZERO;
347 swizzles[3] = SWIZZLE_ONE;
348 break;
349 }
350 }
351
352 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
353
354 /* If the texture's format is alpha-only, force R, G, and B to
355 * 0.0. Similarly, if the texture's format has no alpha channel,
356 * force the alpha value read to 1.0. This allows for the
357 * implementation to use an RGBA texture for any of these formats
358 * without leaking any unexpected values.
359 */
360 switch (img->_BaseFormat) {
361 case GL_ALPHA:
362 swizzles[0] = SWIZZLE_ZERO;
363 swizzles[1] = SWIZZLE_ZERO;
364 swizzles[2] = SWIZZLE_ZERO;
365 break;
366 case GL_LUMINANCE:
367 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
368 swizzles[0] = SWIZZLE_X;
369 swizzles[1] = SWIZZLE_X;
370 swizzles[2] = SWIZZLE_X;
371 swizzles[3] = SWIZZLE_ONE;
372 }
373 break;
374 case GL_LUMINANCE_ALPHA:
375 if (datatype == GL_SIGNED_NORMALIZED) {
376 swizzles[0] = SWIZZLE_X;
377 swizzles[1] = SWIZZLE_X;
378 swizzles[2] = SWIZZLE_X;
379 swizzles[3] = SWIZZLE_W;
380 }
381 break;
382 case GL_INTENSITY:
383 if (datatype == GL_SIGNED_NORMALIZED) {
384 swizzles[0] = SWIZZLE_X;
385 swizzles[1] = SWIZZLE_X;
386 swizzles[2] = SWIZZLE_X;
387 swizzles[3] = SWIZZLE_X;
388 }
389 break;
390 case GL_RED:
391 case GL_RG:
392 case GL_RGB:
393 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
394 img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
395 img->TexFormat == MESA_FORMAT_SRGB_DXT1)
396 swizzles[3] = SWIZZLE_ONE;
397 break;
398 }
399
400 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
401 swizzles[GET_SWZ(t->_Swizzle, 1)],
402 swizzles[GET_SWZ(t->_Swizzle, 2)],
403 swizzles[GET_SWZ(t->_Swizzle, 3)]);
404 }
405
406 /**
407 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
408 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
409 *
410 *    SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
411 *    0,         1,         2,         3,         4,            5
412 *    4,         5,         6,         7,         0,            1
413 *    SCS_RED,   SCS_GREEN, SCS_BLUE,  SCS_ALPHA, SCS_ZERO,     SCS_ONE
414 *
415 * which is simply adding 4 then modding by 8 (or anding with 7).
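*
* For example, SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 = SCS_RED, and
* SWIZZLE_ZERO (4) becomes (4 + 4) & 7 = 0 = SCS_ZERO.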
416 *
417 * We then may need to apply workarounds for textureGather hardware bugs.
418 */
419 static unsigned
420 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
421 {
422 unsigned scs = (swizzle + 4) & 7;
423
424 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
425 }
426
427 static unsigned
428 brw_find_matching_rb(const struct gl_framebuffer *fb,
429 const struct intel_mipmap_tree *mt)
430 {
431 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
432 const struct intel_renderbuffer *irb =
433 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
434
435 if (irb && irb->mt == mt)
436 return i;
437 }
438
439 return fb->_NumColorDrawBuffers;
440 }
441
442 static inline bool
443 brw_texture_view_sane(const struct brw_context *brw,
444 const struct intel_mipmap_tree *mt,
445 const struct isl_view *view)
446 {
447 /* There are special cases only for lossless compression. */
448 if (!intel_miptree_is_lossless_compressed(brw, mt))
449 return true;
450
451 if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
452 return true;
453
454 /* Logic elsewhere needs to take care to resolve the color buffer prior
455 * to sampling it as non-compressed.
456 */
457 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
458 view->base_array_layer,
459 view->array_len))
460 return false;
461
462 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
463 const unsigned rb_index = brw_find_matching_rb(fb, mt);
464
465 if (rb_index == fb->_NumColorDrawBuffers)
466 return true;
467
468 /* Underlying surface is compressed but it is sampled using a format that
469 * the sampling engine doesn't support as compressed. Compression must be
470 * disabled for both sampling engine and data port in case the same surface
471 * is also used as a render target.
472 */
473 return brw->draw_aux_buffer_disabled[rb_index];
474 }
475
476 static bool
477 brw_disable_aux_surface(const struct brw_context *brw,
478 const struct intel_mipmap_tree *mt,
479 const struct isl_view *view)
480 {
481 /* Nothing to disable. */
482 if (!mt->mcs_buf)
483 return false;
484
485 const bool is_unresolved = intel_miptree_has_color_unresolved(
486 mt, view->base_level, view->levels,
487 view->base_array_layer, view->array_len);
488
489 /* There are special cases only for lossless compression. */
490 if (!intel_miptree_is_lossless_compressed(brw, mt))
491 return !is_unresolved;
492
493 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
494 const unsigned rb_index = brw_find_matching_rb(fb, mt);
495
496 /* If we are drawing into this with compression enabled, then we must also
497 * enable compression when texturing from it regardless of
498 * fast_clear_state. If we don't, then after the first draw call with
499 * this setup, there will be data in the CCS which won't get picked up by
500 * subsequent texturing operations as required by ARB_texture_barrier.
501 * Since we don't want to re-emit the binding table or do a resolve
502 * operation every draw call, the easiest thing to do is just enable
503 * compression on the texturing side. This is completely safe to do
504 * since, if compressed texturing weren't allowed, we would have disabled
505 * compression of render targets in whatever_that_function_is_called().
506 */
507 if (rb_index < fb->_NumColorDrawBuffers) {
508 if (brw->draw_aux_buffer_disabled[rb_index]) {
509 assert(!is_unresolved);
510 }
511
512 return brw->draw_aux_buffer_disabled[rb_index];
513 }
514
515 return !is_unresolved;
516 }
517
518 void
519 brw_update_texture_surface(struct gl_context *ctx,
520 unsigned unit,
521 uint32_t *surf_offset,
522 bool for_gather,
523 uint32_t plane)
524 {
525 struct brw_context *brw = brw_context(ctx);
526 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
527
528 if (obj->Target == GL_TEXTURE_BUFFER) {
529 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
530
531 } else {
532 struct intel_texture_object *intel_obj = intel_texture_object(obj);
533 struct intel_mipmap_tree *mt = intel_obj->mt;
534
535 if (plane > 0) {
536 if (mt->plane[plane - 1] == NULL)
537 return;
538 mt = mt->plane[plane - 1];
539 }
540
541 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
542 /* If this is a view with restricted NumLayers, then our effective depth
543 * is not just the miptree depth.
544 */
545 const unsigned view_num_layers =
546 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
547 mt->logical_depth0;
548
549 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
550 * texturing functions that return a float, as our code generation always
551 * selects the .x channel (which would always be 0).
552 */
553 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
554 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
555 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
556 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
557 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
558 brw_get_texture_swizzle(&brw->ctx, obj));
559
560 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
561 enum isl_format format = translate_tex_format(brw, mesa_fmt,
562 sampler->sRGBDecode);
563
564 /* Implement gen6 and gen7 gather work-around */
565 bool need_green_to_blue = false;
566 if (for_gather) {
567 if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
568 format == ISL_FORMAT_R32G32_SINT ||
569 format == ISL_FORMAT_R32G32_UINT)) {
570 format = ISL_FORMAT_R32G32_FLOAT_LD;
571 need_green_to_blue = brw->is_haswell;
572 } else if (brw->gen == 6) {
573 /* Sandybridge's gather4 message is broken for integer formats.
574 * To work around this, we pretend the surface is UNORM for
575 * 8 or 16-bit formats, and emit shader instructions to recover
576 * the real INT/UINT value. For 32-bit formats, we pretend
577 * the surface is FLOAT, and simply reinterpret the resulting
578 * bits.
579 */
580 switch (format) {
581 case ISL_FORMAT_R8_SINT:
582 case ISL_FORMAT_R8_UINT:
583 format = ISL_FORMAT_R8_UNORM;
584 break;
585
586 case ISL_FORMAT_R16_SINT:
587 case ISL_FORMAT_R16_UINT:
588 format = ISL_FORMAT_R16_UNORM;
589 break;
590
591 case ISL_FORMAT_R32_SINT:
592 case ISL_FORMAT_R32_UINT:
593 format = ISL_FORMAT_R32_FLOAT;
594 break;
595
596 default:
597 break;
598 }
599 }
600 }
601
602 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
603 if (brw->gen <= 7) {
604 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
605 mt = mt->r8stencil_mt;
606 } else {
607 mt = mt->stencil_mt;
608 }
609 format = ISL_FORMAT_R8_UINT;
610 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
611 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
612 mt = mt->r8stencil_mt;
613 format = ISL_FORMAT_R8_UINT;
614 }
615
616 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
617
618 struct isl_view view = {
619 .format = format,
620 .base_level = obj->MinLevel + obj->BaseLevel,
621 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
622 .base_array_layer = obj->MinLayer,
623 .array_len = view_num_layers,
624 .swizzle = {
625 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
626 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
627 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
628 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
629 },
630 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
631 };
632
633 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
634 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
635 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
636
637 assert(brw_texture_view_sane(brw, mt, &view));
638
639 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
640 INTEL_AUX_BUFFER_DISABLED : 0;
641 brw_emit_surface_state(brw, mt, flags, mt->target, view,
642 tex_mocs[brw->gen],
643 surf_offset, surf_index,
644 I915_GEM_DOMAIN_SAMPLER, 0);
645 }
646 }
647
648 void
649 brw_emit_buffer_surface_state(struct brw_context *brw,
650 uint32_t *out_offset,
651 struct brw_bo *bo,
652 unsigned buffer_offset,
653 unsigned surface_format,
654 unsigned buffer_size,
655 unsigned pitch,
656 bool rw)
657 {
658 uint32_t *dw = brw_state_batch(brw,
659 brw->isl_dev.ss.size,
660 brw->isl_dev.ss.align,
661 out_offset);
662
663 isl_buffer_fill_state(&brw->isl_dev, dw,
664 .address = (bo ? bo->offset64 : 0) + buffer_offset,
665 .size = buffer_size,
666 .format = surface_format,
667 .stride = pitch,
668 .mocs = tex_mocs[brw->gen]);
669
670 if (bo) {
671 brw_emit_reloc(&brw->batch, *out_offset + brw->isl_dev.ss.addr_offset,
672 bo, buffer_offset,
673 I915_GEM_DOMAIN_SAMPLER,
674 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
675 }
676 }
677
678 void
679 brw_update_buffer_texture_surface(struct gl_context *ctx,
680 unsigned unit,
681 uint32_t *surf_offset)
682 {
683 struct brw_context *brw = brw_context(ctx);
684 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
685 struct intel_buffer_object *intel_obj =
686 intel_buffer_object(tObj->BufferObject);
687 uint32_t size = tObj->BufferSize;
688 struct brw_bo *bo = NULL;
689 mesa_format format = tObj->_BufferObjectFormat;
690 const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
691 int texel_size = _mesa_get_format_bytes(format);
692
693 if (intel_obj) {
694 size = MIN2(size, intel_obj->Base.Size);
695 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
696 }
697
698 /* The ARB_texture_buffer_object spec says:
699 *
700 * "The number of texels in the buffer texture's texel array is given by
701 *
702 * floor(<buffer_size> / (<components> * sizeof(<base_type>))),
703 *
704 * where <buffer_size> is the size of the buffer object, in basic
705 * machine units and <components> and <base_type> are the element count
706 * and base data type for elements, as specified in Table X.1. The
707 * number of texels in the texel array is then clamped to the
708 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
709 *
710 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
711 * so that when ISL divides by stride to obtain the number of texels, that
712 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
713 */
714 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
715
716 if (isl_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
717 _mesa_problem(NULL, "bad format %s for texture buffer\n",
718 _mesa_get_format_name(format));
719 }
720
721 brw_emit_buffer_surface_state(brw, surf_offset, bo,
722 tObj->BufferOffset,
723 isl_format,
724 size,
725 texel_size,
726 false /* rw */);
727 }
728
729 /**
730 * Create the constant buffer surface. Vertex/fragment shader constants will be
731 * read from this buffer with Data Port Read instructions/messages.
732 */
733 void
734 brw_create_constant_surface(struct brw_context *brw,
735 struct brw_bo *bo,
736 uint32_t offset,
737 uint32_t size,
738 uint32_t *out_offset)
739 {
740 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
741 ISL_FORMAT_R32G32B32A32_FLOAT,
742 size, 1, false);
743 }
744
745 /**
746 * Create the buffer surface. Shader buffer variables will be
747 * read from / write to this buffer with Data Port Read/Write
748 * instructions/messages.
749 */
750 void
751 brw_create_buffer_surface(struct brw_context *brw,
752 struct brw_bo *bo,
753 uint32_t offset,
754 uint32_t size,
755 uint32_t *out_offset)
756 {
757 /* Use a raw surface so we can reuse existing untyped read/write/atomic
758 * messages. We need these specifically for the fragment shader since they
759 * include a pixel mask header that we need in order to ensure correct
760 * behavior with helper invocations, which cannot write to the buffer.
761 */
762 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
763 ISL_FORMAT_RAW,
764 size, 1, true);
765 }
766
767 /**
768 * Set up a binding table entry for use by stream output logic (transform
769 * feedback).
770 *
771 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
772 */
773 void
774 brw_update_sol_surface(struct brw_context *brw,
775 struct gl_buffer_object *buffer_obj,
776 uint32_t *out_offset, unsigned num_vector_components,
777 unsigned stride_dwords, unsigned offset_dwords)
778 {
779 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
780 uint32_t offset_bytes = 4 * offset_dwords;
781 struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
782 offset_bytes,
783 buffer_obj->Size - offset_bytes);
784 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
785 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
786 size_t size_dwords = buffer_obj->Size / 4;
787 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
788
789 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
790 * too big to map using a single binding table entry?
791 */
792 assert((size_dwords - offset_dwords) / stride_dwords
793 <= BRW_MAX_NUM_BUFFER_ENTRIES);
794
795 if (size_dwords > offset_dwords + num_vector_components) {
796 /* There is room for at least 1 transform feedback output in the buffer.
797 * Compute the number of additional transform feedback outputs the
798 * buffer has room for.
799 */
800 buffer_size_minus_1 =
801 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
802 } else {
803 /* There isn't even room for a single transform feedback output in the
804 * buffer. We can't configure the binding table entry to prevent output
805 * entirely; we'll have to rely on the geometry shader to detect
806 * overflow. But to minimize the damage in case of a bug, set up the
807 * binding table entry to just allow a single output.
808 */
809 buffer_size_minus_1 = 0;
810 }
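/* buffer_size_minus_1 is split across the SURFACE_STATE Width (bits 6:0),
 * Height (bits 19:7), and Depth (bits 26:20) fields below. For example,
 * buffer_size_minus_1 = 0x12345 yields width = 0x45, height = 0x246 and
 * depth = 0.
 */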
811 width = buffer_size_minus_1 & 0x7f;
812 height = (buffer_size_minus_1 & 0xfff80) >> 7;
813 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
814
815 switch (num_vector_components) {
816 case 1:
817 surface_format = ISL_FORMAT_R32_FLOAT;
818 break;
819 case 2:
820 surface_format = ISL_FORMAT_R32G32_FLOAT;
821 break;
822 case 3:
823 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
824 break;
825 case 4:
826 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
827 break;
828 default:
829 unreachable("Invalid vector size for transform feedback output");
830 }
831
832 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
833 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
834 surface_format << BRW_SURFACE_FORMAT_SHIFT |
835 BRW_SURFACE_RC_READ_WRITE;
836 surf[1] = bo->offset64 + offset_bytes; /* reloc */
837 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
838 height << BRW_SURFACE_HEIGHT_SHIFT);
839 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
840 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
841 surf[4] = 0;
842 surf[5] = 0;
843
844 /* Emit relocation to surface contents. */
845 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, offset_bytes,
846 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
847 }
848
849 /* Creates a new WM constant buffer reflecting the current fragment program's
850 * constants, if needed by the fragment program.
851 *
852 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
853 * state atom.
854 */
855 static void
856 brw_upload_wm_pull_constants(struct brw_context *brw)
857 {
858 struct brw_stage_state *stage_state = &brw->wm.base;
859 /* BRW_NEW_FRAGMENT_PROGRAM */
860 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
861 /* BRW_NEW_FS_PROG_DATA */
862 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
863
864 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
865 /* _NEW_PROGRAM_CONSTANTS */
866 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
867 stage_state, prog_data);
868 }
869
870 const struct brw_tracked_state brw_wm_pull_constants = {
871 .dirty = {
872 .mesa = _NEW_PROGRAM_CONSTANTS,
873 .brw = BRW_NEW_BATCH |
874 BRW_NEW_BLORP |
875 BRW_NEW_FRAGMENT_PROGRAM |
876 BRW_NEW_FS_PROG_DATA,
877 },
878 .emit = brw_upload_wm_pull_constants,
879 };
880
881 /**
882 * Creates a null renderbuffer surface.
883 *
884 * This is used when the shader doesn't write to any color output. An FB
885 * write to target 0 will still be emitted, because that's how the thread is
886 * terminated (and computed depth is returned), so we need to have the
887 * hardware discard the target 0 color output.
888 */
889 static void
890 brw_emit_null_surface_state(struct brw_context *brw,
891 unsigned width,
892 unsigned height,
893 unsigned samples,
894 uint32_t *out_offset)
895 {
896 /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
897 * Notes):
898 *
899 * A null surface will be used in instances where an actual surface is
900 * not bound. When a write message is generated to a null surface, no
901 * actual surface is written to. When a read message (including any
902 * sampling engine message) is generated to a null surface, the result
903 * is all zeros. Note that a null surface type is allowed to be used
904 * with all messages, even if it is not specifically indicated as
905 * supported. All of the remaining fields in surface state are ignored
906 * for null surfaces, with the following exceptions:
907 *
908 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
909 * depth buffer’s corresponding state for all render target surfaces,
910 * including null.
911 *
912 * - Surface Format must be R8G8B8A8_UNORM.
913 */
914 unsigned surface_type = BRW_SURFACE_NULL;
915 struct brw_bo *bo = NULL;
916 unsigned pitch_minus_1 = 0;
917 uint32_t multisampling_state = 0;
918 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
919
920 if (samples > 1) {
921 /* On Gen6, null render targets seem to cause GPU hangs when
922 * multisampling. So work around this problem by rendering into a dummy
923 * color buffer.
924 *
925 * To decrease the amount of memory needed by the workaround buffer, we
926 * set its pitch to 128 bytes (the width of a Y tile). This means that
927 * the amount of memory needed for the workaround buffer is
928 * (width_in_tiles + height_in_tiles - 1) tiles.
929 *
930 * Note that since the workaround buffer will be interpreted by the
931 * hardware as an interleaved multisampled buffer, we need to compute
932 * width_in_tiles and height_in_tiles by dividing the width and height
933 * by 16 rather than the normal Y-tile size of 32.
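*
* For example, a 1920x1080 multisampled target needs
* (120 + 68 - 1) * 4096 bytes, i.e. roughly 748 KB, rather than a
* full-sized dummy render target.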
934 */
935 unsigned width_in_tiles = ALIGN(width, 16) / 16;
936 unsigned height_in_tiles = ALIGN(height, 16) / 16;
937 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
938 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
939 size_needed);
940 bo = brw->wm.multisampled_null_render_target_bo;
941 surface_type = BRW_SURFACE_2D;
942 pitch_minus_1 = 127;
943 multisampling_state = brw_get_surface_num_multisamples(samples);
944 }
945
946 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
947 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
948 if (brw->gen < 6) {
949 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
950 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
951 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
952 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
953 }
954 surf[1] = bo ? bo->offset64 : 0;
955 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
956 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
957
958 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
959 * Notes):
960 *
961 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
962 */
963 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
964 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
965 surf[4] = multisampling_state;
966 surf[5] = 0;
967
968 if (bo) {
969 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, 0,
970 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
971 }
972 }
973
974 /**
975 * Sets up a surface state structure to point at the given region.
976 * While it is only used for the front/back buffer currently, it should be
977 * usable for further buffers when doing ARB_draw_buffers support.
978 */
979 static uint32_t
980 gen4_update_renderbuffer_surface(struct brw_context *brw,
981 struct gl_renderbuffer *rb,
982 uint32_t flags, unsigned unit,
983 uint32_t surf_index)
984 {
985 struct gl_context *ctx = &brw->ctx;
986 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
987 struct intel_mipmap_tree *mt = irb->mt;
988 uint32_t *surf;
989 uint32_t tile_x, tile_y;
990 enum isl_format format;
991 uint32_t offset;
992 /* _NEW_BUFFERS */
993 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
994 /* BRW_NEW_FS_PROG_DATA */
995
996 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
997 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
998
999 if (rb->TexImage && !brw->has_surface_tile_offset) {
1000 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
1001
1002 if (tile_x != 0 || tile_y != 0) {
1003 /* Original gen4 hardware couldn't draw to a non-tile-aligned
1004 * destination in a miptree unless you actually set up your renderbuffer
1005 * as a miptree and used the fragile lod/array_index/etc. controls to
1006 * select the image. So, instead, we just make a new single-level
1007 * miptree and render into that.
1008 */
1009 intel_renderbuffer_move_to_temp(brw, irb, false);
1010 mt = irb->mt;
1011 }
1012 }
1013
1014 surf = brw_state_batch(brw, 6 * 4, 32, &offset);
1015
1016 format = brw->render_target_format[rb_format];
1017 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1018 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1019 __func__, _mesa_get_format_name(rb_format));
1020 }
1021
1022 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1023 format << BRW_SURFACE_FORMAT_SHIFT);
1024
1025 /* reloc */
1026 assert(mt->offset % mt->cpp == 0);
1027 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1028 mt->bo->offset64 + mt->offset);
1029
1030 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1031 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1032
1033 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1034 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1035
1036 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1037
1038 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1039 /* Note that these fields only hold tile_x/4 and tile_y/2, so the low
1040 * bits of the tile offsets cannot be represented; the asserts below
1041 * guarantee they are zero.
1042 */
1042 assert(tile_x % 4 == 0);
1043 assert(tile_y % 2 == 0);
1044 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1045 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1046 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1047
1048 if (brw->gen < 6) {
1049 /* _NEW_COLOR */
1050 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1051 (ctx->Color.BlendEnabled & (1 << unit)))
1052 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1053
1054 if (!ctx->Color.ColorMask[unit][0])
1055 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1056 if (!ctx->Color.ColorMask[unit][1])
1057 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1058 if (!ctx->Color.ColorMask[unit][2])
1059 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1060
1061 /* Disable writes to the alpha component when the renderbuffer is XRGB. */
1064 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1065 !ctx->Color.ColorMask[unit][3]) {
1066 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1067 }
1068 }
1069
1070 brw_emit_reloc(&brw->batch, offset + 4, mt->bo, surf[1] - mt->bo->offset64,
1071 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
1072
1073 return offset;
1074 }
1075
1076 /**
1077 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1078 */
1079 void
1080 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1081 const struct gl_framebuffer *fb,
1082 uint32_t render_target_start,
1083 uint32_t *surf_offset)
1084 {
1085 GLuint i;
1086 const unsigned int w = _mesa_geometric_width(fb);
1087 const unsigned int h = _mesa_geometric_height(fb);
1088 const unsigned int s = _mesa_geometric_samples(fb);
1089
1090 /* Update surfaces for drawing buffers */
1091 if (fb->_NumColorDrawBuffers >= 1) {
1092 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1093 const uint32_t surf_index = render_target_start + i;
1094 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1095 INTEL_RENDERBUFFER_LAYERED : 0) |
1096 (brw->draw_aux_buffer_disabled[i] ?
1097 INTEL_AUX_BUFFER_DISABLED : 0);
1098
1099 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1100 surf_offset[surf_index] =
1101 brw->vtbl.update_renderbuffer_surface(
1102 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1103 } else {
1104 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1105 &surf_offset[surf_index]);
1106 }
1107 }
1108 } else {
1109 const uint32_t surf_index = render_target_start;
1110 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1111 &surf_offset[surf_index]);
1112 }
1113 }
1114
1115 static void
1116 update_renderbuffer_surfaces(struct brw_context *brw)
1117 {
1118 const struct gl_context *ctx = &brw->ctx;
1119
1120 /* BRW_NEW_FS_PROG_DATA */
1121 const struct brw_wm_prog_data *wm_prog_data =
1122 brw_wm_prog_data(brw->wm.base.prog_data);
1123
1124 /* _NEW_BUFFERS | _NEW_COLOR */
1125 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1126 brw_update_renderbuffer_surfaces(
1127 brw, fb,
1128 wm_prog_data->binding_table.render_target_start,
1129 brw->wm.base.surf_offset);
1130 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1131 }
1132
1133 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1134 .dirty = {
1135 .mesa = _NEW_BUFFERS |
1136 _NEW_COLOR,
1137 .brw = BRW_NEW_BATCH |
1138 BRW_NEW_BLORP |
1139 BRW_NEW_FS_PROG_DATA,
1140 },
1141 .emit = update_renderbuffer_surfaces,
1142 };
1143
1144 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1145 .dirty = {
1146 .mesa = _NEW_BUFFERS,
1147 .brw = BRW_NEW_BATCH |
1148 BRW_NEW_BLORP,
1149 },
1150 .emit = update_renderbuffer_surfaces,
1151 };
1152
1153 static void
1154 update_renderbuffer_read_surfaces(struct brw_context *brw)
1155 {
1156 const struct gl_context *ctx = &brw->ctx;
1157
1158 /* BRW_NEW_FS_PROG_DATA */
1159 const struct brw_wm_prog_data *wm_prog_data =
1160 brw_wm_prog_data(brw->wm.base.prog_data);
1161
1162 /* BRW_NEW_FRAGMENT_PROGRAM */
1163 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1164 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1165 /* _NEW_BUFFERS */
1166 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1167
1168 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1169 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1170 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1171 const unsigned surf_index =
1172 wm_prog_data->binding_table.render_target_read_start + i;
1173 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1174
1175 if (irb) {
1176 const enum isl_format format = brw->render_target_format[
1177 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1178 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1179 format));
1180
1181 /* Override the target of the texture if the render buffer is a
1182 * single slice of a 3D texture (since the minimum array element
1183 * field of the surface state structure is ignored by the sampler
1184 * unit for 3D textures on some hardware), or if the render buffer
1185 * is a 1D array (since shaders always provide the array index
1186 * coordinate at the Z component to avoid state-dependent
1187 * recompiles when changing the texture target of the
1188 * framebuffer).
1189 */
1190 const GLenum target =
1191 (irb->mt->target == GL_TEXTURE_3D &&
1192 irb->layer_count == 1) ? GL_TEXTURE_2D :
1193 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1194 irb->mt->target;
1195
1196 /* intel_renderbuffer::mt_layer is expressed in sample units for
1197 * the UMS and CMS multisample layouts, but
1198 * intel_renderbuffer::layer_count is expressed in units of whole
1199 * logical layers regardless of the multisample layout.
1200 */
1201 const unsigned mt_layer_unit =
1202 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1203 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1204 MAX2(irb->mt->num_samples, 1) : 1;
1205
1206 const struct isl_view view = {
1207 .format = format,
1208 .base_level = irb->mt_level - irb->mt->first_level,
1209 .levels = 1,
1210 .base_array_layer = irb->mt_layer / mt_layer_unit,
1211 .array_len = irb->layer_count,
1212 .swizzle = ISL_SWIZZLE_IDENTITY,
1213 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1214 };
1215
1216 const int flags = brw->draw_aux_buffer_disabled[i] ?
1217 INTEL_AUX_BUFFER_DISABLED : 0;
1218 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1219 tex_mocs[brw->gen],
1220 surf_offset, surf_index,
1221 I915_GEM_DOMAIN_SAMPLER, 0);
1222
1223 } else {
1224 brw->vtbl.emit_null_surface_state(
1225 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1226 _mesa_geometric_samples(fb), surf_offset);
1227 }
1228 }
1229
1230 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1231 }
1232 }
1233
1234 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1235 .dirty = {
1236 .mesa = _NEW_BUFFERS,
1237 .brw = BRW_NEW_BATCH |
1238 BRW_NEW_FRAGMENT_PROGRAM |
1239 BRW_NEW_FS_PROG_DATA,
1240 },
1241 .emit = update_renderbuffer_read_surfaces,
1242 };
1243
1244 static void
1245 update_stage_texture_surfaces(struct brw_context *brw,
1246 const struct gl_program *prog,
1247 struct brw_stage_state *stage_state,
1248 bool for_gather, uint32_t plane)
1249 {
1250 if (!prog)
1251 return;
1252
1253 struct gl_context *ctx = &brw->ctx;
1254
1255 uint32_t *surf_offset = stage_state->surf_offset;
1256
1257 /* BRW_NEW_*_PROG_DATA */
1258 if (for_gather)
1259 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1260 else
1261 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1262
1263 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1264 for (unsigned s = 0; s < num_samplers; s++) {
1265 surf_offset[s] = 0;
1266
1267 if (prog->SamplersUsed & (1 << s)) {
1268 const unsigned unit = prog->SamplerUnits[s];
1269
1270 /* _NEW_TEXTURE */
1271 if (ctx->Texture.Unit[unit]._Current) {
1272 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1273 }
1274 }
1275 }
1276 }
1277
1278
1279 /**
1280 * Construct SURFACE_STATE objects for enabled textures.
1281 */
1282 static void
1283 brw_update_texture_surfaces(struct brw_context *brw)
1284 {
1285 /* BRW_NEW_VERTEX_PROGRAM */
1286 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1287
1288 /* BRW_NEW_TESS_PROGRAMS */
1289 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1290 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1291
1292 /* BRW_NEW_GEOMETRY_PROGRAM */
1293 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1294
1295 /* BRW_NEW_FRAGMENT_PROGRAM */
1296 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1297
1298 /* _NEW_TEXTURE */
1299 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1300 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1301 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1302 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1303 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1304
1305 /* Emit an alternate set of surface states for gather. This
1306 * allows the surface format to be overridden for only the
1307 * gather4 messages. */
1308 if (brw->gen < 8) {
1309 if (vs && vs->nir->info.uses_texture_gather)
1310 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1311 if (tcs && tcs->nir->info.uses_texture_gather)
1312 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1313 if (tes && tes->nir->info.uses_texture_gather)
1314 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1315 if (gs && gs->nir->info.uses_texture_gather)
1316 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1317 if (fs && fs->nir->info.uses_texture_gather)
1318 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1319 }
1320
1321 if (fs) {
1322 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1323 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1324 }
1325
1326 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1327 }
1328
1329 const struct brw_tracked_state brw_texture_surfaces = {
1330 .dirty = {
1331 .mesa = _NEW_TEXTURE,
1332 .brw = BRW_NEW_BATCH |
1333 BRW_NEW_BLORP |
1334 BRW_NEW_FRAGMENT_PROGRAM |
1335 BRW_NEW_FS_PROG_DATA |
1336 BRW_NEW_GEOMETRY_PROGRAM |
1337 BRW_NEW_GS_PROG_DATA |
1338 BRW_NEW_TESS_PROGRAMS |
1339 BRW_NEW_TCS_PROG_DATA |
1340 BRW_NEW_TES_PROG_DATA |
1341 BRW_NEW_TEXTURE_BUFFER |
1342 BRW_NEW_VERTEX_PROGRAM |
1343 BRW_NEW_VS_PROG_DATA,
1344 },
1345 .emit = brw_update_texture_surfaces,
1346 };
1347
1348 static void
1349 brw_update_cs_texture_surfaces(struct brw_context *brw)
1350 {
1351 /* BRW_NEW_COMPUTE_PROGRAM */
1352 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1353
1354 /* _NEW_TEXTURE */
1355 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1356
1357 /* Emit an alternate set of surface states for gather. This
1358 * allows the surface format to be overridden for only the
1359 * gather4 messages.
1360 */
1361 if (brw->gen < 8) {
1362 if (cs && cs->nir->info.uses_texture_gather)
1363 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1364 }
1365
1366 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1367 }
1368
1369 const struct brw_tracked_state brw_cs_texture_surfaces = {
1370 .dirty = {
1371 .mesa = _NEW_TEXTURE,
1372 .brw = BRW_NEW_BATCH |
1373 BRW_NEW_BLORP |
1374 BRW_NEW_COMPUTE_PROGRAM,
1375 },
1376 .emit = brw_update_cs_texture_surfaces,
1377 };
1378
1379
1380 void
1381 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1382 struct brw_stage_state *stage_state,
1383 struct brw_stage_prog_data *prog_data)
1384 {
1385 struct gl_context *ctx = &brw->ctx;
1386
1387 if (!prog)
1388 return;
1389
1390 uint32_t *ubo_surf_offsets =
1391 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1392
1393 for (int i = 0; i < prog->info.num_ubos; i++) {
1394 struct gl_uniform_buffer_binding *binding =
1395 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1396
1397 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1398 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1399 } else {
1400 struct intel_buffer_object *intel_bo =
1401 intel_buffer_object(binding->BufferObject);
1402 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1403 if (!binding->AutomaticSize)
1404 size = MIN2(size, binding->Size);
1405 struct brw_bo *bo =
1406 intel_bufferobj_buffer(brw, intel_bo,
1407 binding->Offset,
1408 size);
1409 brw_create_constant_surface(brw, bo, binding->Offset,
1410 size,
1411 &ubo_surf_offsets[i]);
1412 }
1413 }
1414
1415 uint32_t *ssbo_surf_offsets =
1416 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1417
1418 for (int i = 0; i < prog->info.num_ssbos; i++) {
1419 struct gl_shader_storage_buffer_binding *binding =
1420 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1421
1422 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1423 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1424 } else {
1425 struct intel_buffer_object *intel_bo =
1426 intel_buffer_object(binding->BufferObject);
1427 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1428 if (!binding->AutomaticSize)
1429 size = MIN2(size, binding->Size);
1430 struct brw_bo *bo =
1431 intel_bufferobj_buffer(brw, intel_bo,
1432 binding->Offset,
1433 size);
1434 brw_create_buffer_surface(brw, bo, binding->Offset,
1435 size,
1436 &ssbo_surf_offsets[i]);
1437 }
1438 }
1439
1440 if (prog->info.num_ubos || prog->info.num_ssbos)
1441 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1442 }
1443
1444 static void
1445 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1446 {
1447 struct gl_context *ctx = &brw->ctx;
1448 /* _NEW_PROGRAM */
1449 struct gl_program *prog = ctx->FragmentProgram._Current;
1450
1451 /* BRW_NEW_FS_PROG_DATA */
1452 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1453 }
1454
1455 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1456 .dirty = {
1457 .mesa = _NEW_PROGRAM,
1458 .brw = BRW_NEW_BATCH |
1459 BRW_NEW_BLORP |
1460 BRW_NEW_FS_PROG_DATA |
1461 BRW_NEW_UNIFORM_BUFFER,
1462 },
1463 .emit = brw_upload_wm_ubo_surfaces,
1464 };
1465
1466 static void
1467 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1468 {
1469 struct gl_context *ctx = &brw->ctx;
1470 /* _NEW_PROGRAM */
1471 struct gl_program *prog =
1472 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1473
1474 /* BRW_NEW_CS_PROG_DATA */
1475 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1476 }
1477
1478 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1479 .dirty = {
1480 .mesa = _NEW_PROGRAM,
1481 .brw = BRW_NEW_BATCH |
1482 BRW_NEW_BLORP |
1483 BRW_NEW_CS_PROG_DATA |
1484 BRW_NEW_UNIFORM_BUFFER,
1485 },
1486 .emit = brw_upload_cs_ubo_surfaces,
1487 };
1488
1489 void
1490 brw_upload_abo_surfaces(struct brw_context *brw,
1491 const struct gl_program *prog,
1492 struct brw_stage_state *stage_state,
1493 struct brw_stage_prog_data *prog_data)
1494 {
1495 struct gl_context *ctx = &brw->ctx;
1496 uint32_t *surf_offsets =
1497 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1498
1499 if (prog->info.num_abos) {
1500 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1501 struct gl_atomic_buffer_binding *binding =
1502 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1503 struct intel_buffer_object *intel_bo =
1504 intel_buffer_object(binding->BufferObject);
1505 struct brw_bo *bo = intel_bufferobj_buffer(
1506 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1507
1508 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1509 binding->Offset, ISL_FORMAT_RAW,
1510 bo->size - binding->Offset, 1, true);
1511 }
1512
1513 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1514 }
1515 }
1516
1517 static void
1518 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1519 {
1520 /* _NEW_PROGRAM */
1521 const struct gl_program *wm = brw->fragment_program;
1522
1523 if (wm) {
1524 /* BRW_NEW_FS_PROG_DATA */
1525 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1526 }
1527 }
1528
1529 const struct brw_tracked_state brw_wm_abo_surfaces = {
1530 .dirty = {
1531 .mesa = _NEW_PROGRAM,
1532 .brw = BRW_NEW_ATOMIC_BUFFER |
1533 BRW_NEW_BLORP |
1534 BRW_NEW_BATCH |
1535 BRW_NEW_FS_PROG_DATA,
1536 },
1537 .emit = brw_upload_wm_abo_surfaces,
1538 };
1539
1540 static void
1541 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1542 {
1543 /* _NEW_PROGRAM */
1544 const struct gl_program *cp = brw->compute_program;
1545
1546 if (cp) {
1547 /* BRW_NEW_CS_PROG_DATA */
1548 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1549 }
1550 }
1551
1552 const struct brw_tracked_state brw_cs_abo_surfaces = {
1553 .dirty = {
1554 .mesa = _NEW_PROGRAM,
1555 .brw = BRW_NEW_ATOMIC_BUFFER |
1556 BRW_NEW_BLORP |
1557 BRW_NEW_BATCH |
1558 BRW_NEW_CS_PROG_DATA,
1559 },
1560 .emit = brw_upload_cs_abo_surfaces,
1561 };
1562
1563 static void
1564 brw_upload_cs_image_surfaces(struct brw_context *brw)
1565 {
1566 /* _NEW_PROGRAM */
1567 const struct gl_program *cp = brw->compute_program;
1568
1569 if (cp) {
1570 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1571 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1572 brw->cs.base.prog_data);
1573 }
1574 }
1575
1576 const struct brw_tracked_state brw_cs_image_surfaces = {
1577 .dirty = {
1578 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1579 .brw = BRW_NEW_BATCH |
1580 BRW_NEW_BLORP |
1581 BRW_NEW_CS_PROG_DATA |
1582 BRW_NEW_IMAGE_UNITS
1583 },
1584 .emit = brw_upload_cs_image_surfaces,
1585 };
1586
1587 static uint32_t
1588 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1589 {
1590 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1591 enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
1592 if (access == GL_WRITE_ONLY) {
1593 return hw_format;
1594 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1595 /* Typed surface reads support a very limited subset of the shader
1596 * image formats. Translate it into the closest format the
1597 * hardware supports.
1598 */
1599 return isl_lower_storage_image_format(devinfo, hw_format);
1600 } else {
1601 /* The hardware doesn't actually support a typed format that we can use
1602 * so we have to fall back to untyped read/write messages.
1603 */
1604 return ISL_FORMAT_RAW;
1605 }
1606 }
1607
1608 static void
1609 update_default_image_param(struct brw_context *brw,
1610 struct gl_image_unit *u,
1611 unsigned surface_idx,
1612 struct brw_image_param *param)
1613 {
1614 memset(param, 0, sizeof(*param));
1615 param->surface_idx = surface_idx;
1616 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1617 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1618 * detailed explanation of these parameters.
1619 */
1620 param->swizzling[0] = 0xff;
1621 param->swizzling[1] = 0xff;
1622 }
1623
1624 static void
1625 update_buffer_image_param(struct brw_context *brw,
1626 struct gl_image_unit *u,
1627 unsigned surface_idx,
1628 struct brw_image_param *param)
1629 {
1630 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1631 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1632 update_default_image_param(brw, u, surface_idx, param);
1633
1634 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1635 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1636 }
1637
1638 static void
1639 update_texture_image_param(struct brw_context *brw,
1640 struct gl_image_unit *u,
1641 unsigned surface_idx,
1642 struct brw_image_param *param)
1643 {
1644 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1645
1646 update_default_image_param(brw, u, surface_idx, param);
1647
1648 param->size[0] = minify(mt->logical_width0, u->Level);
1649 param->size[1] = minify(mt->logical_height0, u->Level);
1650 param->size[2] = (!u->Layered ? 1 :
1651 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1652 u->TexObj->Target == GL_TEXTURE_3D ?
1653 minify(mt->logical_depth0, u->Level) :
1654 mt->logical_depth0);
1655
1656 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1657 &param->offset[0],
1658 &param->offset[1]);
1659
1660 param->stride[0] = mt->cpp;
1661 param->stride[1] = mt->pitch / mt->cpp;
1662 param->stride[2] =
1663 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1664 param->stride[3] =
1665 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1666
1667 if (mt->tiling == I915_TILING_X) {
1668 /* An X tile is a rectangular block of 512x8 bytes. */
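/* For a 4-byte format that is a 128x8-pixel block, so tiling[0] below is
 * log2(512 / 4) = 7 and tiling[1] is log2(8) = 3.
 */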
1669 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1670 param->tiling[1] = _mesa_logbase2(8);
1671
1672 if (brw->has_swizzling) {
1673 /* Right shifts required to swizzle bits 9 and 10 of the memory
1674 * address with bit 6.
1675 */
1676 param->swizzling[0] = 3;
1677 param->swizzling[1] = 4;
1678 }
1679 } else if (mt->tiling == I915_TILING_Y) {
1680 /* The layout of a Y-tiled surface in memory isn't fundamentally
1681 * different from the layout of an X-tiled surface; we simply pretend
1682 * that the surface is broken up into a number of smaller 16Bx32 tiles,
1683 * each one arranged in X-major order just as in the X-tiling case.
1684 */
1685 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1686 param->tiling[1] = _mesa_logbase2(32);
1687
1688 if (brw->has_swizzling) {
1689 /* Right shift required to swizzle bit 9 of the memory address with
1690 * bit 6.
1691 */
1692 param->swizzling[0] = 3;
1693 }
1694 }
1695
1696 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1697 * address calculation algorithm (emit_address_calculation() in
1698 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1699 * modulus equal to the LOD.
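*
* E.g. for an image bound at LOD 2 there are 2^2 = 4 slices per row, so
* tiling[2] is set to 2.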
1700 */
1701 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1702 0);
1703 }
1704
1705 static void
1706 update_image_surface(struct brw_context *brw,
1707 struct gl_image_unit *u,
1708 GLenum access,
1709 unsigned surface_idx,
1710 uint32_t *surf_offset,
1711 struct brw_image_param *param)
1712 {
1713 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1714 struct gl_texture_object *obj = u->TexObj;
1715 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1716
1717 if (obj->Target == GL_TEXTURE_BUFFER) {
1718 struct intel_buffer_object *intel_obj =
1719 intel_buffer_object(obj->BufferObject);
1720 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1721 _mesa_get_format_bytes(u->_ActualFormat));
1722
1723 brw_emit_buffer_surface_state(
1724 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1725 format, intel_obj->Base.Size, texel_size,
1726 access != GL_READ_ONLY);
1727
1728 update_buffer_image_param(brw, u, surface_idx, param);
1729
1730 } else {
1731 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1732 struct intel_mipmap_tree *mt = intel_obj->mt;
1733
1734 if (format == ISL_FORMAT_RAW) {
1735 brw_emit_buffer_surface_state(
1736 brw, surf_offset, mt->bo, mt->offset,
1737 format, mt->bo->size - mt->offset, 1 /* pitch */,
1738 access != GL_READ_ONLY);
1739
1740 } else {
1741 const unsigned num_layers = (!u->Layered ? 1 :
1742 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1743 mt->logical_depth0);
1744
1745 struct isl_view view = {
1746 .format = format,
1747 .base_level = obj->MinLevel + u->Level,
1748 .levels = 1,
1749 .base_array_layer = obj->MinLayer + u->_Layer,
1750 .array_len = num_layers,
1751 .swizzle = ISL_SWIZZLE_IDENTITY,
1752 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1753 };
1754
1755 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1756 const bool unresolved = intel_miptree_has_color_unresolved(
1757 mt, view.base_level, view.levels,
1758 view.base_array_layer, view.array_len);
1759 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1760 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1761 tex_mocs[brw->gen],
1762 surf_offset, surf_index,
1763 I915_GEM_DOMAIN_SAMPLER,
1764 access == GL_READ_ONLY ? 0 :
1765 I915_GEM_DOMAIN_SAMPLER);
1766 }
1767
1768 update_texture_image_param(brw, u, surface_idx, param);
1769 }
1770
1771 } else {
1772 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1773 update_default_image_param(brw, u, surface_idx, param);
1774 }
1775 }
1776
1777 void
1778 brw_upload_image_surfaces(struct brw_context *brw,
1779 const struct gl_program *prog,
1780 struct brw_stage_state *stage_state,
1781 struct brw_stage_prog_data *prog_data)
1782 {
1783 assert(prog);
1784 struct gl_context *ctx = &brw->ctx;
1785
1786 if (prog->info.num_images) {
1787 for (unsigned i = 0; i < prog->info.num_images; i++) {
1788 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1789 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1790
1791 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1792 surf_idx,
1793 &stage_state->surf_offset[surf_idx],
1794 &prog_data->image_param[i]);
1795 }
1796
1797 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1798 /* This may have changed the image metadata that depends on the context
1799 * image unit state and is passed to the program as uniforms; make sure
1800 * that push and pull constants are re-uploaded.
1801 */
1802 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1803 }
1804 }
1805
1806 static void
1807 brw_upload_wm_image_surfaces(struct brw_context *brw)
1808 {
1809 /* BRW_NEW_FRAGMENT_PROGRAM */
1810 const struct gl_program *wm = brw->fragment_program;
1811
1812 if (wm) {
1813 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1814 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1815 brw->wm.base.prog_data);
1816 }
1817 }
1818
1819 const struct brw_tracked_state brw_wm_image_surfaces = {
1820 .dirty = {
1821 .mesa = _NEW_TEXTURE,
1822 .brw = BRW_NEW_BATCH |
1823 BRW_NEW_BLORP |
1824 BRW_NEW_FRAGMENT_PROGRAM |
1825 BRW_NEW_FS_PROG_DATA |
1826 BRW_NEW_IMAGE_UNITS
1827 },
1828 .emit = brw_upload_wm_image_surfaces,
1829 };
1830
1831 void
1832 gen4_init_vtable_surface_functions(struct brw_context *brw)
1833 {
1834 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1835 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1836 }
1837
1838 void
1839 gen6_init_vtable_surface_functions(struct brw_context *brw)
1840 {
1841 gen4_init_vtable_surface_functions(brw);
1842 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1843 }
1844
1845 static void
1846 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1847 {
1848 struct gl_context *ctx = &brw->ctx;
1849 /* _NEW_PROGRAM */
1850 struct gl_program *prog =
1851 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1852 /* BRW_NEW_CS_PROG_DATA */
1853 const struct brw_cs_prog_data *cs_prog_data =
1854 brw_cs_prog_data(brw->cs.base.prog_data);
1855
1856 if (prog && cs_prog_data->uses_num_work_groups) {
1857 const unsigned surf_idx =
1858 cs_prog_data->binding_table.work_groups_start;
1859 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1860 struct brw_bo *bo;
1861 uint32_t bo_offset;
1862
1863 if (brw->compute.num_work_groups_bo == NULL) {
1864 bo = NULL;
1865 intel_upload_data(brw,
1866 (void *)brw->compute.num_work_groups,
1867 3 * sizeof(GLuint),
1868 sizeof(GLuint),
1869 &bo,
1870 &bo_offset);
1871 } else {
1872 bo = brw->compute.num_work_groups_bo;
1873 bo_offset = brw->compute.num_work_groups_offset;
1874 }
1875
1876 brw_emit_buffer_surface_state(brw, surf_offset,
1877 bo, bo_offset,
1878 ISL_FORMAT_RAW,
1879 3 * sizeof(GLuint), 1, true);
1880 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1881 }
1882 }
1883
1884 const struct brw_tracked_state brw_cs_work_groups_surface = {
1885 .dirty = {
1886 .brw = BRW_NEW_BLORP |
1887 BRW_NEW_CS_PROG_DATA |
1888 BRW_NEW_CS_WORK_GROUPS
1889 },
1890 .emit = brw_upload_cs_work_groups_surface,
1891 };