nir/i965/anv/radv/gallium: make shader info a pointer
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 struct surface_state_info {
64 unsigned num_dwords;
65 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
66 unsigned reloc_dw;
67 unsigned aux_reloc_dw;
68 unsigned tex_mocs;
69 unsigned rb_mocs;
70 };
71
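/* Per-generation RENDER_SURFACE_STATE parameters, indexed by brw->gen.
 * Generations before Gen7 have no auxiliary-surface reloc or MOCS fields
 * in surface state, so those entries are left zero.
 */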
72 static const struct surface_state_info surface_state_infos[] = {
73 [4] = {6, 32, 1, 0},
74 [5] = {6, 32, 1, 0},
75 [6] = {6, 32, 1, 0},
76 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
77 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
78 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
79 };
80
81 static void
82 brw_emit_surface_state(struct brw_context *brw,
83 struct intel_mipmap_tree *mt, uint32_t flags,
84 GLenum target, struct isl_view view,
85 uint32_t mocs, uint32_t *surf_offset, int surf_index,
86 unsigned read_domains, unsigned write_domains)
87 {
88 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
89 uint32_t tile_x = mt->level[0].slice[0].x_offset;
90 uint32_t tile_y = mt->level[0].slice[0].y_offset;
91 uint32_t offset = mt->offset;
92
93 struct isl_surf surf;
94 intel_miptree_get_isl_surf(brw, mt, &surf);
95
96 surf.dim = get_isl_surf_dim(target);
97
98 const enum isl_dim_layout dim_layout =
99 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
100
101 if (surf.dim_layout != dim_layout) {
102 /* The layout of the specified texture target is not compatible with the
103 * actual layout of the miptree structure in memory -- You're entering
104 * dangerous territory, this can only possibly work if you only intended
105 * to access a single level and slice of the texture, and the hardware
106 * supports the tile offset feature in order to allow non-tile-aligned
107 * base offsets, since we'll have to point the hardware to the first
108 * texel of the level instead of relying on the usual base level/layer
109 * controls.
110 */
111 assert(brw->has_surface_tile_offset);
112 assert(view.levels == 1 && view.array_len == 1);
113 assert(tile_x == 0 && tile_y == 0);
114
115 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
116 view.base_array_layer,
117 &tile_x, &tile_y);
118
119 /* Minify the logical dimensions of the texture. */
120 const unsigned l = view.base_level - mt->first_level;
121 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
122 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
123 minify(surf.logical_level0_px.height, l);
124 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
125 minify(surf.logical_level0_px.depth, l);
126
127 /* Only the base level and layer can be addressed with the overridden
128 * layout.
129 */
130 surf.logical_level0_px.array_len = 1;
131 surf.levels = 1;
132 surf.dim_layout = dim_layout;
133
134 /* The requested slice of the texture is now at the base level and
135 * layer.
136 */
137 view.base_level = 0;
138 view.base_array_layer = 0;
139 }
140
141 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
142
143 struct isl_surf *aux_surf = NULL, aux_surf_s;
144 uint64_t aux_offset = 0;
145 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
146 if (mt->mcs_mt && !(flags & INTEL_AUX_BUFFER_DISABLED)) {
147 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
148 aux_surf = &aux_surf_s;
149 assert(mt->mcs_mt->offset == 0);
150 aux_offset = mt->mcs_mt->bo->offset64;
151
152 /* We only really need a clear color if we also have an auxiliary
153        * surface. Without one, it does nothing.
154 */
155 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
156 }
157
158 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
159 ss_info.num_dwords * 4, ss_info.ss_align,
160 surf_index, surf_offset);
161
162 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
163 .address = mt->bo->offset64 + offset,
164 .aux_surf = aux_surf, .aux_usage = aux_usage,
165 .aux_address = aux_offset,
166 .mocs = mocs, .clear_color = clear_color,
167 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
168
169 drm_intel_bo_emit_reloc(brw->batch.bo,
170 *surf_offset + 4 * ss_info.reloc_dw,
171 mt->bo, offset,
172 read_domains, write_domains);
173
174 if (aux_surf) {
175 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
176 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
177 * contain other control information. Since buffer addresses are always
178 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
179 * an ordinary reloc to do the necessary address translation.
180 */
181 assert((aux_offset & 0xfff) == 0);
182 drm_intel_bo_emit_reloc(brw->batch.bo,
183 *surf_offset + 4 * ss_info.aux_reloc_dw,
184 mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
185 read_domains, write_domains);
186 }
187 }
188
189 uint32_t
190 brw_update_renderbuffer_surface(struct brw_context *brw,
191 struct gl_renderbuffer *rb,
192 uint32_t flags, unsigned unit /* unused */,
193 uint32_t surf_index)
194 {
195 struct gl_context *ctx = &brw->ctx;
196 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
197 struct intel_mipmap_tree *mt = irb->mt;
198
199 if (brw->gen < 9) {
200 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
201 }
202
203 assert(brw_render_target_supported(brw, rb));
204 intel_miptree_used_for_rendering(mt);
205
206 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
207 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
208 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
209 __func__, _mesa_get_format_name(rb_format));
210 }
211
212 const unsigned layer_multiplier =
213 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
214 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
215 MAX2(irb->mt->num_samples, 1) : 1;
216
217 struct isl_view view = {
218 .format = brw->render_target_format[rb_format],
219 .base_level = irb->mt_level - irb->mt->first_level,
220 .levels = 1,
221 .base_array_layer = irb->mt_layer / layer_multiplier,
222 .array_len = MAX2(irb->layer_count, 1),
223 .swizzle = ISL_SWIZZLE_IDENTITY,
224 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
225 };
226
227 uint32_t offset;
228 brw_emit_surface_state(brw, mt, flags, mt->target, view,
229 surface_state_infos[brw->gen].rb_mocs,
230 &offset, surf_index,
231 I915_GEM_DOMAIN_RENDER,
232 I915_GEM_DOMAIN_RENDER);
233 return offset;
234 }
235
236 GLuint
237 translate_tex_target(GLenum target)
238 {
239 switch (target) {
240 case GL_TEXTURE_1D:
241 case GL_TEXTURE_1D_ARRAY_EXT:
242 return BRW_SURFACE_1D;
243
244 case GL_TEXTURE_RECTANGLE_NV:
245 return BRW_SURFACE_2D;
246
247 case GL_TEXTURE_2D:
248 case GL_TEXTURE_2D_ARRAY_EXT:
249 case GL_TEXTURE_EXTERNAL_OES:
250 case GL_TEXTURE_2D_MULTISAMPLE:
251 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
252 return BRW_SURFACE_2D;
253
254 case GL_TEXTURE_3D:
255 return BRW_SURFACE_3D;
256
257 case GL_TEXTURE_CUBE_MAP:
258 case GL_TEXTURE_CUBE_MAP_ARRAY:
259 return BRW_SURFACE_CUBE;
260
261 default:
262 unreachable("not reached");
263 }
264 }
265
266 uint32_t
267 brw_get_surface_tiling_bits(uint32_t tiling)
268 {
269 switch (tiling) {
270 case I915_TILING_X:
271 return BRW_SURFACE_TILED;
272 case I915_TILING_Y:
273 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
274 default:
275 return 0;
276 }
277 }
278
279
280 uint32_t
281 brw_get_surface_num_multisamples(unsigned num_samples)
282 {
283 if (num_samples > 1)
284 return BRW_SURFACE_MULTISAMPLECOUNT_4;
285 else
286 return BRW_SURFACE_MULTISAMPLECOUNT_1;
287 }
288
289 /**
290 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
291 * swizzling.
292 */
293 int
294 brw_get_texture_swizzle(const struct gl_context *ctx,
295 const struct gl_texture_object *t)
296 {
297 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
298
299 int swizzles[SWIZZLE_NIL + 1] = {
300 SWIZZLE_X,
301 SWIZZLE_Y,
302 SWIZZLE_Z,
303 SWIZZLE_W,
304 SWIZZLE_ZERO,
305 SWIZZLE_ONE,
306 SWIZZLE_NIL
307 };
308
309 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
310 img->_BaseFormat == GL_DEPTH_STENCIL) {
311 GLenum depth_mode = t->DepthMode;
312
313 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
314 * with depth component data specified with a sized internal format.
315 * Otherwise, it's left at the old default, GL_LUMINANCE.
316 */
317 if (_mesa_is_gles3(ctx) &&
318 img->InternalFormat != GL_DEPTH_COMPONENT &&
319 img->InternalFormat != GL_DEPTH_STENCIL) {
320 depth_mode = GL_RED;
321 }
322
323 switch (depth_mode) {
324 case GL_ALPHA:
325 swizzles[0] = SWIZZLE_ZERO;
326 swizzles[1] = SWIZZLE_ZERO;
327 swizzles[2] = SWIZZLE_ZERO;
328 swizzles[3] = SWIZZLE_X;
329 break;
330 case GL_LUMINANCE:
331 swizzles[0] = SWIZZLE_X;
332 swizzles[1] = SWIZZLE_X;
333 swizzles[2] = SWIZZLE_X;
334 swizzles[3] = SWIZZLE_ONE;
335 break;
336 case GL_INTENSITY:
337 swizzles[0] = SWIZZLE_X;
338 swizzles[1] = SWIZZLE_X;
339 swizzles[2] = SWIZZLE_X;
340 swizzles[3] = SWIZZLE_X;
341 break;
342 case GL_RED:
343 swizzles[0] = SWIZZLE_X;
344 swizzles[1] = SWIZZLE_ZERO;
345 swizzles[2] = SWIZZLE_ZERO;
346 swizzles[3] = SWIZZLE_ONE;
347 break;
348 }
349 }
350
351 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
352
353 /* If the texture's format is alpha-only, force R, G, and B to
354 * 0.0. Similarly, if the texture's format has no alpha channel,
355 * force the alpha value read to 1.0. This allows for the
356 * implementation to use an RGBA texture for any of these formats
357 * without leaking any unexpected values.
358 */
359 switch (img->_BaseFormat) {
360 case GL_ALPHA:
361 swizzles[0] = SWIZZLE_ZERO;
362 swizzles[1] = SWIZZLE_ZERO;
363 swizzles[2] = SWIZZLE_ZERO;
364 break;
365 case GL_LUMINANCE:
366 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
367 swizzles[0] = SWIZZLE_X;
368 swizzles[1] = SWIZZLE_X;
369 swizzles[2] = SWIZZLE_X;
370 swizzles[3] = SWIZZLE_ONE;
371 }
372 break;
373 case GL_LUMINANCE_ALPHA:
374 if (datatype == GL_SIGNED_NORMALIZED) {
375 swizzles[0] = SWIZZLE_X;
376 swizzles[1] = SWIZZLE_X;
377 swizzles[2] = SWIZZLE_X;
378 swizzles[3] = SWIZZLE_W;
379 }
380 break;
381 case GL_INTENSITY:
382 if (datatype == GL_SIGNED_NORMALIZED) {
383 swizzles[0] = SWIZZLE_X;
384 swizzles[1] = SWIZZLE_X;
385 swizzles[2] = SWIZZLE_X;
386 swizzles[3] = SWIZZLE_X;
387 }
388 break;
389 case GL_RED:
390 case GL_RG:
391 case GL_RGB:
392 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
393 swizzles[3] = SWIZZLE_ONE;
394 break;
395 }
396
397 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
398 swizzles[GET_SWZ(t->_Swizzle, 1)],
399 swizzles[GET_SWZ(t->_Swizzle, 2)],
400 swizzles[GET_SWZ(t->_Swizzle, 3)]);
401 }
402
403 /**
404  * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
405  * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
406 *
407 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
408 * 0 1 2 3 4 5
409 * 4 5 6 7 0 1
410 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
411 *
412 * which is simply adding 4 then modding by 8 (or anding with 7).
413 *
414 * We then may need to apply workarounds for textureGather hardware bugs.
415 */
416 static unsigned
417 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
418 {
419 unsigned scs = (swizzle + 4) & 7;
420
421 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
422 }
423
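/* Return the index of the color draw buffer in the framebuffer that is backed
 * by the given miptree, or fb->_NumColorDrawBuffers if no draw buffer uses it.
 */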
424 static unsigned
425 brw_find_matching_rb(const struct gl_framebuffer *fb,
426 const struct intel_mipmap_tree *mt)
427 {
428 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
429 const struct intel_renderbuffer *irb =
430 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
431
432 if (irb && irb->mt == mt)
433 return i;
434 }
435
436 return fb->_NumColorDrawBuffers;
437 }
438
439 static inline bool
440 brw_texture_view_sane(const struct brw_context *brw,
441 const struct intel_mipmap_tree *mt, unsigned format)
442 {
443 /* There are special cases only for lossless compression. */
444 if (!intel_miptree_is_lossless_compressed(brw, mt))
445 return true;
446
447 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
448 format))
449 return true;
450
451 /* Logic elsewhere needs to take care to resolve the color buffer prior
452 * to sampling it as non-compressed.
453 */
454 if (mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)
455 return false;
456
457 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
458 const unsigned rb_index = brw_find_matching_rb(fb, mt);
459
460 if (rb_index == fb->_NumColorDrawBuffers)
461 return true;
462
463 /* Underlying surface is compressed but it is sampled using a format that
464 * the sampling engine doesn't support as compressed. Compression must be
465 * disabled for both sampling engine and data port in case the same surface
466    * is also used as a render target.
467 */
468 return brw->draw_aux_buffer_disabled[rb_index];
469 }
470
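/* Decide whether the auxiliary (MCS/CCS) surface should be omitted from the
 * surface state when sampling from the given miptree.
 */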
471 static bool
472 brw_disable_aux_surface(const struct brw_context *brw,
473 const struct intel_mipmap_tree *mt)
474 {
475 /* Nothing to disable. */
476 if (!mt->mcs_mt)
477 return false;
478
479 /* There are special cases only for lossless compression. */
480 if (!intel_miptree_is_lossless_compressed(brw, mt))
481 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
482
483 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
484 const unsigned rb_index = brw_find_matching_rb(fb, mt);
485
486 /* If we are drawing into this with compression enabled, then we must also
487 * enable compression when texturing from it regardless of
488     * fast_clear_state. If we don't, then after the first draw call with
489 * this setup, there will be data in the CCS which won't get picked up by
490 * subsequent texturing operations as required by ARB_texture_barrier.
491 * Since we don't want to re-emit the binding table or do a resolve
492 * operation every draw call, the easiest thing to do is just enable
493 * compression on the texturing side. This is completely safe to do
494 * since, if compressed texturing weren't allowed, we would have disabled
495 * compression of render targets in whatever_that_function_is_called().
496 */
497 if (rb_index < fb->_NumColorDrawBuffers) {
498 if (brw->draw_aux_buffer_disabled[rb_index]) {
499 assert(mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED);
500 }
501
502 return brw->draw_aux_buffer_disabled[rb_index];
503 }
504
505 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
506 }
507
508 void
509 brw_update_texture_surface(struct gl_context *ctx,
510 unsigned unit,
511 uint32_t *surf_offset,
512 bool for_gather,
513 uint32_t plane)
514 {
515 struct brw_context *brw = brw_context(ctx);
516 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
517
518 if (obj->Target == GL_TEXTURE_BUFFER) {
519 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
520
521 } else {
522 struct intel_texture_object *intel_obj = intel_texture_object(obj);
523 struct intel_mipmap_tree *mt = intel_obj->mt;
524
525 if (plane > 0) {
526 if (mt->plane[plane - 1] == NULL)
527 return;
528 mt = mt->plane[plane - 1];
529 }
530
531 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
532 /* If this is a view with restricted NumLayers, then our effective depth
533 * is not just the miptree depth.
534 */
535 const unsigned view_num_layers =
536 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
537 mt->logical_depth0;
538
539 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
540 * texturing functions that return a float, as our code generation always
541 * selects the .x channel (which would always be 0).
542 */
543 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
544 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
545 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
546 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
547 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
548 brw_get_texture_swizzle(&brw->ctx, obj));
549
550 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
551 unsigned format = translate_tex_format(brw, mesa_fmt,
552 sampler->sRGBDecode);
553
554 /* Implement gen6 and gen7 gather work-around */
555 bool need_green_to_blue = false;
556 if (for_gather) {
557 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
558 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
559 need_green_to_blue = brw->is_haswell;
560 } else if (brw->gen == 6) {
561 /* Sandybridge's gather4 message is broken for integer formats.
562 * To work around this, we pretend the surface is UNORM for
563 * 8 or 16-bit formats, and emit shader instructions to recover
564 * the real INT/UINT value. For 32-bit formats, we pretend
565 * the surface is FLOAT, and simply reinterpret the resulting
566 * bits.
567 */
568 switch (format) {
569 case BRW_SURFACEFORMAT_R8_SINT:
570 case BRW_SURFACEFORMAT_R8_UINT:
571 format = BRW_SURFACEFORMAT_R8_UNORM;
572 break;
573
574 case BRW_SURFACEFORMAT_R16_SINT:
575 case BRW_SURFACEFORMAT_R16_UINT:
576 format = BRW_SURFACEFORMAT_R16_UNORM;
577 break;
578
579 case BRW_SURFACEFORMAT_R32_SINT:
580 case BRW_SURFACEFORMAT_R32_UINT:
581 format = BRW_SURFACEFORMAT_R32_FLOAT;
582 break;
583
584 default:
585 break;
586 }
587 }
588 }
589
590 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
591 if (brw->gen <= 7) {
592 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
593 mt = mt->r8stencil_mt;
594 } else {
595 mt = mt->stencil_mt;
596 }
597 format = BRW_SURFACEFORMAT_R8_UINT;
598 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
599 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
600 mt = mt->r8stencil_mt;
601 format = BRW_SURFACEFORMAT_R8_UINT;
602 }
603
604 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
605
606 struct isl_view view = {
607 .format = format,
608 .base_level = obj->MinLevel + obj->BaseLevel,
609 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
610 .base_array_layer = obj->MinLayer,
611 .array_len = view_num_layers,
612 .swizzle = {
613 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
614 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
615 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
616 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
617 },
618 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
619 };
620
621 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
622 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
623 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
624
625 assert(brw_texture_view_sane(brw, mt, format));
626
627 const int flags =
628 brw_disable_aux_surface(brw, mt) ? INTEL_AUX_BUFFER_DISABLED : 0;
629 brw_emit_surface_state(brw, mt, flags, mt->target, view,
630 surface_state_infos[brw->gen].tex_mocs,
631 surf_offset, surf_index,
632 I915_GEM_DOMAIN_SAMPLER, 0);
633 }
634 }
635
636 void
637 brw_emit_buffer_surface_state(struct brw_context *brw,
638 uint32_t *out_offset,
639 drm_intel_bo *bo,
640 unsigned buffer_offset,
641 unsigned surface_format,
642 unsigned buffer_size,
643 unsigned pitch,
644 bool rw)
645 {
646 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
647
648 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
649 ss_info.num_dwords * 4, ss_info.ss_align,
650 out_offset);
651
652 isl_buffer_fill_state(&brw->isl_dev, dw,
653 .address = (bo ? bo->offset64 : 0) + buffer_offset,
654 .size = buffer_size,
655 .format = surface_format,
656 .stride = pitch,
657 .mocs = ss_info.tex_mocs);
658
659 if (bo) {
660 drm_intel_bo_emit_reloc(brw->batch.bo,
661 *out_offset + 4 * ss_info.reloc_dw,
662 bo, buffer_offset,
663 I915_GEM_DOMAIN_SAMPLER,
664 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
665 }
666 }
667
668 void
669 brw_update_buffer_texture_surface(struct gl_context *ctx,
670 unsigned unit,
671 uint32_t *surf_offset)
672 {
673 struct brw_context *brw = brw_context(ctx);
674 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
675 struct intel_buffer_object *intel_obj =
676 intel_buffer_object(tObj->BufferObject);
677 uint32_t size = tObj->BufferSize;
678 drm_intel_bo *bo = NULL;
679 mesa_format format = tObj->_BufferObjectFormat;
680 uint32_t brw_format = brw_format_for_mesa_format(format);
681 int texel_size = _mesa_get_format_bytes(format);
682
683 if (intel_obj) {
684 size = MIN2(size, intel_obj->Base.Size);
685 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
686 }
687
688 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
689 _mesa_problem(NULL, "bad format %s for texture buffer\n",
690 _mesa_get_format_name(format));
691 }
692
693 brw_emit_buffer_surface_state(brw, surf_offset, bo,
694 tObj->BufferOffset,
695 brw_format,
696 size,
697 texel_size,
698 false /* rw */);
699 }
700
701 /**
702 * Create the constant buffer surface. Vertex/fragment shader constants will be
703 * read from this buffer with Data Port Read instructions/messages.
704 */
705 void
706 brw_create_constant_surface(struct brw_context *brw,
707 drm_intel_bo *bo,
708 uint32_t offset,
709 uint32_t size,
710 uint32_t *out_offset)
711 {
712 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
713 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
714 size, 1, false);
715 }
716
717 /**
718 * Create the buffer surface. Shader buffer variables will be
719  * read from / written to this buffer with Data Port Read/Write
720 * instructions/messages.
721 */
722 void
723 brw_create_buffer_surface(struct brw_context *brw,
724 drm_intel_bo *bo,
725 uint32_t offset,
726 uint32_t size,
727 uint32_t *out_offset)
728 {
729 /* Use a raw surface so we can reuse existing untyped read/write/atomic
730 * messages. We need these specifically for the fragment shader since they
731 * include a pixel mask header that we need to ensure correct behavior
732 * with helper invocations, which cannot write to the buffer.
733 */
734 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
735 BRW_SURFACEFORMAT_RAW,
736 size, 1, true);
737 }
738
739 /**
740 * Set up a binding table entry for use by stream output logic (transform
741 * feedback).
742 *
743 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
744 */
745 void
746 brw_update_sol_surface(struct brw_context *brw,
747 struct gl_buffer_object *buffer_obj,
748 uint32_t *out_offset, unsigned num_vector_components,
749 unsigned stride_dwords, unsigned offset_dwords)
750 {
751 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
752 uint32_t offset_bytes = 4 * offset_dwords;
753 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
754 offset_bytes,
755 buffer_obj->Size - offset_bytes);
756 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
757 out_offset);
758 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
759 size_t size_dwords = buffer_obj->Size / 4;
760 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
761
762 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
763 * too big to map using a single binding table entry?
764 */
765 assert((size_dwords - offset_dwords) / stride_dwords
766 <= BRW_MAX_NUM_BUFFER_ENTRIES);
767
768 if (size_dwords > offset_dwords + num_vector_components) {
769 /* There is room for at least 1 transform feedback output in the buffer.
770 * Compute the number of additional transform feedback outputs the
771 * buffer has room for.
772 */
773 buffer_size_minus_1 =
774 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
775 } else {
776 /* There isn't even room for a single transform feedback output in the
777 * buffer. We can't configure the binding table entry to prevent output
778 * entirely; we'll have to rely on the geometry shader to detect
779 * overflow. But to minimize the damage in case of a bug, set up the
780 * binding table entry to just allow a single output.
781 */
782 buffer_size_minus_1 = 0;
783 }
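   /* The buffer size is encoded as a 27-bit value split across the Width
    * (bits 6:0), Height (bits 19:7), and Depth (bits 26:20) surface state
    * fields.
    */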
784 width = buffer_size_minus_1 & 0x7f;
785 height = (buffer_size_minus_1 & 0xfff80) >> 7;
786 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
787
788 switch (num_vector_components) {
789 case 1:
790 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
791 break;
792 case 2:
793 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
794 break;
795 case 3:
796 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
797 break;
798 case 4:
799 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
800 break;
801 default:
802 unreachable("Invalid vector size for transform feedback output");
803 }
804
805 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
806 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
807 surface_format << BRW_SURFACE_FORMAT_SHIFT |
808 BRW_SURFACE_RC_READ_WRITE;
809 surf[1] = bo->offset64 + offset_bytes; /* reloc */
810 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
811 height << BRW_SURFACE_HEIGHT_SHIFT);
812 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
813 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
814 surf[4] = 0;
815 surf[5] = 0;
816
817 /* Emit relocation to surface contents. */
818 drm_intel_bo_emit_reloc(brw->batch.bo,
819 *out_offset + 4,
820 bo, offset_bytes,
821 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
822 }
823
824 /* Creates a new WM constant buffer reflecting the current fragment program's
825 * constants, if needed by the fragment program.
826 *
827 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
828 * state atom.
829 */
830 static void
831 brw_upload_wm_pull_constants(struct brw_context *brw)
832 {
833 struct brw_stage_state *stage_state = &brw->wm.base;
834 /* BRW_NEW_FRAGMENT_PROGRAM */
835 struct brw_fragment_program *fp =
836 (struct brw_fragment_program *) brw->fragment_program;
837 /* BRW_NEW_FS_PROG_DATA */
838 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
839
840 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
841 /* _NEW_PROGRAM_CONSTANTS */
842 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
843 stage_state, prog_data);
844 }
845
846 const struct brw_tracked_state brw_wm_pull_constants = {
847 .dirty = {
848 .mesa = _NEW_PROGRAM_CONSTANTS,
849 .brw = BRW_NEW_BATCH |
850 BRW_NEW_BLORP |
851 BRW_NEW_FRAGMENT_PROGRAM |
852 BRW_NEW_FS_PROG_DATA,
853 },
854 .emit = brw_upload_wm_pull_constants,
855 };
856
857 /**
858 * Creates a null renderbuffer surface.
859 *
860 * This is used when the shader doesn't write to any color output. An FB
861 * write to target 0 will still be emitted, because that's how the thread is
862 * terminated (and computed depth is returned), so we need to have the
863  * hardware discard the target 0 color output.
864 */
865 static void
866 brw_emit_null_surface_state(struct brw_context *brw,
867 unsigned width,
868 unsigned height,
869 unsigned samples,
870 uint32_t *out_offset)
871 {
872 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
873 * Notes):
874 *
875 * A null surface will be used in instances where an actual surface is
876 * not bound. When a write message is generated to a null surface, no
877 * actual surface is written to. When a read message (including any
878 * sampling engine message) is generated to a null surface, the result
879 * is all zeros. Note that a null surface type is allowed to be used
880  *     with all messages, even if it is not specifically indicated as
881 * supported. All of the remaining fields in surface state are ignored
882 * for null surfaces, with the following exceptions:
883 *
884 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
885 * depth buffer’s corresponding state for all render target surfaces,
886 * including null.
887 *
888 * - Surface Format must be R8G8B8A8_UNORM.
889 */
890 unsigned surface_type = BRW_SURFACE_NULL;
891 drm_intel_bo *bo = NULL;
892 unsigned pitch_minus_1 = 0;
893 uint32_t multisampling_state = 0;
894 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
895 out_offset);
896
897 if (samples > 1) {
898 /* On Gen6, null render targets seem to cause GPU hangs when
899       * multisampling. So work around this problem by rendering into a dummy
900 * color buffer.
901 *
902 * To decrease the amount of memory needed by the workaround buffer, we
903 * set its pitch to 128 bytes (the width of a Y tile). This means that
904 * the amount of memory needed for the workaround buffer is
905 * (width_in_tiles + height_in_tiles - 1) tiles.
906 *
907 * Note that since the workaround buffer will be interpreted by the
908 * hardware as an interleaved multisampled buffer, we need to compute
909 * width_in_tiles and height_in_tiles by dividing the width and height
910 * by 16 rather than the normal Y-tile size of 32.
911 */
912 unsigned width_in_tiles = ALIGN(width, 16) / 16;
913 unsigned height_in_tiles = ALIGN(height, 16) / 16;
914 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
915 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
916 size_needed);
917 bo = brw->wm.multisampled_null_render_target_bo;
918 surface_type = BRW_SURFACE_2D;
919 pitch_minus_1 = 127;
920 multisampling_state = brw_get_surface_num_multisamples(samples);
921 }
922
923 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
924 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
925 if (brw->gen < 6) {
926 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
927 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
928 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
929 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
930 }
931 surf[1] = bo ? bo->offset64 : 0;
932 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
933 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
934
935 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
936 * Notes):
937 *
938 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
939 */
940 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
941 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
942 surf[4] = multisampling_state;
943 surf[5] = 0;
944
945 if (bo) {
946 drm_intel_bo_emit_reloc(brw->batch.bo,
947 *out_offset + 4,
948 bo, 0,
949 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
950 }
951 }
952
953 /**
954 * Sets up a surface state structure to point at the given region.
955 * While it is only used for the front/back buffer currently, it should be
956 * usable for further buffers when doing ARB_draw_buffer support.
957 */
958 static uint32_t
959 gen4_update_renderbuffer_surface(struct brw_context *brw,
960 struct gl_renderbuffer *rb,
961 uint32_t flags, unsigned unit,
962 uint32_t surf_index)
963 {
964 struct gl_context *ctx = &brw->ctx;
965 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
966 struct intel_mipmap_tree *mt = irb->mt;
967 uint32_t *surf;
968 uint32_t tile_x, tile_y;
969 uint32_t format = 0;
970 uint32_t offset;
971 /* _NEW_BUFFERS */
972 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
973 /* BRW_NEW_FS_PROG_DATA */
974
975 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
976 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
977
978 if (rb->TexImage && !brw->has_surface_tile_offset) {
979 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
980
981 if (tile_x != 0 || tile_y != 0) {
982 /* Original gen4 hardware couldn't draw to a non-tile-aligned
983       * destination in a miptree unless you actually set up your renderbuffer
984 * as a miptree and used the fragile lod/array_index/etc. controls to
985 * select the image. So, instead, we just make a new single-level
986 * miptree and render into that.
987 */
988 intel_renderbuffer_move_to_temp(brw, irb, false);
989 mt = irb->mt;
990 }
991 }
992
993 intel_miptree_used_for_rendering(irb->mt);
994
995 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
996
997 format = brw->render_target_format[rb_format];
998 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
999 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1000 __func__, _mesa_get_format_name(rb_format));
1001 }
1002
1003 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1004 format << BRW_SURFACE_FORMAT_SHIFT);
1005
1006 /* reloc */
1007 assert(mt->offset % mt->cpp == 0);
1008 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1009 mt->bo->offset64 + mt->offset);
1010
1011 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1012 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1013
1014 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1015 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1016
1017 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1018
1019 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1020 /* Note that the low bits of these fields are missing, so
1021 * there's the possibility of getting in trouble.
1022 */
1023 assert(tile_x % 4 == 0);
1024 assert(tile_y % 2 == 0);
1025 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1026 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1027 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1028
1029 if (brw->gen < 6) {
1030 /* _NEW_COLOR */
1031 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1032 (ctx->Color.BlendEnabled & (1 << unit)))
1033 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1034
1035 if (!ctx->Color.ColorMask[unit][0])
1036 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1037 if (!ctx->Color.ColorMask[unit][1])
1038 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1039 if (!ctx->Color.ColorMask[unit][2])
1040 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1041
1042          /* Disable writes to the alpha component when the
1043           * renderbuffer is XRGB.
1044 */
1045 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1046 !ctx->Color.ColorMask[unit][3]) {
1047 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1048 }
1049 }
1050
1051 drm_intel_bo_emit_reloc(brw->batch.bo,
1052 offset + 4,
1053 mt->bo,
1054 surf[1] - mt->bo->offset64,
1055 I915_GEM_DOMAIN_RENDER,
1056 I915_GEM_DOMAIN_RENDER);
1057
1058 return offset;
1059 }
1060
1061 /**
1062 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1063 */
1064 void
1065 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1066 const struct gl_framebuffer *fb,
1067 uint32_t render_target_start,
1068 uint32_t *surf_offset)
1069 {
1070 GLuint i;
1071 const unsigned int w = _mesa_geometric_width(fb);
1072 const unsigned int h = _mesa_geometric_height(fb);
1073 const unsigned int s = _mesa_geometric_samples(fb);
1074
1075 /* Update surfaces for drawing buffers */
1076 if (fb->_NumColorDrawBuffers >= 1) {
1077 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1078 const uint32_t surf_index = render_target_start + i;
1079 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1080 INTEL_RENDERBUFFER_LAYERED : 0) |
1081 (brw->draw_aux_buffer_disabled[i] ?
1082 INTEL_AUX_BUFFER_DISABLED : 0);
1083
1084 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1085 surf_offset[surf_index] =
1086 brw->vtbl.update_renderbuffer_surface(
1087 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1088 } else {
1089 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1090 &surf_offset[surf_index]);
1091 }
1092 }
1093 } else {
1094 const uint32_t surf_index = render_target_start;
1095 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1096 &surf_offset[surf_index]);
1097 }
1098 }
1099
1100 static void
1101 update_renderbuffer_surfaces(struct brw_context *brw)
1102 {
1103 const struct gl_context *ctx = &brw->ctx;
1104
1105 /* BRW_NEW_FS_PROG_DATA */
1106 const struct brw_wm_prog_data *wm_prog_data =
1107 brw_wm_prog_data(brw->wm.base.prog_data);
1108
1109 /* _NEW_BUFFERS | _NEW_COLOR */
1110 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1111 brw_update_renderbuffer_surfaces(
1112 brw, fb,
1113 wm_prog_data->binding_table.render_target_start,
1114 brw->wm.base.surf_offset);
1115 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1116 }
1117
1118 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1119 .dirty = {
1120 .mesa = _NEW_BUFFERS |
1121 _NEW_COLOR,
1122 .brw = BRW_NEW_BATCH |
1123 BRW_NEW_BLORP |
1124 BRW_NEW_FS_PROG_DATA,
1125 },
1126 .emit = update_renderbuffer_surfaces,
1127 };
1128
1129 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1130 .dirty = {
1131 .mesa = _NEW_BUFFERS,
1132 .brw = BRW_NEW_BATCH |
1133 BRW_NEW_BLORP,
1134 },
1135 .emit = update_renderbuffer_surfaces,
1136 };
1137
1138 static void
1139 update_renderbuffer_read_surfaces(struct brw_context *brw)
1140 {
1141 const struct gl_context *ctx = &brw->ctx;
1142
1143 /* BRW_NEW_FS_PROG_DATA */
1144 const struct brw_wm_prog_data *wm_prog_data =
1145 brw_wm_prog_data(brw->wm.base.prog_data);
1146
1147 /* BRW_NEW_FRAGMENT_PROGRAM */
1148 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1149 brw->fragment_program &&
1150 brw->fragment_program->Base.nir->info->outputs_read) {
1151 /* _NEW_BUFFERS */
1152 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1153
1154 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1155 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1156 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1157 const unsigned surf_index =
1158 wm_prog_data->binding_table.render_target_read_start + i;
1159 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1160
1161 if (irb) {
1162 const unsigned format = brw->render_target_format[
1163 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1164 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1165 format));
1166
1167 /* Override the target of the texture if the render buffer is a
1168 * single slice of a 3D texture (since the minimum array element
1169 * field of the surface state structure is ignored by the sampler
1170 * unit for 3D textures on some hardware), or if the render buffer
1171 * is a 1D array (since shaders always provide the array index
1172 * coordinate at the Z component to avoid state-dependent
1173 * recompiles when changing the texture target of the
1174 * framebuffer).
1175 */
1176 const GLenum target =
1177 (irb->mt->target == GL_TEXTURE_3D &&
1178 irb->layer_count == 1) ? GL_TEXTURE_2D :
1179 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1180 irb->mt->target;
1181
1182 /* intel_renderbuffer::mt_layer is expressed in sample units for
1183 * the UMS and CMS multisample layouts, but
1184 * intel_renderbuffer::layer_count is expressed in units of whole
1185 * logical layers regardless of the multisample layout.
1186 */
1187 const unsigned mt_layer_unit =
1188 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1189 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1190 MAX2(irb->mt->num_samples, 1) : 1;
1191
1192 const struct isl_view view = {
1193 .format = format,
1194 .base_level = irb->mt_level - irb->mt->first_level,
1195 .levels = 1,
1196 .base_array_layer = irb->mt_layer / mt_layer_unit,
1197 .array_len = irb->layer_count,
1198 .swizzle = ISL_SWIZZLE_IDENTITY,
1199 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1200 };
1201
1202 const int flags = brw->draw_aux_buffer_disabled[i] ?
1203 INTEL_AUX_BUFFER_DISABLED : 0;
1204 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1205 surface_state_infos[brw->gen].tex_mocs,
1206 surf_offset, surf_index,
1207 I915_GEM_DOMAIN_SAMPLER, 0);
1208
1209 } else {
1210 brw->vtbl.emit_null_surface_state(
1211 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1212 _mesa_geometric_samples(fb), surf_offset);
1213 }
1214 }
1215
1216 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1217 }
1218 }
1219
1220 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1221 .dirty = {
1222 .mesa = _NEW_BUFFERS,
1223 .brw = BRW_NEW_BATCH |
1224 BRW_NEW_FRAGMENT_PROGRAM |
1225 BRW_NEW_FS_PROG_DATA,
1226 },
1227 .emit = update_renderbuffer_read_surfaces,
1228 };
1229
1230 static void
1231 update_stage_texture_surfaces(struct brw_context *brw,
1232 const struct gl_program *prog,
1233 struct brw_stage_state *stage_state,
1234 bool for_gather, uint32_t plane)
1235 {
1236 if (!prog)
1237 return;
1238
1239 struct gl_context *ctx = &brw->ctx;
1240
1241 uint32_t *surf_offset = stage_state->surf_offset;
1242
1243 /* BRW_NEW_*_PROG_DATA */
1244 if (for_gather)
1245 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1246 else
1247 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1248
1249 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1250 for (unsigned s = 0; s < num_samplers; s++) {
1251 surf_offset[s] = 0;
1252
1253 if (prog->SamplersUsed & (1 << s)) {
1254 const unsigned unit = prog->SamplerUnits[s];
1255
1256 /* _NEW_TEXTURE */
1257 if (ctx->Texture.Unit[unit]._Current) {
1258 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1259 }
1260 }
1261 }
1262 }
1263
1264
1265 /**
1266 * Construct SURFACE_STATE objects for enabled textures.
1267 */
1268 static void
1269 brw_update_texture_surfaces(struct brw_context *brw)
1270 {
1271 /* BRW_NEW_VERTEX_PROGRAM */
1272 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1273
1274 /* BRW_NEW_TESS_PROGRAMS */
1275 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1276 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1277
1278 /* BRW_NEW_GEOMETRY_PROGRAM */
1279 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1280
1281 /* BRW_NEW_FRAGMENT_PROGRAM */
1282 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1283
1284 /* _NEW_TEXTURE */
1285 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1286 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1287 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1288 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1289 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1290
1291    /* Emit an alternate set of surface state for gather. This
1292     * allows the surface format to be overridden for only the
1293     * gather4 messages. */
1294 if (brw->gen < 8) {
1295 if (vs && vs->nir->info->uses_texture_gather)
1296 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1297 if (tcs && tcs->nir->info->uses_texture_gather)
1298 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1299 if (tes && tes->nir->info->uses_texture_gather)
1300 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1301 if (gs && gs->nir->info->uses_texture_gather)
1302 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1303 if (fs && fs->nir->info->uses_texture_gather)
1304 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1305 }
1306
1307 if (fs) {
1308 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1309 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1310 }
1311
1312 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1313 }
1314
1315 const struct brw_tracked_state brw_texture_surfaces = {
1316 .dirty = {
1317 .mesa = _NEW_TEXTURE,
1318 .brw = BRW_NEW_BATCH |
1319 BRW_NEW_BLORP |
1320 BRW_NEW_FRAGMENT_PROGRAM |
1321 BRW_NEW_FS_PROG_DATA |
1322 BRW_NEW_GEOMETRY_PROGRAM |
1323 BRW_NEW_GS_PROG_DATA |
1324 BRW_NEW_TESS_PROGRAMS |
1325 BRW_NEW_TCS_PROG_DATA |
1326 BRW_NEW_TES_PROG_DATA |
1327 BRW_NEW_TEXTURE_BUFFER |
1328 BRW_NEW_VERTEX_PROGRAM |
1329 BRW_NEW_VS_PROG_DATA,
1330 },
1331 .emit = brw_update_texture_surfaces,
1332 };
1333
1334 static void
1335 brw_update_cs_texture_surfaces(struct brw_context *brw)
1336 {
1337 /* BRW_NEW_COMPUTE_PROGRAM */
1338 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1339
1340 /* _NEW_TEXTURE */
1341 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1342
1343    /* Emit an alternate set of surface state for gather. This
1344     * allows the surface format to be overridden for only the
1345 * gather4 messages.
1346 */
1347 if (brw->gen < 8) {
1348 if (cs && cs->nir->info->uses_texture_gather)
1349 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1350 }
1351
1352 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1353 }
1354
1355 const struct brw_tracked_state brw_cs_texture_surfaces = {
1356 .dirty = {
1357 .mesa = _NEW_TEXTURE,
1358 .brw = BRW_NEW_BATCH |
1359 BRW_NEW_BLORP |
1360 BRW_NEW_COMPUTE_PROGRAM,
1361 },
1362 .emit = brw_update_cs_texture_surfaces,
1363 };
1364
1365
1366 void
1367 brw_upload_ubo_surfaces(struct brw_context *brw,
1368 struct gl_linked_shader *shader,
1369 struct brw_stage_state *stage_state,
1370 struct brw_stage_prog_data *prog_data)
1371 {
1372 struct gl_context *ctx = &brw->ctx;
1373
1374 if (!shader)
1375 return;
1376
1377 uint32_t *ubo_surf_offsets =
1378 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1379
1380 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1381 struct gl_uniform_buffer_binding *binding =
1382 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1383
1384 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1385 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1386 } else {
1387 struct intel_buffer_object *intel_bo =
1388 intel_buffer_object(binding->BufferObject);
1389 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1390 if (!binding->AutomaticSize)
1391 size = MIN2(size, binding->Size);
1392 drm_intel_bo *bo =
1393 intel_bufferobj_buffer(brw, intel_bo,
1394 binding->Offset,
1395 size);
1396 brw_create_constant_surface(brw, bo, binding->Offset,
1397 size,
1398 &ubo_surf_offsets[i]);
1399 }
1400 }
1401
1402 uint32_t *ssbo_surf_offsets =
1403 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1404
1405 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1406 struct gl_shader_storage_buffer_binding *binding =
1407 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1408
1409 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1410 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1411 } else {
1412 struct intel_buffer_object *intel_bo =
1413 intel_buffer_object(binding->BufferObject);
1414 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1415 if (!binding->AutomaticSize)
1416 size = MIN2(size, binding->Size);
1417 drm_intel_bo *bo =
1418 intel_bufferobj_buffer(brw, intel_bo,
1419 binding->Offset,
1420 size);
1421 brw_create_buffer_surface(brw, bo, binding->Offset,
1422 size,
1423 &ssbo_surf_offsets[i]);
1424 }
1425 }
1426
1427 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1428 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1429 }
1430
1431 static void
1432 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1433 {
1434 struct gl_context *ctx = &brw->ctx;
1435 /* _NEW_PROGRAM */
1436 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1437
1438 if (!prog)
1439 return;
1440
1441 /* BRW_NEW_FS_PROG_DATA */
1442 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1443 &brw->wm.base, brw->wm.base.prog_data);
1444 }
1445
1446 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1447 .dirty = {
1448 .mesa = _NEW_PROGRAM,
1449 .brw = BRW_NEW_BATCH |
1450 BRW_NEW_BLORP |
1451 BRW_NEW_FS_PROG_DATA |
1452 BRW_NEW_UNIFORM_BUFFER,
1453 },
1454 .emit = brw_upload_wm_ubo_surfaces,
1455 };
1456
1457 static void
1458 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1459 {
1460 struct gl_context *ctx = &brw->ctx;
1461 /* _NEW_PROGRAM */
1462 struct gl_shader_program *prog =
1463 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1464
1465 if (!prog)
1466 return;
1467
1468 /* BRW_NEW_CS_PROG_DATA */
1469 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1470 &brw->cs.base, brw->cs.base.prog_data);
1471 }
1472
1473 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1474 .dirty = {
1475 .mesa = _NEW_PROGRAM,
1476 .brw = BRW_NEW_BATCH |
1477 BRW_NEW_BLORP |
1478 BRW_NEW_CS_PROG_DATA |
1479 BRW_NEW_UNIFORM_BUFFER,
1480 },
1481 .emit = brw_upload_cs_ubo_surfaces,
1482 };
1483
1484 void
1485 brw_upload_abo_surfaces(struct brw_context *brw,
1486 struct gl_linked_shader *shader,
1487 struct brw_stage_state *stage_state,
1488 struct brw_stage_prog_data *prog_data)
1489 {
1490 struct gl_context *ctx = &brw->ctx;
1491 uint32_t *surf_offsets =
1492 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1493
1494 if (shader && shader->NumAtomicBuffers) {
1495 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1496 struct gl_atomic_buffer_binding *binding =
1497 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1498 struct intel_buffer_object *intel_bo =
1499 intel_buffer_object(binding->BufferObject);
1500 drm_intel_bo *bo = intel_bufferobj_buffer(
1501 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1502
1503 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1504 binding->Offset, BRW_SURFACEFORMAT_RAW,
1505 bo->size - binding->Offset, 1, true);
1506 }
1507
1508 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1509 }
1510 }
1511
1512 static void
1513 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1514 {
1515 struct gl_context *ctx = &brw->ctx;
1516 /* _NEW_PROGRAM */
1517 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1518
1519 if (prog) {
1520 /* BRW_NEW_FS_PROG_DATA */
1521 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1522 &brw->wm.base, brw->wm.base.prog_data);
1523 }
1524 }
1525
1526 const struct brw_tracked_state brw_wm_abo_surfaces = {
1527 .dirty = {
1528 .mesa = _NEW_PROGRAM,
1529 .brw = BRW_NEW_ATOMIC_BUFFER |
1530 BRW_NEW_BLORP |
1531 BRW_NEW_BATCH |
1532 BRW_NEW_FS_PROG_DATA,
1533 },
1534 .emit = brw_upload_wm_abo_surfaces,
1535 };
1536
1537 static void
1538 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1539 {
1540 struct gl_context *ctx = &brw->ctx;
1541 /* _NEW_PROGRAM */
1542 struct gl_shader_program *prog =
1543 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1544
1545 if (prog) {
1546 /* BRW_NEW_CS_PROG_DATA */
1547 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1548 &brw->cs.base, brw->cs.base.prog_data);
1549 }
1550 }
1551
1552 const struct brw_tracked_state brw_cs_abo_surfaces = {
1553 .dirty = {
1554 .mesa = _NEW_PROGRAM,
1555 .brw = BRW_NEW_ATOMIC_BUFFER |
1556 BRW_NEW_BLORP |
1557 BRW_NEW_BATCH |
1558 BRW_NEW_CS_PROG_DATA,
1559 },
1560 .emit = brw_upload_cs_abo_surfaces,
1561 };
1562
1563 static void
1564 brw_upload_cs_image_surfaces(struct brw_context *brw)
1565 {
1566 struct gl_context *ctx = &brw->ctx;
1567 /* _NEW_PROGRAM */
1568 struct gl_shader_program *prog =
1569 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1570
1571 if (prog) {
1572 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1573 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1574 &brw->cs.base, brw->cs.base.prog_data);
1575 }
1576 }
1577
1578 const struct brw_tracked_state brw_cs_image_surfaces = {
1579 .dirty = {
1580 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1581 .brw = BRW_NEW_BATCH |
1582 BRW_NEW_BLORP |
1583 BRW_NEW_CS_PROG_DATA |
1584 BRW_NEW_IMAGE_UNITS
1585 },
1586 .emit = brw_upload_cs_image_surfaces,
1587 };
1588
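/* Choose the hardware surface format for a shader image unit: write-only
 * access can use the image format directly, reads are lowered to the closest
 * typed format the hardware can read, and anything else falls back to RAW
 * untyped access.
 */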
1589 static uint32_t
1590 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1591 {
1592 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1593 uint32_t hw_format = brw_format_for_mesa_format(format);
1594 if (access == GL_WRITE_ONLY) {
1595 return hw_format;
1596 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1597 /* Typed surface reads support a very limited subset of the shader
1598 * image formats. Translate it into the closest format the
1599 * hardware supports.
1600 */
1601 return isl_lower_storage_image_format(devinfo, hw_format);
1602 } else {
1603 /* The hardware doesn't actually support a typed format that we can use
1604 * so we have to fall back to untyped read/write messages.
1605 */
1606 return BRW_SURFACEFORMAT_RAW;
1607 }
1608 }
1609
1610 static void
1611 update_default_image_param(struct brw_context *brw,
1612 struct gl_image_unit *u,
1613 unsigned surface_idx,
1614 struct brw_image_param *param)
1615 {
1616 memset(param, 0, sizeof(*param));
1617 param->surface_idx = surface_idx;
1618 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1619 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1620 * detailed explanation of these parameters.
1621 */
1622 param->swizzling[0] = 0xff;
1623 param->swizzling[1] = 0xff;
1624 }
1625
1626 static void
1627 update_buffer_image_param(struct brw_context *brw,
1628 struct gl_image_unit *u,
1629 unsigned surface_idx,
1630 struct brw_image_param *param)
1631 {
1632 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1633 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1634 update_default_image_param(brw, u, surface_idx, param);
1635
1636 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1637 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1638 }
1639
1640 static void
1641 update_texture_image_param(struct brw_context *brw,
1642 struct gl_image_unit *u,
1643 unsigned surface_idx,
1644 struct brw_image_param *param)
1645 {
1646 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1647
1648 update_default_image_param(brw, u, surface_idx, param);
1649
1650 param->size[0] = minify(mt->logical_width0, u->Level);
1651 param->size[1] = minify(mt->logical_height0, u->Level);
1652 param->size[2] = (!u->Layered ? 1 :
1653 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1654 u->TexObj->Target == GL_TEXTURE_3D ?
1655 minify(mt->logical_depth0, u->Level) :
1656 mt->logical_depth0);
1657
1658 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1659 &param->offset[0],
1660 &param->offset[1]);
1661
1662 param->stride[0] = mt->cpp;
1663 param->stride[1] = mt->pitch / mt->cpp;
1664 param->stride[2] =
1665 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1666 param->stride[3] =
1667 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1668
1669 if (mt->tiling == I915_TILING_X) {
1670 /* An X tile is a rectangular block of 512x8 bytes. */
1671 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1672 param->tiling[1] = _mesa_logbase2(8);
1673
1674 if (brw->has_swizzling) {
1675 /* Right shifts required to swizzle bits 9 and 10 of the memory
1676 * address with bit 6.
1677 */
1678 param->swizzling[0] = 3;
1679 param->swizzling[1] = 4;
1680 }
1681 } else if (mt->tiling == I915_TILING_Y) {
1682       /* The layout of a Y-tiled surface in memory isn't fundamentally
1683        * different from the layout of an X-tiled surface; we simply pretend
1684        * that the surface is broken up into a number of smaller 16Bx32 tiles,
1685        * each arranged in X-major order just as for X-tiling.
1686 */
1687 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1688 param->tiling[1] = _mesa_logbase2(32);
1689
1690 if (brw->has_swizzling) {
1691 /* Right shift required to swizzle bit 9 of the memory address with
1692 * bit 6.
1693 */
1694 param->swizzling[0] = 3;
1695 }
1696 }
1697
1698 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1699 * address calculation algorithm (emit_address_calculation() in
1700 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1701 * modulus equal to the LOD.
1702 */
1703 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1704 0);
1705 }
1706
1707 static void
1708 update_image_surface(struct brw_context *brw,
1709 struct gl_image_unit *u,
1710 GLenum access,
1711 unsigned surface_idx,
1712 uint32_t *surf_offset,
1713 struct brw_image_param *param)
1714 {
1715 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1716 struct gl_texture_object *obj = u->TexObj;
1717 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1718
1719 if (obj->Target == GL_TEXTURE_BUFFER) {
1720 struct intel_buffer_object *intel_obj =
1721 intel_buffer_object(obj->BufferObject);
1722 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1723 _mesa_get_format_bytes(u->_ActualFormat));
1724
1725 brw_emit_buffer_surface_state(
1726 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1727 format, intel_obj->Base.Size, texel_size,
1728 access != GL_READ_ONLY);
1729
1730 update_buffer_image_param(brw, u, surface_idx, param);
1731
1732 } else {
1733 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1734 struct intel_mipmap_tree *mt = intel_obj->mt;
1735
1736 if (format == BRW_SURFACEFORMAT_RAW) {
1737 brw_emit_buffer_surface_state(
1738 brw, surf_offset, mt->bo, mt->offset,
1739 format, mt->bo->size - mt->offset, 1 /* pitch */,
1740 access != GL_READ_ONLY);
1741
1742 } else {
1743 const unsigned num_layers = (!u->Layered ? 1 :
1744 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1745 mt->logical_depth0);
1746
1747 struct isl_view view = {
1748 .format = format,
1749 .base_level = obj->MinLevel + u->Level,
1750 .levels = 1,
1751 .base_array_layer = obj->MinLayer + u->_Layer,
1752 .array_len = num_layers,
1753 .swizzle = ISL_SWIZZLE_IDENTITY,
1754 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1755 };
1756
1757 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1758 const int flags =
1759 mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED ?
1760 INTEL_AUX_BUFFER_DISABLED : 0;
1761 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1762 surface_state_infos[brw->gen].tex_mocs,
1763 surf_offset, surf_index,
1764 I915_GEM_DOMAIN_SAMPLER,
1765 access == GL_READ_ONLY ? 0 :
1766 I915_GEM_DOMAIN_SAMPLER);
1767 }
1768
1769 update_texture_image_param(brw, u, surface_idx, param);
1770 }
1771
1772 } else {
1773 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1774 update_default_image_param(brw, u, surface_idx, param);
1775 }
1776 }
1777
1778 void
1779 brw_upload_image_surfaces(struct brw_context *brw,
1780 struct gl_linked_shader *shader,
1781 struct brw_stage_state *stage_state,
1782 struct brw_stage_prog_data *prog_data)
1783 {
1784 struct gl_context *ctx = &brw->ctx;
1785
1786 if (shader && shader->NumImages) {
1787 for (unsigned i = 0; i < shader->NumImages; i++) {
1788 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1789 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1790
1791 update_image_surface(brw, u, shader->ImageAccess[i],
1792 surf_idx,
1793 &stage_state->surf_offset[surf_idx],
1794 &prog_data->image_param[i]);
1795 }
1796
1797 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1798 /* This may have changed the image metadata dependent on the context
1799       * image unit state and passed to the program as uniforms, so make sure
1800 * that push and pull constants are reuploaded.
1801 */
1802 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1803 }
1804 }
1805
1806 static void
1807 brw_upload_wm_image_surfaces(struct brw_context *brw)
1808 {
1809 struct gl_context *ctx = &brw->ctx;
1810 /* BRW_NEW_FRAGMENT_PROGRAM */
1811 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1812
1813 if (prog) {
1814 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1815 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1816 &brw->wm.base, brw->wm.base.prog_data);
1817 }
1818 }
1819
1820 const struct brw_tracked_state brw_wm_image_surfaces = {
1821 .dirty = {
1822 .mesa = _NEW_TEXTURE,
1823 .brw = BRW_NEW_BATCH |
1824 BRW_NEW_BLORP |
1825 BRW_NEW_FRAGMENT_PROGRAM |
1826 BRW_NEW_FS_PROG_DATA |
1827 BRW_NEW_IMAGE_UNITS
1828 },
1829 .emit = brw_upload_wm_image_surfaces,
1830 };
1831
1832 void
1833 gen4_init_vtable_surface_functions(struct brw_context *brw)
1834 {
1835 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1836 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1837 }
1838
1839 void
1840 gen6_init_vtable_surface_functions(struct brw_context *brw)
1841 {
1842 gen4_init_vtable_surface_functions(brw);
1843 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1844 }
1845
1846 static void
1847 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1848 {
1849 struct gl_context *ctx = &brw->ctx;
1850 /* _NEW_PROGRAM */
1851 struct gl_shader_program *prog =
1852 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1853 /* BRW_NEW_CS_PROG_DATA */
1854 const struct brw_cs_prog_data *cs_prog_data =
1855 brw_cs_prog_data(brw->cs.base.prog_data);
1856
1857 if (prog && cs_prog_data->uses_num_work_groups) {
1858 const unsigned surf_idx =
1859 cs_prog_data->binding_table.work_groups_start;
1860 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1861 drm_intel_bo *bo;
1862 uint32_t bo_offset;
1863
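      /* If the work-group counts aren't already in a buffer object (i.e. this
       * is a direct rather than an indirect dispatch), upload them so the
       * shader can read gl_NumWorkGroups from memory.
       */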
1864 if (brw->compute.num_work_groups_bo == NULL) {
1865 bo = NULL;
1866 intel_upload_data(brw,
1867 (void *)brw->compute.num_work_groups,
1868 3 * sizeof(GLuint),
1869 sizeof(GLuint),
1870 &bo,
1871 &bo_offset);
1872 } else {
1873 bo = brw->compute.num_work_groups_bo;
1874 bo_offset = brw->compute.num_work_groups_offset;
1875 }
1876
1877 brw_emit_buffer_surface_state(brw, surf_offset,
1878 bo, bo_offset,
1879 BRW_SURFACEFORMAT_RAW,
1880 3 * sizeof(GLuint), 1, true);
1881 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1882 }
1883 }
1884
1885 const struct brw_tracked_state brw_cs_work_groups_surface = {
1886 .dirty = {
1887 .brw = BRW_NEW_BLORP |
1888 BRW_NEW_CS_PROG_DATA |
1889 BRW_NEW_CS_WORK_GROUPS
1890 },
1891 .emit = brw_upload_cs_work_groups_surface,
1892 };