mesa.git: src/mesa/drivers/dri/i965/brw_wm_surface_state.c (b64f1225a89e94a4aecca18d73ed0bf3688444c9)
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 struct surface_state_info {
64 unsigned num_dwords; /* Size of RENDER_SURFACE_STATE in dwords */
65 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
66 unsigned reloc_dw; /* Dword holding the surface base address (relocation target) */
67 unsigned aux_reloc_dw; /* Dword holding the aux buffer address (relocation target) */
68 unsigned tex_mocs; /* Default MOCS for texture surfaces */
69 unsigned rb_mocs; /* Default MOCS for render target surfaces */
70 };
71
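/* SURFACE_STATE layout parameters for each hardware generation, indexed by
 * brw->gen. The gen4-6 entries leave the MOCS fields zero-initialized.
 */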
72 static const struct surface_state_info surface_state_infos[] = {
73 [4] = {6, 32, 1, 0},
74 [5] = {6, 32, 1, 0},
75 [6] = {6, 32, 1, 0},
76 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
77 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
78 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
79 };
80
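/**
 * Allocate a SURFACE_STATE structure in the batchbuffer state space and fill
 * it for the given miptree and view using isl, emitting relocations for the
 * main BO and, when present, the auxiliary (MCS) BO. If the requested target
 * isn't layout-compatible with the miptree, the state is rewritten to point
 * directly at the selected level/layer via tile offsets.
 */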
81 static void
82 brw_emit_surface_state(struct brw_context *brw,
83 struct intel_mipmap_tree *mt, uint32_t flags,
84 GLenum target, struct isl_view view,
85 uint32_t mocs, uint32_t *surf_offset, int surf_index,
86 unsigned read_domains, unsigned write_domains)
87 {
88 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
89 uint32_t tile_x = mt->level[0].slice[0].x_offset;
90 uint32_t tile_y = mt->level[0].slice[0].y_offset;
91 uint32_t offset = mt->offset;
92
93 struct isl_surf surf;
94 intel_miptree_get_isl_surf(brw, mt, &surf);
95
96 surf.dim = get_isl_surf_dim(target);
97
98 const enum isl_dim_layout dim_layout =
99 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
100
101 if (surf.dim_layout != dim_layout) {
102 /* The layout of the specified texture target is not compatible with the
103 * actual layout of the miptree structure in memory. You're entering
104 * dangerous territory: this can only possibly work if you only intended
105 * to access a single level and slice of the texture, and the hardware
106 * supports the tile offset feature in order to allow non-tile-aligned
107 * base offsets, since we'll have to point the hardware to the first
108 * texel of the level instead of relying on the usual base level/layer
109 * controls.
110 */
111 assert(brw->has_surface_tile_offset);
112 assert(view.levels == 1 && view.array_len == 1);
113 assert(tile_x == 0 && tile_y == 0);
114
115 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
116 view.base_array_layer,
117 &tile_x, &tile_y);
118
119 /* Minify the logical dimensions of the texture. */
120 const unsigned l = view.base_level - mt->first_level;
121 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
122 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
123 minify(surf.logical_level0_px.height, l);
124 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
125 minify(surf.logical_level0_px.depth, l);
126
127 /* Only the base level and layer can be addressed with the overridden
128 * layout.
129 */
130 surf.logical_level0_px.array_len = 1;
131 surf.levels = 1;
132 surf.dim_layout = dim_layout;
133
134 /* The requested slice of the texture is now at the base level and
135 * layer.
136 */
137 view.base_level = 0;
138 view.base_array_layer = 0;
139 }
140
141 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
142
143 struct isl_surf *aux_surf = NULL, aux_surf_s;
144 uint64_t aux_offset = 0;
145 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
146 if (mt->mcs_mt && !(flags & INTEL_AUX_BUFFER_DISABLED)) {
147 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
148 aux_surf = &aux_surf_s;
149 assert(mt->mcs_mt->offset == 0);
150 aux_offset = mt->mcs_mt->bo->offset64;
151
152 /* We only really need a clear color if we also have an auxiliary
153 * surface. Without one, it does nothing.
154 */
155 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
156 }
157
158 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
159 ss_info.num_dwords * 4, ss_info.ss_align,
160 surf_index, surf_offset);
161
162 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
163 .address = mt->bo->offset64 + offset,
164 .aux_surf = aux_surf, .aux_usage = aux_usage,
165 .aux_address = aux_offset,
166 .mocs = mocs, .clear_color = clear_color,
167 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
168
169 drm_intel_bo_emit_reloc(brw->batch.bo,
170 *surf_offset + 4 * ss_info.reloc_dw,
171 mt->bo, offset,
172 read_domains, write_domains);
173
174 if (aux_surf) {
175 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
176 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
177 * contain other control information. Since buffer addresses are always
178 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
179 * an ordinary reloc to do the necessary address translation.
180 */
181 assert((aux_offset & 0xfff) == 0);
182 drm_intel_bo_emit_reloc(brw->batch.bo,
183 *surf_offset + 4 * ss_info.aux_reloc_dw,
184 mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
185 read_domains, write_domains);
186 }
187 }
188
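/**
 * Build SURFACE_STATE for a color renderbuffer used as a render target
 * (the gen6+ vtbl entry) and return its offset within the batchbuffer
 * state space.
 */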
189 uint32_t
190 brw_update_renderbuffer_surface(struct brw_context *brw,
191 struct gl_renderbuffer *rb,
192 uint32_t flags, unsigned unit /* unused */,
193 uint32_t surf_index)
194 {
195 struct gl_context *ctx = &brw->ctx;
196 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
197 struct intel_mipmap_tree *mt = irb->mt;
198
199 if (brw->gen < 9) {
200 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
201 }
202
203 assert(brw_render_target_supported(brw, rb));
204 intel_miptree_used_for_rendering(mt);
205
206 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
207 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
208 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
209 __func__, _mesa_get_format_name(rb_format));
210 }
211
212 const unsigned layer_multiplier =
213 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
214 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
215 MAX2(irb->mt->num_samples, 1) : 1;
216
217 struct isl_view view = {
218 .format = brw->render_target_format[rb_format],
219 .base_level = irb->mt_level - irb->mt->first_level,
220 .levels = 1,
221 .base_array_layer = irb->mt_layer / layer_multiplier,
222 .array_len = MAX2(irb->layer_count, 1),
223 .swizzle = ISL_SWIZZLE_IDENTITY,
224 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
225 };
226
227 uint32_t offset;
228 brw_emit_surface_state(brw, mt, flags, mt->target, view,
229 surface_state_infos[brw->gen].rb_mocs,
230 &offset, surf_index,
231 I915_GEM_DOMAIN_RENDER,
232 I915_GEM_DOMAIN_RENDER);
233 return offset;
234 }
235
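/**
 * Translate a GL texture target into the BRW_SURFACE_* type expected by the
 * hardware. Array targets map to the same surface type as their non-array
 * counterparts, and rectangle textures are treated as 2D.
 */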
236 GLuint
237 translate_tex_target(GLenum target)
238 {
239 switch (target) {
240 case GL_TEXTURE_1D:
241 case GL_TEXTURE_1D_ARRAY_EXT:
242 return BRW_SURFACE_1D;
243
244 case GL_TEXTURE_RECTANGLE_NV:
245 return BRW_SURFACE_2D;
246
247 case GL_TEXTURE_2D:
248 case GL_TEXTURE_2D_ARRAY_EXT:
249 case GL_TEXTURE_EXTERNAL_OES:
250 case GL_TEXTURE_2D_MULTISAMPLE:
251 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
252 return BRW_SURFACE_2D;
253
254 case GL_TEXTURE_3D:
255 return BRW_SURFACE_3D;
256
257 case GL_TEXTURE_CUBE_MAP:
258 case GL_TEXTURE_CUBE_MAP_ARRAY:
259 return BRW_SURFACE_CUBE;
260
261 default:
262 unreachable("not reached");
263 }
264 }
265
266 uint32_t
267 brw_get_surface_tiling_bits(uint32_t tiling)
268 {
269 switch (tiling) {
270 case I915_TILING_X:
271 return BRW_SURFACE_TILED;
272 case I915_TILING_Y:
273 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
274 default:
275 return 0;
276 }
277 }
278
279
280 uint32_t
281 brw_get_surface_num_multisamples(unsigned num_samples)
282 {
283 if (num_samples > 1)
284 return BRW_SURFACE_MULTISAMPLECOUNT_4;
285 else
286 return BRW_SURFACE_MULTISAMPLECOUNT_1;
287 }
288
289 /**
290 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
291 * swizzling.
292 */
293 int
294 brw_get_texture_swizzle(const struct gl_context *ctx,
295 const struct gl_texture_object *t)
296 {
297 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
298
299 int swizzles[SWIZZLE_NIL + 1] = {
300 SWIZZLE_X,
301 SWIZZLE_Y,
302 SWIZZLE_Z,
303 SWIZZLE_W,
304 SWIZZLE_ZERO,
305 SWIZZLE_ONE,
306 SWIZZLE_NIL
307 };
308
309 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
310 img->_BaseFormat == GL_DEPTH_STENCIL) {
311 GLenum depth_mode = t->DepthMode;
312
313 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
314 * with depth component data specified with a sized internal format.
315 * Otherwise, it's left at the old default, GL_LUMINANCE.
316 */
317 if (_mesa_is_gles3(ctx) &&
318 img->InternalFormat != GL_DEPTH_COMPONENT &&
319 img->InternalFormat != GL_DEPTH_STENCIL) {
320 depth_mode = GL_RED;
321 }
322
323 switch (depth_mode) {
324 case GL_ALPHA:
325 swizzles[0] = SWIZZLE_ZERO;
326 swizzles[1] = SWIZZLE_ZERO;
327 swizzles[2] = SWIZZLE_ZERO;
328 swizzles[3] = SWIZZLE_X;
329 break;
330 case GL_LUMINANCE:
331 swizzles[0] = SWIZZLE_X;
332 swizzles[1] = SWIZZLE_X;
333 swizzles[2] = SWIZZLE_X;
334 swizzles[3] = SWIZZLE_ONE;
335 break;
336 case GL_INTENSITY:
337 swizzles[0] = SWIZZLE_X;
338 swizzles[1] = SWIZZLE_X;
339 swizzles[2] = SWIZZLE_X;
340 swizzles[3] = SWIZZLE_X;
341 break;
342 case GL_RED:
343 swizzles[0] = SWIZZLE_X;
344 swizzles[1] = SWIZZLE_ZERO;
345 swizzles[2] = SWIZZLE_ZERO;
346 swizzles[3] = SWIZZLE_ONE;
347 break;
348 }
349 }
350
351 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
352
353 /* If the texture's format is alpha-only, force R, G, and B to
354 * 0.0. Similarly, if the texture's format has no alpha channel,
355 * force the alpha value read to 1.0. This allows for the
356 * implementation to use an RGBA texture for any of these formats
357 * without leaking any unexpected values.
358 */
359 switch (img->_BaseFormat) {
360 case GL_ALPHA:
361 swizzles[0] = SWIZZLE_ZERO;
362 swizzles[1] = SWIZZLE_ZERO;
363 swizzles[2] = SWIZZLE_ZERO;
364 break;
365 case GL_LUMINANCE:
366 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
367 swizzles[0] = SWIZZLE_X;
368 swizzles[1] = SWIZZLE_X;
369 swizzles[2] = SWIZZLE_X;
370 swizzles[3] = SWIZZLE_ONE;
371 }
372 break;
373 case GL_LUMINANCE_ALPHA:
374 if (datatype == GL_SIGNED_NORMALIZED) {
375 swizzles[0] = SWIZZLE_X;
376 swizzles[1] = SWIZZLE_X;
377 swizzles[2] = SWIZZLE_X;
378 swizzles[3] = SWIZZLE_W;
379 }
380 break;
381 case GL_INTENSITY:
382 if (datatype == GL_SIGNED_NORMALIZED) {
383 swizzles[0] = SWIZZLE_X;
384 swizzles[1] = SWIZZLE_X;
385 swizzles[2] = SWIZZLE_X;
386 swizzles[3] = SWIZZLE_X;
387 }
388 break;
389 case GL_RED:
390 case GL_RG:
391 case GL_RGB:
392 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
393 swizzles[3] = SWIZZLE_ONE;
394 break;
395 }
396
397 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
398 swizzles[GET_SWZ(t->_Swizzle, 1)],
399 swizzles[GET_SWZ(t->_Swizzle, 2)],
400 swizzles[GET_SWZ(t->_Swizzle, 3)]);
401 }
402
403 /**
404 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
405 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
406 *
407 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
408 * 0 1 2 3 4 5
409 * 4 5 6 7 0 1
410 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
411 *
412 * which is simply adding 4 then modding by 8 (or anding with 7).
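 * For example, SWIZZLE_X (0) maps to (0 + 4) & 7 = 4 = SCS_RED, and
 * SWIZZLE_ZERO (4) maps to (4 + 4) & 7 = 0 = SCS_ZERO.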
413 *
414 * We then may need to apply workarounds for textureGather hardware bugs.
415 */
416 static unsigned
417 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
418 {
419 unsigned scs = (swizzle + 4) & 7;
420
421 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
422 }
423
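/**
 * Return the index of the color draw buffer whose miptree matches mt, or
 * fb->_NumColorDrawBuffers if the miptree isn't bound as a draw buffer.
 */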
424 static unsigned
425 brw_find_matching_rb(const struct gl_framebuffer *fb,
426 const struct intel_mipmap_tree *mt)
427 {
428 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
429 const struct intel_renderbuffer *irb =
430 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
431
432 if (irb && irb->mt == mt)
433 return i;
434 }
435
436 return fb->_NumColorDrawBuffers;
437 }
438
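/**
 * Sanity check (used in an assert) that sampling from the given miptree with
 * the given format won't observe stale lossless-compressed data.
 */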
439 static inline bool
440 brw_texture_view_sane(const struct brw_context *brw,
441 const struct intel_mipmap_tree *mt, unsigned format)
442 {
443 /* There are special cases only for lossless compression. */
444 if (!intel_miptree_is_lossless_compressed(brw, mt))
445 return true;
446
447 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
448 format))
449 return true;
450
451 /* Logic elsewhere needs to take care to resolve the color buffer prior
452 * to sampling it as non-compressed.
453 */
454 if (mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)
455 return false;
456
457 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
458 const unsigned rb_index = brw_find_matching_rb(fb, mt);
459
460 if (rb_index == fb->_NumColorDrawBuffers)
461 return true;
462
463 /* Underlying surface is compressed but it is sampled using a format that
464 * the sampling engine doesn't support as compressed. Compression must be
465 * disabled for both sampling engine and data port in case the same surface
466 * is used also as render target.
467 */
468 return brw->draw_aux_buffer_disabled[rb_index];
469 }
470
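/**
 * Decide whether the auxiliary (MCS/CCS) buffer should be ignored when this
 * miptree is bound for texturing, based on its fast clear state and on
 * whether it is simultaneously bound as a draw buffer with compression
 * disabled.
 */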
471 static bool
472 brw_disable_aux_surface(const struct brw_context *brw,
473 const struct intel_mipmap_tree *mt)
474 {
475 /* Nothing to disable. */
476 if (!mt->mcs_mt)
477 return false;
478
479 /* There are special cases only for lossless compression. */
480 if (!intel_miptree_is_lossless_compressed(brw, mt))
481 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
482
483 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
484 const unsigned rb_index = brw_find_matching_rb(fb, mt);
485
486 /* If we are drawing into this with compression enabled, then we must also
487 * enable compression when texturing from it regardless of
488 * fast_clear_state. If we don't, then after the first draw call with
489 * this setup, there will be data in the CCS which won't get picked up by
490 * subsequent texturing operations as required by ARB_texture_barrier.
491 * Since we don't want to re-emit the binding table or do a resolve
492 * operation every draw call, the easiest thing to do is just enable
493 * compression on the texturing side. This is completely safe to do
494 * since, if compressed texturing weren't allowed, we would have disabled
495 * compression of render targets in whatever_that_function_is_called().
496 */
497 if (rb_index < fb->_NumColorDrawBuffers) {
498 if (brw->draw_aux_buffer_disabled[rb_index]) {
499 assert(mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED);
500 }
501
502 return brw->draw_aux_buffer_disabled[rb_index];
503 }
504
505 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
506 }
507
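/**
 * Emit SURFACE_STATE for the texture bound to the given texture unit,
 * handling buffer textures, additional planes of planar (e.g. YUV) surfaces,
 * the gen6/7 gather4 format workarounds, and stencil texturing.
 */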
508 void
509 brw_update_texture_surface(struct gl_context *ctx,
510 unsigned unit,
511 uint32_t *surf_offset,
512 bool for_gather,
513 uint32_t plane)
514 {
515 struct brw_context *brw = brw_context(ctx);
516 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
517
518 if (obj->Target == GL_TEXTURE_BUFFER) {
519 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
520
521 } else {
522 struct intel_texture_object *intel_obj = intel_texture_object(obj);
523 struct intel_mipmap_tree *mt = intel_obj->mt;
524
525 if (plane > 0) {
526 if (mt->plane[plane - 1] == NULL)
527 return;
528 mt = mt->plane[plane - 1];
529 }
530
531 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
532 /* If this is a view with restricted NumLayers, then our effective depth
533 * is not just the miptree depth.
534 */
535 const unsigned view_num_layers =
536 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
537 mt->logical_depth0;
538
539 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
540 * texturing functions that return a float, as our code generation always
541 * selects the .x channel (which would always be 0).
542 */
543 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
544 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
545 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
546 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
547 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
548 brw_get_texture_swizzle(&brw->ctx, obj));
549
550 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
551 unsigned format = translate_tex_format(brw, mesa_fmt,
552 sampler->sRGBDecode);
553
554 /* Implement gen6 and gen7 gather work-around */
555 bool need_green_to_blue = false;
556 if (for_gather) {
557 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
558 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
559 need_green_to_blue = brw->is_haswell;
560 } else if (brw->gen == 6) {
561 /* Sandybridge's gather4 message is broken for integer formats.
562 * To work around this, we pretend the surface is UNORM for
563 * 8 or 16-bit formats, and emit shader instructions to recover
564 * the real INT/UINT value. For 32-bit formats, we pretend
565 * the surface is FLOAT, and simply reinterpret the resulting
566 * bits.
567 */
568 switch (format) {
569 case BRW_SURFACEFORMAT_R8_SINT:
570 case BRW_SURFACEFORMAT_R8_UINT:
571 format = BRW_SURFACEFORMAT_R8_UNORM;
572 break;
573
574 case BRW_SURFACEFORMAT_R16_SINT:
575 case BRW_SURFACEFORMAT_R16_UINT:
576 format = BRW_SURFACEFORMAT_R16_UNORM;
577 break;
578
579 case BRW_SURFACEFORMAT_R32_SINT:
580 case BRW_SURFACEFORMAT_R32_UINT:
581 format = BRW_SURFACEFORMAT_R32_FLOAT;
582 break;
583
584 default:
585 break;
586 }
587 }
588 }
589
590 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
591 if (brw->gen <= 7) {
592 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
593 mt = mt->r8stencil_mt;
594 } else {
595 mt = mt->stencil_mt;
596 }
597 format = BRW_SURFACEFORMAT_R8_UINT;
598 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
599 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
600 mt = mt->r8stencil_mt;
601 format = BRW_SURFACEFORMAT_R8_UINT;
602 }
603
604 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
605
606 struct isl_view view = {
607 .format = format,
608 .base_level = obj->MinLevel + obj->BaseLevel,
609 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
610 .base_array_layer = obj->MinLayer,
611 .array_len = view_num_layers,
612 .swizzle = {
613 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
614 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
615 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
616 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
617 },
618 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
619 };
620
621 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
622 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
623 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
624
625 assert(brw_texture_view_sane(brw, mt, format));
626
627 const int flags =
628 brw_disable_aux_surface(brw, mt) ? INTEL_AUX_BUFFER_DISABLED : 0;
629 brw_emit_surface_state(brw, mt, flags, mt->target, view,
630 surface_state_infos[brw->gen].tex_mocs,
631 surf_offset, surf_index,
632 I915_GEM_DOMAIN_SAMPLER, 0);
633 }
634 }
635
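/**
 * Fill a buffer SURFACE_STATE (or a null surface when bo is NULL) using isl
 * and emit the relocation for its address. The rw flag selects whether the
 * data port may also write to the buffer.
 */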
636 void
637 brw_emit_buffer_surface_state(struct brw_context *brw,
638 uint32_t *out_offset,
639 drm_intel_bo *bo,
640 unsigned buffer_offset,
641 unsigned surface_format,
642 unsigned buffer_size,
643 unsigned pitch,
644 bool rw)
645 {
646 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
647
648 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
649 ss_info.num_dwords * 4, ss_info.ss_align,
650 out_offset);
651
652 isl_buffer_fill_state(&brw->isl_dev, dw,
653 .address = (bo ? bo->offset64 : 0) + buffer_offset,
654 .size = buffer_size,
655 .format = surface_format,
656 .stride = pitch,
657 .mocs = ss_info.tex_mocs);
658
659 if (bo) {
660 drm_intel_bo_emit_reloc(brw->batch.bo,
661 *out_offset + 4 * ss_info.reloc_dw,
662 bo, buffer_offset,
663 I915_GEM_DOMAIN_SAMPLER,
664 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
665 }
666 }
667
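/**
 * Set up the surface for a GL_TEXTURE_BUFFER texture: clamp the size to the
 * underlying buffer object and emit a read-only buffer surface in the
 * corresponding texel format.
 */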
668 void
669 brw_update_buffer_texture_surface(struct gl_context *ctx,
670 unsigned unit,
671 uint32_t *surf_offset)
672 {
673 struct brw_context *brw = brw_context(ctx);
674 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
675 struct intel_buffer_object *intel_obj =
676 intel_buffer_object(tObj->BufferObject);
677 uint32_t size = tObj->BufferSize;
678 drm_intel_bo *bo = NULL;
679 mesa_format format = tObj->_BufferObjectFormat;
680 uint32_t brw_format = brw_format_for_mesa_format(format);
681 int texel_size = _mesa_get_format_bytes(format);
682
683 if (intel_obj) {
684 size = MIN2(size, intel_obj->Base.Size);
685 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
686 }
687
688 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
689 _mesa_problem(NULL, "bad format %s for texture buffer\n",
690 _mesa_get_format_name(format));
691 }
692
693 brw_emit_buffer_surface_state(brw, surf_offset, bo,
694 tObj->BufferOffset,
695 brw_format,
696 size,
697 texel_size,
698 false /* rw */);
699 }
700
701 /**
702 * Create the constant buffer surface. Vertex/fragment shader constants will be
703 * read from this buffer with Data Port Read instructions/messages.
704 */
705 void
706 brw_create_constant_surface(struct brw_context *brw,
707 drm_intel_bo *bo,
708 uint32_t offset,
709 uint32_t size,
710 uint32_t *out_offset)
711 {
712 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
713 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
714 size, 1, false);
715 }
716
717 /**
718 * Create the buffer surface. Shader buffer variables will be
719 * read from / written to this buffer with Data Port Read/Write
720 * instructions/messages.
721 */
722 void
723 brw_create_buffer_surface(struct brw_context *brw,
724 drm_intel_bo *bo,
725 uint32_t offset,
726 uint32_t size,
727 uint32_t *out_offset)
728 {
729 /* Use a raw surface so we can reuse existing untyped read/write/atomic
730 * messages. We need these specifically for the fragment shader since they
731 * include a pixel mask header that we need in order to ensure correct
732 * behavior with helper invocations, which cannot write to the buffer.
733 */
734 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
735 BRW_SURFACEFORMAT_RAW,
736 size, 1, true);
737 }
738
739 /**
740 * Set up a binding table entry for use by stream output logic (transform
741 * feedback).
742 *
743 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
744 */
745 void
746 brw_update_sol_surface(struct brw_context *brw,
747 struct gl_buffer_object *buffer_obj,
748 uint32_t *out_offset, unsigned num_vector_components,
749 unsigned stride_dwords, unsigned offset_dwords)
750 {
751 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
752 uint32_t offset_bytes = 4 * offset_dwords;
753 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
754 offset_bytes,
755 buffer_obj->Size - offset_bytes);
756 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
757 out_offset);
758 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
759 size_t size_dwords = buffer_obj->Size / 4;
760 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
761
762 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
763 * too big to map using a single binding table entry?
764 */
765 assert((size_dwords - offset_dwords) / stride_dwords
766 <= BRW_MAX_NUM_BUFFER_ENTRIES);
767
768 if (size_dwords > offset_dwords + num_vector_components) {
769 /* There is room for at least 1 transform feedback output in the buffer.
770 * Compute the number of additional transform feedback outputs the
771 * buffer has room for.
772 */
773 buffer_size_minus_1 =
774 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
775 } else {
776 /* There isn't even room for a single transform feedback output in the
777 * buffer. We can't configure the binding table entry to prevent output
778 * entirely; we'll have to rely on the geometry shader to detect
779 * overflow. But to minimize the damage in case of a bug, set up the
780 * binding table entry to just allow a single output.
781 */
782 buffer_size_minus_1 = 0;
783 }
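/* The number of entries (minus one) is encoded across three SURFACE_STATE
 * fields: Width holds bits 6:0, Height bits 19:7 and Depth bits 26:20.
 */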
784 width = buffer_size_minus_1 & 0x7f;
785 height = (buffer_size_minus_1 & 0xfff80) >> 7;
786 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
787
788 switch (num_vector_components) {
789 case 1:
790 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
791 break;
792 case 2:
793 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
794 break;
795 case 3:
796 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
797 break;
798 case 4:
799 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
800 break;
801 default:
802 unreachable("Invalid vector size for transform feedback output");
803 }
804
805 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
806 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
807 surface_format << BRW_SURFACE_FORMAT_SHIFT |
808 BRW_SURFACE_RC_READ_WRITE;
809 surf[1] = bo->offset64 + offset_bytes; /* reloc */
810 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
811 height << BRW_SURFACE_HEIGHT_SHIFT);
812 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
813 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
814 surf[4] = 0;
815 surf[5] = 0;
816
817 /* Emit relocation to surface contents. */
818 drm_intel_bo_emit_reloc(brw->batch.bo,
819 *out_offset + 4,
820 bo, offset_bytes,
821 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
822 }
823
824 /* Creates a new WM constant buffer reflecting the current fragment program's
825 * constants, if needed by the fragment program.
826 *
827 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
828 * state atom.
829 */
830 static void
831 brw_upload_wm_pull_constants(struct brw_context *brw)
832 {
833 struct brw_stage_state *stage_state = &brw->wm.base;
834 /* BRW_NEW_FRAGMENT_PROGRAM */
835 struct brw_fragment_program *fp =
836 (struct brw_fragment_program *) brw->fragment_program;
837 /* BRW_NEW_FS_PROG_DATA */
838 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
839
840 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
841 /* _NEW_PROGRAM_CONSTANTS */
842 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
843 stage_state, prog_data);
844 }
845
846 const struct brw_tracked_state brw_wm_pull_constants = {
847 .dirty = {
848 .mesa = _NEW_PROGRAM_CONSTANTS,
849 .brw = BRW_NEW_BATCH |
850 BRW_NEW_BLORP |
851 BRW_NEW_FRAGMENT_PROGRAM |
852 BRW_NEW_FS_PROG_DATA,
853 },
854 .emit = brw_upload_wm_pull_constants,
855 };
856
857 /**
858 * Creates a null renderbuffer surface.
859 *
860 * This is used when the shader doesn't write to any color output. An FB
861 * write to target 0 will still be emitted, because that's how the thread is
862 * terminated (and computed depth is returned), so we need to have the
863 * hardware discard the target 0 color output.
864 */
865 static void
866 brw_emit_null_surface_state(struct brw_context *brw,
867 unsigned width,
868 unsigned height,
869 unsigned samples,
870 uint32_t *out_offset)
871 {
872 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
873 * Notes):
874 *
875 * A null surface will be used in instances where an actual surface is
876 * not bound. When a write message is generated to a null surface, no
877 * actual surface is written to. When a read message (including any
878 * sampling engine message) is generated to a null surface, the result
879 * is all zeros. Note that a null surface type is allowed to be used
880 * with all messages, even if it is not specifically indicated as
881 * supported. All of the remaining fields in surface state are ignored
882 * for null surfaces, with the following exceptions:
883 *
884 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
885 * depth buffer’s corresponding state for all render target surfaces,
886 * including null.
887 *
888 * - Surface Format must be R8G8B8A8_UNORM.
889 */
890 unsigned surface_type = BRW_SURFACE_NULL;
891 drm_intel_bo *bo = NULL;
892 unsigned pitch_minus_1 = 0;
893 uint32_t multisampling_state = 0;
894 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
895 out_offset);
896
897 if (samples > 1) {
898 /* On Gen6, null render targets seem to cause GPU hangs when
899 * multisampling. So work around this problem by rendering into a dummy
900 * color buffer.
901 *
902 * To decrease the amount of memory needed by the workaround buffer, we
903 * set its pitch to 128 bytes (the width of a Y tile). This means that
904 * the amount of memory needed for the workaround buffer is
905 * (width_in_tiles + height_in_tiles - 1) tiles.
906 *
907 * Note that since the workaround buffer will be interpreted by the
908 * hardware as an interleaved multisampled buffer, we need to compute
909 * width_in_tiles and height_in_tiles by dividing the width and height
910 * by 16 rather than the normal Y-tile size of 32.
911 */
912 unsigned width_in_tiles = ALIGN(width, 16) / 16;
913 unsigned height_in_tiles = ALIGN(height, 16) / 16;
914 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
915 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
916 size_needed);
917 bo = brw->wm.multisampled_null_render_target_bo;
918 surface_type = BRW_SURFACE_2D;
919 pitch_minus_1 = 127;
920 multisampling_state = brw_get_surface_num_multisamples(samples);
921 }
922
923 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
924 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
925 if (brw->gen < 6) {
926 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
927 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
928 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
929 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
930 }
931 surf[1] = bo ? bo->offset64 : 0;
932 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
933 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
934
935 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
936 * Notes):
937 *
938 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
939 */
940 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
941 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
942 surf[4] = multisampling_state;
943 surf[5] = 0;
944
945 if (bo) {
946 drm_intel_bo_emit_reloc(brw->batch.bo,
947 *out_offset + 4,
948 bo, 0,
949 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
950 }
951 }
952
953 /**
954 * Sets up a surface state structure to point at the given region.
955 * While it is only used for the front/back buffer currently, it should be
956 * usable for further buffers when doing ARB_draw_buffers support.
957 */
958 static uint32_t
959 gen4_update_renderbuffer_surface(struct brw_context *brw,
960 struct gl_renderbuffer *rb,
961 uint32_t flags, unsigned unit,
962 uint32_t surf_index)
963 {
964 struct gl_context *ctx = &brw->ctx;
965 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
966 struct intel_mipmap_tree *mt = irb->mt;
967 uint32_t *surf;
968 uint32_t tile_x, tile_y;
969 uint32_t format = 0;
970 uint32_t offset;
971 /* _NEW_BUFFERS */
972 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
973 /* BRW_NEW_FS_PROG_DATA */
974
975 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
976 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
977
978 if (rb->TexImage && !brw->has_surface_tile_offset) {
979 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
980
981 if (tile_x != 0 || tile_y != 0) {
982 /* Original gen4 hardware couldn't draw to a non-tile-aligned
983 * destination in a miptree unless you actually set up your renderbuffer
984 * as a miptree and used the fragile lod/array_index/etc. controls to
985 * select the image. So, instead, we just make a new single-level
986 * miptree and render into that.
987 */
988 intel_renderbuffer_move_to_temp(brw, irb, false);
989 mt = irb->mt;
990 }
991 }
992
993 intel_miptree_used_for_rendering(irb->mt);
994
995 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
996
997 format = brw->render_target_format[rb_format];
998 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
999 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1000 __func__, _mesa_get_format_name(rb_format));
1001 }
1002
1003 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1004 format << BRW_SURFACE_FORMAT_SHIFT);
1005
1006 /* reloc */
1007 assert(mt->offset % mt->cpp == 0);
1008 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1009 mt->bo->offset64 + mt->offset);
1010
1011 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1012 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1013
1014 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1015 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1016
1017 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1018
1019 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1020 /* Note that the low bits of these fields are missing, so
1021 * there's the possibility of getting in trouble.
1022 */
1023 assert(tile_x % 4 == 0);
1024 assert(tile_y % 2 == 0);
1025 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1026 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1027 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1028
1029 if (brw->gen < 6) {
1030 /* _NEW_COLOR */
1031 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1032 (ctx->Color.BlendEnabled & (1 << unit)))
1033 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1034
1035 if (!ctx->Color.ColorMask[unit][0])
1036 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1037 if (!ctx->Color.ColorMask[unit][1])
1038 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1039 if (!ctx->Color.ColorMask[unit][2])
1040 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1041
1042 /* Disable writes to the alpha component when the renderbuffer is XRGB,
1043 * since there is no alpha channel to write.
1044 */
1045 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1046 !ctx->Color.ColorMask[unit][3]) {
1047 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1048 }
1049 }
1050
1051 drm_intel_bo_emit_reloc(brw->batch.bo,
1052 offset + 4,
1053 mt->bo,
1054 surf[1] - mt->bo->offset64,
1055 I915_GEM_DOMAIN_RENDER,
1056 I915_GEM_DOMAIN_RENDER);
1057
1058 return offset;
1059 }
1060
1061 /**
1062 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1063 */
1064 void
1065 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1066 const struct gl_framebuffer *fb,
1067 uint32_t render_target_start,
1068 uint32_t *surf_offset)
1069 {
1070 GLuint i;
1071 const unsigned int w = _mesa_geometric_width(fb);
1072 const unsigned int h = _mesa_geometric_height(fb);
1073 const unsigned int s = _mesa_geometric_samples(fb);
1074
1075 /* Update surfaces for drawing buffers */
1076 if (fb->_NumColorDrawBuffers >= 1) {
1077 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1078 const uint32_t surf_index = render_target_start + i;
1079 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1080 INTEL_RENDERBUFFER_LAYERED : 0) |
1081 (brw->draw_aux_buffer_disabled[i] ?
1082 INTEL_AUX_BUFFER_DISABLED : 0);
1083
1084 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1085 surf_offset[surf_index] =
1086 brw->vtbl.update_renderbuffer_surface(
1087 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1088 } else {
1089 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1090 &surf_offset[surf_index]);
1091 }
1092 }
1093 } else {
1094 const uint32_t surf_index = render_target_start;
1095 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1096 &surf_offset[surf_index]);
1097 }
1098 }
1099
1100 static void
1101 update_renderbuffer_surfaces(struct brw_context *brw)
1102 {
1103 const struct gl_context *ctx = &brw->ctx;
1104
1105 /* BRW_NEW_FS_PROG_DATA */
1106 const struct brw_wm_prog_data *wm_prog_data =
1107 brw_wm_prog_data(brw->wm.base.prog_data);
1108
1109 /* _NEW_BUFFERS | _NEW_COLOR */
1110 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1111 brw_update_renderbuffer_surfaces(
1112 brw, fb,
1113 wm_prog_data->binding_table.render_target_start,
1114 brw->wm.base.surf_offset);
1115 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1116 }
1117
1118 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1119 .dirty = {
1120 .mesa = _NEW_BUFFERS |
1121 _NEW_COLOR,
1122 .brw = BRW_NEW_BATCH |
1123 BRW_NEW_BLORP |
1124 BRW_NEW_FS_PROG_DATA,
1125 },
1126 .emit = update_renderbuffer_surfaces,
1127 };
1128
1129 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1130 .dirty = {
1131 .mesa = _NEW_BUFFERS,
1132 .brw = BRW_NEW_BATCH |
1133 BRW_NEW_BLORP,
1134 },
1135 .emit = update_renderbuffer_surfaces,
1136 };
1137
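/**
 * Emit texture surfaces that let the fragment shader read back the bound
 * color draw buffers (used for framebuffer fetch), falling back to null
 * surfaces for unbound attachments.
 */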
1138 static void
1139 update_renderbuffer_read_surfaces(struct brw_context *brw)
1140 {
1141 const struct gl_context *ctx = &brw->ctx;
1142
1143 /* BRW_NEW_FS_PROG_DATA */
1144 const struct brw_wm_prog_data *wm_prog_data =
1145 brw_wm_prog_data(brw->wm.base.prog_data);
1146
1147 /* BRW_NEW_FRAGMENT_PROGRAM */
1148 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1149 brw->fragment_program && brw->fragment_program->Base.info.outputs_read) {
1150 /* _NEW_BUFFERS */
1151 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1152
1153 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1154 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1155 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1156 const unsigned surf_index =
1157 wm_prog_data->binding_table.render_target_read_start + i;
1158 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1159
1160 if (irb) {
1161 const unsigned format = brw->render_target_format[
1162 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1163 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1164 format));
1165
1166 /* Override the target of the texture if the render buffer is a
1167 * single slice of a 3D texture (since the minimum array element
1168 * field of the surface state structure is ignored by the sampler
1169 * unit for 3D textures on some hardware), or if the render buffer
1170 * is a 1D array (since shaders always provide the array index
1171 * coordinate at the Z component to avoid state-dependent
1172 * recompiles when changing the texture target of the
1173 * framebuffer).
1174 */
1175 const GLenum target =
1176 (irb->mt->target == GL_TEXTURE_3D &&
1177 irb->layer_count == 1) ? GL_TEXTURE_2D :
1178 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1179 irb->mt->target;
1180
1181 /* intel_renderbuffer::mt_layer is expressed in sample units for
1182 * the UMS and CMS multisample layouts, but
1183 * intel_renderbuffer::layer_count is expressed in units of whole
1184 * logical layers regardless of the multisample layout.
1185 */
1186 const unsigned mt_layer_unit =
1187 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1188 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1189 MAX2(irb->mt->num_samples, 1) : 1;
1190
1191 const struct isl_view view = {
1192 .format = format,
1193 .base_level = irb->mt_level - irb->mt->first_level,
1194 .levels = 1,
1195 .base_array_layer = irb->mt_layer / mt_layer_unit,
1196 .array_len = irb->layer_count,
1197 .swizzle = ISL_SWIZZLE_IDENTITY,
1198 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1199 };
1200
1201 const int flags = brw->draw_aux_buffer_disabled[i] ?
1202 INTEL_AUX_BUFFER_DISABLED : 0;
1203 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1204 surface_state_infos[brw->gen].tex_mocs,
1205 surf_offset, surf_index,
1206 I915_GEM_DOMAIN_SAMPLER, 0);
1207
1208 } else {
1209 brw->vtbl.emit_null_surface_state(
1210 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1211 _mesa_geometric_samples(fb), surf_offset);
1212 }
1213 }
1214
1215 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1216 }
1217 }
1218
1219 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1220 .dirty = {
1221 .mesa = _NEW_BUFFERS,
1222 .brw = BRW_NEW_BATCH |
1223 BRW_NEW_FRAGMENT_PROGRAM |
1224 BRW_NEW_FS_PROG_DATA,
1225 },
1226 .emit = update_renderbuffer_read_surfaces,
1227 };
1228
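/**
 * Emit texture surface state for every sampler used by the given program,
 * writing the resulting offsets into the stage's binding table region
 * (either the regular/plane slots or the gather slots).
 */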
1229 static void
1230 update_stage_texture_surfaces(struct brw_context *brw,
1231 const struct gl_program *prog,
1232 struct brw_stage_state *stage_state,
1233 bool for_gather, uint32_t plane)
1234 {
1235 if (!prog)
1236 return;
1237
1238 struct gl_context *ctx = &brw->ctx;
1239
1240 uint32_t *surf_offset = stage_state->surf_offset;
1241
1242 /* BRW_NEW_*_PROG_DATA */
1243 if (for_gather)
1244 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1245 else
1246 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1247
1248 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1249 for (unsigned s = 0; s < num_samplers; s++) {
1250 surf_offset[s] = 0;
1251
1252 if (prog->SamplersUsed & (1 << s)) {
1253 const unsigned unit = prog->SamplerUnits[s];
1254
1255 /* _NEW_TEXTURE */
1256 if (ctx->Texture.Unit[unit]._Current) {
1257 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1258 }
1259 }
1260 }
1261 }
1262
1263
1264 /**
1265 * Construct SURFACE_STATE objects for enabled textures.
1266 */
1267 static void
1268 brw_update_texture_surfaces(struct brw_context *brw)
1269 {
1270 /* BRW_NEW_VERTEX_PROGRAM */
1271 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1272
1273 /* BRW_NEW_TESS_PROGRAMS */
1274 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1275 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1276
1277 /* BRW_NEW_GEOMETRY_PROGRAM */
1278 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1279
1280 /* BRW_NEW_FRAGMENT_PROGRAM */
1281 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1282
1283 /* _NEW_TEXTURE */
1284 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1285 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1286 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1287 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1288 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1289
1290 /* Emit an alternate set of surface state for gather. This
1291 * allows the surface format to be overridden for only the
1292 * gather4 messages. */
1293 if (brw->gen < 8) {
1294 if (vs && vs->nir->info->uses_texture_gather)
1295 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1296 if (tcs && tcs->nir->info->uses_texture_gather)
1297 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1298 if (tes && tes->nir->info->uses_texture_gather)
1299 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1300 if (gs && gs->nir->info->uses_texture_gather)
1301 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1302 if (fs && fs->nir->info->uses_texture_gather)
1303 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1304 }
1305
1306 if (fs) {
1307 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1308 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1309 }
1310
1311 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1312 }
1313
1314 const struct brw_tracked_state brw_texture_surfaces = {
1315 .dirty = {
1316 .mesa = _NEW_TEXTURE,
1317 .brw = BRW_NEW_BATCH |
1318 BRW_NEW_BLORP |
1319 BRW_NEW_FRAGMENT_PROGRAM |
1320 BRW_NEW_FS_PROG_DATA |
1321 BRW_NEW_GEOMETRY_PROGRAM |
1322 BRW_NEW_GS_PROG_DATA |
1323 BRW_NEW_TESS_PROGRAMS |
1324 BRW_NEW_TCS_PROG_DATA |
1325 BRW_NEW_TES_PROG_DATA |
1326 BRW_NEW_TEXTURE_BUFFER |
1327 BRW_NEW_VERTEX_PROGRAM |
1328 BRW_NEW_VS_PROG_DATA,
1329 },
1330 .emit = brw_update_texture_surfaces,
1331 };
1332
1333 static void
1334 brw_update_cs_texture_surfaces(struct brw_context *brw)
1335 {
1336 /* BRW_NEW_COMPUTE_PROGRAM */
1337 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1338
1339 /* _NEW_TEXTURE */
1340 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1341
1342 /* Emit an alternate set of surface state for gather. This
1343 * allows the surface format to be overridden for only the
1344 * gather4 messages.
1345 */
1346 if (brw->gen < 8) {
1347 if (cs && cs->nir->info->uses_texture_gather)
1348 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1349 }
1350
1351 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1352 }
1353
1354 const struct brw_tracked_state brw_cs_texture_surfaces = {
1355 .dirty = {
1356 .mesa = _NEW_TEXTURE,
1357 .brw = BRW_NEW_BATCH |
1358 BRW_NEW_BLORP |
1359 BRW_NEW_COMPUTE_PROGRAM,
1360 },
1361 .emit = brw_update_cs_texture_surfaces,
1362 };
1363
1364
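/**
 * Create constant buffer (UBO) and shader storage buffer (SSBO) surfaces for
 * all blocks of the given linked shader, pointing bindings backed by the
 * default buffer object at a null surface.
 */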
1365 void
1366 brw_upload_ubo_surfaces(struct brw_context *brw,
1367 struct gl_linked_shader *shader,
1368 struct brw_stage_state *stage_state,
1369 struct brw_stage_prog_data *prog_data)
1370 {
1371 struct gl_context *ctx = &brw->ctx;
1372
1373 if (!shader)
1374 return;
1375
1376 uint32_t *ubo_surf_offsets =
1377 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1378
1379 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1380 struct gl_uniform_buffer_binding *binding =
1381 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1382
1383 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1384 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1385 } else {
1386 struct intel_buffer_object *intel_bo =
1387 intel_buffer_object(binding->BufferObject);
1388 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1389 if (!binding->AutomaticSize)
1390 size = MIN2(size, binding->Size);
1391 drm_intel_bo *bo =
1392 intel_bufferobj_buffer(brw, intel_bo,
1393 binding->Offset,
1394 size);
1395 brw_create_constant_surface(brw, bo, binding->Offset,
1396 size,
1397 &ubo_surf_offsets[i]);
1398 }
1399 }
1400
1401 uint32_t *ssbo_surf_offsets =
1402 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1403
1404 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1405 struct gl_shader_storage_buffer_binding *binding =
1406 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1407
1408 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1409 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1410 } else {
1411 struct intel_buffer_object *intel_bo =
1412 intel_buffer_object(binding->BufferObject);
1413 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1414 if (!binding->AutomaticSize)
1415 size = MIN2(size, binding->Size);
1416 drm_intel_bo *bo =
1417 intel_bufferobj_buffer(brw, intel_bo,
1418 binding->Offset,
1419 size);
1420 brw_create_buffer_surface(brw, bo, binding->Offset,
1421 size,
1422 &ssbo_surf_offsets[i]);
1423 }
1424 }
1425
1426 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1427 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1428 }
1429
1430 static void
1431 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1432 {
1433 struct gl_context *ctx = &brw->ctx;
1434 /* _NEW_PROGRAM */
1435 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1436
1437 if (!prog)
1438 return;
1439
1440 /* BRW_NEW_FS_PROG_DATA */
1441 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1442 &brw->wm.base, brw->wm.base.prog_data);
1443 }
1444
1445 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1446 .dirty = {
1447 .mesa = _NEW_PROGRAM,
1448 .brw = BRW_NEW_BATCH |
1449 BRW_NEW_BLORP |
1450 BRW_NEW_FS_PROG_DATA |
1451 BRW_NEW_UNIFORM_BUFFER,
1452 },
1453 .emit = brw_upload_wm_ubo_surfaces,
1454 };
1455
1456 static void
1457 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1458 {
1459 struct gl_context *ctx = &brw->ctx;
1460 /* _NEW_PROGRAM */
1461 struct gl_shader_program *prog =
1462 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1463
1464 if (!prog)
1465 return;
1466
1467 /* BRW_NEW_CS_PROG_DATA */
1468 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1469 &brw->cs.base, brw->cs.base.prog_data);
1470 }
1471
1472 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1473 .dirty = {
1474 .mesa = _NEW_PROGRAM,
1475 .brw = BRW_NEW_BATCH |
1476 BRW_NEW_BLORP |
1477 BRW_NEW_CS_PROG_DATA |
1478 BRW_NEW_UNIFORM_BUFFER,
1479 },
1480 .emit = brw_upload_cs_ubo_surfaces,
1481 };
1482
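/**
 * Create RAW buffer surfaces for each atomic counter buffer binding used by
 * the given linked shader.
 */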
1483 void
1484 brw_upload_abo_surfaces(struct brw_context *brw,
1485 struct gl_linked_shader *shader,
1486 struct brw_stage_state *stage_state,
1487 struct brw_stage_prog_data *prog_data)
1488 {
1489 struct gl_context *ctx = &brw->ctx;
1490 uint32_t *surf_offsets =
1491 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1492
1493 if (shader && shader->NumAtomicBuffers) {
1494 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1495 struct gl_atomic_buffer_binding *binding =
1496 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1497 struct intel_buffer_object *intel_bo =
1498 intel_buffer_object(binding->BufferObject);
1499 drm_intel_bo *bo = intel_bufferobj_buffer(
1500 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1501
1502 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1503 binding->Offset, BRW_SURFACEFORMAT_RAW,
1504 bo->size - binding->Offset, 1, true);
1505 }
1506
1507 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1508 }
1509 }
1510
1511 static void
1512 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1513 {
1514 struct gl_context *ctx = &brw->ctx;
1515 /* _NEW_PROGRAM */
1516 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1517
1518 if (prog) {
1519 /* BRW_NEW_FS_PROG_DATA */
1520 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1521 &brw->wm.base, brw->wm.base.prog_data);
1522 }
1523 }
1524
1525 const struct brw_tracked_state brw_wm_abo_surfaces = {
1526 .dirty = {
1527 .mesa = _NEW_PROGRAM,
1528 .brw = BRW_NEW_ATOMIC_BUFFER |
1529 BRW_NEW_BLORP |
1530 BRW_NEW_BATCH |
1531 BRW_NEW_FS_PROG_DATA,
1532 },
1533 .emit = brw_upload_wm_abo_surfaces,
1534 };
1535
1536 static void
1537 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1538 {
1539 struct gl_context *ctx = &brw->ctx;
1540 /* _NEW_PROGRAM */
1541 struct gl_shader_program *prog =
1542 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1543
1544 if (prog) {
1545 /* BRW_NEW_CS_PROG_DATA */
1546 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1547 &brw->cs.base, brw->cs.base.prog_data);
1548 }
1549 }
1550
1551 const struct brw_tracked_state brw_cs_abo_surfaces = {
1552 .dirty = {
1553 .mesa = _NEW_PROGRAM,
1554 .brw = BRW_NEW_ATOMIC_BUFFER |
1555 BRW_NEW_BLORP |
1556 BRW_NEW_BATCH |
1557 BRW_NEW_CS_PROG_DATA,
1558 },
1559 .emit = brw_upload_cs_abo_surfaces,
1560 };
1561
1562 static void
1563 brw_upload_cs_image_surfaces(struct brw_context *brw)
1564 {
1565 struct gl_context *ctx = &brw->ctx;
1566 /* _NEW_PROGRAM */
1567 struct gl_shader_program *prog =
1568 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1569
1570 if (prog) {
1571 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1572 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1573 &brw->cs.base, brw->cs.base.prog_data);
1574 }
1575 }
1576
1577 const struct brw_tracked_state brw_cs_image_surfaces = {
1578 .dirty = {
1579 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1580 .brw = BRW_NEW_BATCH |
1581 BRW_NEW_BLORP |
1582 BRW_NEW_CS_PROG_DATA |
1583 BRW_NEW_IMAGE_UNITS
1584 },
1585 .emit = brw_upload_cs_image_surfaces,
1586 };
1587
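/**
 * Pick the hardware surface format used to back a shader image: write-only
 * access can use the format as-is, reads are lowered to a supported typed
 * format when one exists, and otherwise we fall back to a RAW surface
 * accessed with untyped messages.
 */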
1588 static uint32_t
1589 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1590 {
1591 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1592 uint32_t hw_format = brw_format_for_mesa_format(format);
1593 if (access == GL_WRITE_ONLY) {
1594 return hw_format;
1595 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1596 /* Typed surface reads support a very limited subset of the shader
1597 * image formats. Translate it into the closest format the
1598 * hardware supports.
1599 */
1600 return isl_lower_storage_image_format(devinfo, hw_format);
1601 } else {
1602 /* The hardware doesn't actually support a typed format that we can use
1603 * so we have to fall back to untyped read/write messages.
1604 */
1605 return BRW_SURFACEFORMAT_RAW;
1606 }
1607 }
1608
1609 static void
1610 update_default_image_param(struct brw_context *brw,
1611 struct gl_image_unit *u,
1612 unsigned surface_idx,
1613 struct brw_image_param *param)
1614 {
1615 memset(param, 0, sizeof(*param));
1616 param->surface_idx = surface_idx;
1617 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1618 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1619 * detailed explanation of these parameters.
1620 */
1621 param->swizzling[0] = 0xff;
1622 param->swizzling[1] = 0xff;
1623 }
1624
1625 static void
1626 update_buffer_image_param(struct brw_context *brw,
1627 struct gl_image_unit *u,
1628 unsigned surface_idx,
1629 struct brw_image_param *param)
1630 {
1631 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1632 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1633 update_default_image_param(brw, u, surface_idx, param);
1634
1635 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1636 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1637 }
1638
1639 static void
1640 update_texture_image_param(struct brw_context *brw,
1641 struct gl_image_unit *u,
1642 unsigned surface_idx,
1643 struct brw_image_param *param)
1644 {
1645 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1646
1647 update_default_image_param(brw, u, surface_idx, param);
1648
1649 param->size[0] = minify(mt->logical_width0, u->Level);
1650 param->size[1] = minify(mt->logical_height0, u->Level);
1651 param->size[2] = (!u->Layered ? 1 :
1652 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1653 u->TexObj->Target == GL_TEXTURE_3D ?
1654 minify(mt->logical_depth0, u->Level) :
1655 mt->logical_depth0);
1656
1657 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1658 &param->offset[0],
1659 &param->offset[1]);
1660
1661 param->stride[0] = mt->cpp;
1662 param->stride[1] = mt->pitch / mt->cpp;
1663 param->stride[2] =
1664 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1665 param->stride[3] =
1666 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1667
1668 if (mt->tiling == I915_TILING_X) {
1669 /* An X tile is a rectangular block of 512x8 bytes. */
1670 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1671 param->tiling[1] = _mesa_logbase2(8);
1672
1673 if (brw->has_swizzling) {
1674 /* Right shifts required to swizzle bits 9 and 10 of the memory
1675 * address with bit 6.
1676 */
1677 param->swizzling[0] = 3;
1678 param->swizzling[1] = 4;
1679 }
1680 } else if (mt->tiling == I915_TILING_Y) {
1681 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1682 * different from the layout of an X-tiled surface; we simply pretend that
1683 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1684 * one arranged in X-major order, just as is the case for X-tiling.
1685 */
1686 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1687 param->tiling[1] = _mesa_logbase2(32);
1688
1689 if (brw->has_swizzling) {
1690 /* Right shift required to swizzle bit 9 of the memory address with
1691 * bit 6.
1692 */
1693 param->swizzling[0] = 3;
1694 }
1695 }
1696
1697 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1698 * address calculation algorithm (emit_address_calculation() in
1699 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1700 * modulus equal to the LOD.
1701 */
1702 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1703 0);
1704 }
1705
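/**
 * Emit the surface state and fill in the brw_image_param metadata for a
 * single image unit, covering buffer images, the RAW-surface fallback and
 * ordinary texture images; invalid units get a null surface.
 */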
1706 static void
1707 update_image_surface(struct brw_context *brw,
1708 struct gl_image_unit *u,
1709 GLenum access,
1710 unsigned surface_idx,
1711 uint32_t *surf_offset,
1712 struct brw_image_param *param)
1713 {
1714 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1715 struct gl_texture_object *obj = u->TexObj;
1716 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1717
1718 if (obj->Target == GL_TEXTURE_BUFFER) {
1719 struct intel_buffer_object *intel_obj =
1720 intel_buffer_object(obj->BufferObject);
1721 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1722 _mesa_get_format_bytes(u->_ActualFormat));
1723
1724 brw_emit_buffer_surface_state(
1725 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1726 format, intel_obj->Base.Size, texel_size,
1727 access != GL_READ_ONLY);
1728
1729 update_buffer_image_param(brw, u, surface_idx, param);
1730
1731 } else {
1732 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1733 struct intel_mipmap_tree *mt = intel_obj->mt;
1734
1735 if (format == BRW_SURFACEFORMAT_RAW) {
1736 brw_emit_buffer_surface_state(
1737 brw, surf_offset, mt->bo, mt->offset,
1738 format, mt->bo->size - mt->offset, 1 /* pitch */,
1739 access != GL_READ_ONLY);
1740
1741 } else {
1742 const unsigned num_layers = (!u->Layered ? 1 :
1743 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1744 mt->logical_depth0);
1745
1746 struct isl_view view = {
1747 .format = format,
1748 .base_level = obj->MinLevel + u->Level,
1749 .levels = 1,
1750 .base_array_layer = obj->MinLayer + u->_Layer,
1751 .array_len = num_layers,
1752 .swizzle = ISL_SWIZZLE_IDENTITY,
1753 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1754 };
1755
1756 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1757 const int flags =
1758 mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED ?
1759 INTEL_AUX_BUFFER_DISABLED : 0;
1760 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1761 surface_state_infos[brw->gen].tex_mocs,
1762 surf_offset, surf_index,
1763 I915_GEM_DOMAIN_SAMPLER,
1764 access == GL_READ_ONLY ? 0 :
1765 I915_GEM_DOMAIN_SAMPLER);
1766 }
1767
1768 update_texture_image_param(brw, u, surface_idx, param);
1769 }
1770
1771 } else {
1772 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1773 update_default_image_param(brw, u, surface_idx, param);
1774 }
1775 }
1776
1777 void
1778 brw_upload_image_surfaces(struct brw_context *brw,
1779 struct gl_linked_shader *shader,
1780 struct brw_stage_state *stage_state,
1781 struct brw_stage_prog_data *prog_data)
1782 {
1783 struct gl_context *ctx = &brw->ctx;
1784
1785 if (shader && shader->NumImages) {
1786 for (unsigned i = 0; i < shader->NumImages; i++) {
1787 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1788 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1789
1790 update_image_surface(brw, u, shader->ImageAccess[i],
1791 surf_idx,
1792 &stage_state->surf_offset[surf_idx],
1793 &prog_data->image_param[i]);
1794 }
1795
1796 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1797 /* This may have changed the image metadata dependent on the context
1798 * image unit state and passed to the program as uniforms, so make sure
1799 * that push and pull constants are re-uploaded.
1800 */
1801 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1802 }
1803 }
1804
1805 static void
1806 brw_upload_wm_image_surfaces(struct brw_context *brw)
1807 {
1808 struct gl_context *ctx = &brw->ctx;
1809 /* BRW_NEW_FRAGMENT_PROGRAM */
1810 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1811
1812 if (prog) {
1813 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1814 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1815 &brw->wm.base, brw->wm.base.prog_data);
1816 }
1817 }
1818
1819 const struct brw_tracked_state brw_wm_image_surfaces = {
1820 .dirty = {
1821 .mesa = _NEW_TEXTURE,
1822 .brw = BRW_NEW_BATCH |
1823 BRW_NEW_BLORP |
1824 BRW_NEW_FRAGMENT_PROGRAM |
1825 BRW_NEW_FS_PROG_DATA |
1826 BRW_NEW_IMAGE_UNITS
1827 },
1828 .emit = brw_upload_wm_image_surfaces,
1829 };
1830
1831 void
1832 gen4_init_vtable_surface_functions(struct brw_context *brw)
1833 {
1834 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1835 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1836 }
1837
1838 void
1839 gen6_init_vtable_surface_functions(struct brw_context *brw)
1840 {
1841 gen4_init_vtable_surface_functions(brw);
1842 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1843 }
1844
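/**
 * Upload the surface backing gl_NumWorkGroups. When
 * brw->compute.num_work_groups_bo is set (e.g. for indirect dispatch) the
 * surface points at it directly; otherwise the work group counts are copied
 * into the upload buffer first.
 */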
1845 static void
1846 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1847 {
1848 struct gl_context *ctx = &brw->ctx;
1849 /* _NEW_PROGRAM */
1850 struct gl_shader_program *prog =
1851 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1852 /* BRW_NEW_CS_PROG_DATA */
1853 const struct brw_cs_prog_data *cs_prog_data =
1854 brw_cs_prog_data(brw->cs.base.prog_data);
1855
1856 if (prog && cs_prog_data->uses_num_work_groups) {
1857 const unsigned surf_idx =
1858 cs_prog_data->binding_table.work_groups_start;
1859 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1860 drm_intel_bo *bo;
1861 uint32_t bo_offset;
1862
1863 if (brw->compute.num_work_groups_bo == NULL) {
1864 bo = NULL;
1865 intel_upload_data(brw,
1866 (void *)brw->compute.num_work_groups,
1867 3 * sizeof(GLuint),
1868 sizeof(GLuint),
1869 &bo,
1870 &bo_offset);
1871 } else {
1872 bo = brw->compute.num_work_groups_bo;
1873 bo_offset = brw->compute.num_work_groups_offset;
1874 }
1875
1876 brw_emit_buffer_surface_state(brw, surf_offset,
1877 bo, bo_offset,
1878 BRW_SURFACEFORMAT_RAW,
1879 3 * sizeof(GLuint), 1, true);
1880 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1881 }
1882 }
1883
1884 const struct brw_tracked_state brw_cs_work_groups_surface = {
1885 .dirty = {
1886 .brw = BRW_NEW_BLORP |
1887 BRW_NEW_CS_PROG_DATA |
1888 BRW_NEW_CS_WORK_GROUPS
1889 },
1890 .emit = brw_upload_cs_work_groups_surface,
1891 };