i965: Drop the aux mt when not used
mesa.git: src/mesa/drivers/dri/i965/brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 struct surface_state_info {
64 unsigned num_dwords;
65 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
66 unsigned reloc_dw;
67 unsigned aux_reloc_dw;
68 unsigned tex_mocs;
69 unsigned rb_mocs;
70 };
71
72 static const struct surface_state_info surface_state_infos[] = {
73 [4] = {6, 32, 1, 0},
74 [5] = {6, 32, 1, 0},
75 [6] = {6, 32, 1, 0},
76 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
77 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
78 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
79 };
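
/* Illustrative reading of the table above (annotation, not driver logic):
 * on gen8, for example, RENDER_SURFACE_STATE is 13 dwords long and must be
 * aligned to 64 bytes; the surface base address lives at dword 8 and the
 * auxiliary surface address at dword 10, which is where
 * brw_emit_surface_state() below points its relocations, while BDW_MOCS_WB
 * and BDW_MOCS_PTE are the cacheability settings used for textures and
 * render targets respectively.
 */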
80
81 static void
82 brw_emit_surface_state(struct brw_context *brw,
83 struct intel_mipmap_tree *mt, uint32_t flags,
84 GLenum target, struct isl_view view,
85 uint32_t mocs, uint32_t *surf_offset, int surf_index,
86 unsigned read_domains, unsigned write_domains)
87 {
88 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
89 uint32_t tile_x = mt->level[0].slice[0].x_offset;
90 uint32_t tile_y = mt->level[0].slice[0].y_offset;
91 uint32_t offset = mt->offset;
92
93 struct isl_surf surf;
94 intel_miptree_get_isl_surf(brw, mt, &surf);
95
96 surf.dim = get_isl_surf_dim(target);
97
98 const enum isl_dim_layout dim_layout =
99 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
100
101 if (surf.dim_layout != dim_layout) {
102        /* The layout of the specified texture target is not compatible with the
103         * actual layout of the miptree structure in memory.  You're entering
104         * dangerous territory: this can only possibly work if you only intended
105         * to access a single level and slice of the texture, and the hardware
106         * supports the tile offset feature in order to allow non-tile-aligned
107         * base offsets, since we'll have to point the hardware to the first
108         * texel of the level instead of relying on the usual base level/layer
109         * controls.
110         */
111 assert(brw->has_surface_tile_offset);
112 assert(view.levels == 1 && view.array_len == 1);
113 assert(tile_x == 0 && tile_y == 0);
114
115 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
116 view.base_array_layer,
117 &tile_x, &tile_y);
118
119 /* Minify the logical dimensions of the texture. */
120 const unsigned l = view.base_level - mt->first_level;
121 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
122 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
123 minify(surf.logical_level0_px.height, l);
124 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
125 minify(surf.logical_level0_px.depth, l);
126
127 /* Only the base level and layer can be addressed with the overridden
128 * layout.
129 */
130 surf.logical_level0_px.array_len = 1;
131 surf.levels = 1;
132 surf.dim_layout = dim_layout;
133
134 /* The requested slice of the texture is now at the base level and
135 * layer.
136 */
137 view.base_level = 0;
138 view.base_array_layer = 0;
139 }
140
141 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
142
143 struct isl_surf *aux_surf = NULL, aux_surf_s;
144 uint64_t aux_offset = 0;
145 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
146 if (mt->mcs_buf && !(flags & INTEL_AUX_BUFFER_DISABLED)) {
147 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
148 aux_surf = &aux_surf_s;
149 assert(mt->mcs_buf->offset == 0);
150 aux_offset = mt->mcs_buf->bo->offset64;
151
152 /* We only really need a clear color if we also have an auxiliary
153         * surface.  Without one, it does nothing.
154 */
155 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
156 }
157
158 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
159 ss_info.num_dwords * 4, ss_info.ss_align,
160 surf_index, surf_offset);
161
162 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
163 .address = mt->bo->offset64 + offset,
164 .aux_surf = aux_surf, .aux_usage = aux_usage,
165 .aux_address = aux_offset,
166 .mocs = mocs, .clear_color = clear_color,
167 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
168
169 drm_intel_bo_emit_reloc(brw->batch.bo,
170 *surf_offset + 4 * ss_info.reloc_dw,
171 mt->bo, offset,
172 read_domains, write_domains);
173
174 if (aux_surf) {
175 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
176 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
177 * contain other control information. Since buffer addresses are always
178 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
179 * an ordinary reloc to do the necessary address translation.
180 */
181 assert((aux_offset & 0xfff) == 0);
182 drm_intel_bo_emit_reloc(brw->batch.bo,
183 *surf_offset + 4 * ss_info.aux_reloc_dw,
184 mt->mcs_buf->bo,
185 dw[ss_info.aux_reloc_dw] & 0xfff,
186 read_domains, write_domains);
187 }
188 }
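
/* Sketch of the aux relocation trick above, assuming a hypothetical MCS
 * buffer at GPU address 0x00123000 with control bits 0x5 in the low 12 bits
 * of the aux dword (dword 6 in the gen7 layout):
 *
 *   dw[aux_reloc_dw] = 0x00123000 | 0x5;          // filled in by isl
 *   delta            = dw[aux_reloc_dw] & 0xfff;  // 0x5, kept by the reloc
 *
 * Because the MCS BO is 4k-aligned, the relocated value (BO address + delta)
 * reproduces the dword isl computed, just with the final address patched in.
 */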
189
190 uint32_t
191 brw_update_renderbuffer_surface(struct brw_context *brw,
192 struct gl_renderbuffer *rb,
193 uint32_t flags, unsigned unit /* unused */,
194 uint32_t surf_index)
195 {
196 struct gl_context *ctx = &brw->ctx;
197 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
198 struct intel_mipmap_tree *mt = irb->mt;
199
200 if (brw->gen < 9) {
201 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
202 }
203
204 assert(brw_render_target_supported(brw, rb));
205 intel_miptree_used_for_rendering(mt);
206
207 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
208 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
209 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
210 __func__, _mesa_get_format_name(rb_format));
211 }
212
213 const unsigned layer_multiplier =
214 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
215 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
216 MAX2(irb->mt->num_samples, 1) : 1;
217
218 struct isl_view view = {
219 .format = brw->render_target_format[rb_format],
220 .base_level = irb->mt_level - irb->mt->first_level,
221 .levels = 1,
222 .base_array_layer = irb->mt_layer / layer_multiplier,
223 .array_len = MAX2(irb->layer_count, 1),
224 .swizzle = ISL_SWIZZLE_IDENTITY,
225 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
226 };
227
228 uint32_t offset;
229 brw_emit_surface_state(brw, mt, flags, mt->target, view,
230 surface_state_infos[brw->gen].rb_mocs,
231 &offset, surf_index,
232 I915_GEM_DOMAIN_RENDER,
233 I915_GEM_DOMAIN_RENDER);
234 return offset;
235 }
236
237 GLuint
238 translate_tex_target(GLenum target)
239 {
240 switch (target) {
241 case GL_TEXTURE_1D:
242 case GL_TEXTURE_1D_ARRAY_EXT:
243 return BRW_SURFACE_1D;
244
245 case GL_TEXTURE_RECTANGLE_NV:
246 return BRW_SURFACE_2D;
247
248 case GL_TEXTURE_2D:
249 case GL_TEXTURE_2D_ARRAY_EXT:
250 case GL_TEXTURE_EXTERNAL_OES:
251 case GL_TEXTURE_2D_MULTISAMPLE:
252 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
253 return BRW_SURFACE_2D;
254
255 case GL_TEXTURE_3D:
256 return BRW_SURFACE_3D;
257
258 case GL_TEXTURE_CUBE_MAP:
259 case GL_TEXTURE_CUBE_MAP_ARRAY:
260 return BRW_SURFACE_CUBE;
261
262 default:
263 unreachable("not reached");
264 }
265 }
266
267 uint32_t
268 brw_get_surface_tiling_bits(uint32_t tiling)
269 {
270 switch (tiling) {
271 case I915_TILING_X:
272 return BRW_SURFACE_TILED;
273 case I915_TILING_Y:
274 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
275 default:
276 return 0;
277 }
278 }
279
280
281 uint32_t
282 brw_get_surface_num_multisamples(unsigned num_samples)
283 {
284 if (num_samples > 1)
285 return BRW_SURFACE_MULTISAMPLECOUNT_4;
286 else
287 return BRW_SURFACE_MULTISAMPLECOUNT_1;
288 }
289
290 /**
291 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
292 * swizzling.
293 */
294 int
295 brw_get_texture_swizzle(const struct gl_context *ctx,
296 const struct gl_texture_object *t)
297 {
298 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
299
300 int swizzles[SWIZZLE_NIL + 1] = {
301 SWIZZLE_X,
302 SWIZZLE_Y,
303 SWIZZLE_Z,
304 SWIZZLE_W,
305 SWIZZLE_ZERO,
306 SWIZZLE_ONE,
307 SWIZZLE_NIL
308 };
309
310 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
311 img->_BaseFormat == GL_DEPTH_STENCIL) {
312 GLenum depth_mode = t->DepthMode;
313
314 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
315 * with depth component data specified with a sized internal format.
316 * Otherwise, it's left at the old default, GL_LUMINANCE.
317 */
318 if (_mesa_is_gles3(ctx) &&
319 img->InternalFormat != GL_DEPTH_COMPONENT &&
320 img->InternalFormat != GL_DEPTH_STENCIL) {
321 depth_mode = GL_RED;
322 }
323
324 switch (depth_mode) {
325 case GL_ALPHA:
326 swizzles[0] = SWIZZLE_ZERO;
327 swizzles[1] = SWIZZLE_ZERO;
328 swizzles[2] = SWIZZLE_ZERO;
329 swizzles[3] = SWIZZLE_X;
330 break;
331 case GL_LUMINANCE:
332 swizzles[0] = SWIZZLE_X;
333 swizzles[1] = SWIZZLE_X;
334 swizzles[2] = SWIZZLE_X;
335 swizzles[3] = SWIZZLE_ONE;
336 break;
337 case GL_INTENSITY:
338 swizzles[0] = SWIZZLE_X;
339 swizzles[1] = SWIZZLE_X;
340 swizzles[2] = SWIZZLE_X;
341 swizzles[3] = SWIZZLE_X;
342 break;
343 case GL_RED:
344 swizzles[0] = SWIZZLE_X;
345 swizzles[1] = SWIZZLE_ZERO;
346 swizzles[2] = SWIZZLE_ZERO;
347 swizzles[3] = SWIZZLE_ONE;
348 break;
349 }
350 }
351
352 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
353
354 /* If the texture's format is alpha-only, force R, G, and B to
355 * 0.0. Similarly, if the texture's format has no alpha channel,
356  * force the alpha value read to 1.0.  This allows the
357 * implementation to use an RGBA texture for any of these formats
358 * without leaking any unexpected values.
359 */
360 switch (img->_BaseFormat) {
361 case GL_ALPHA:
362 swizzles[0] = SWIZZLE_ZERO;
363 swizzles[1] = SWIZZLE_ZERO;
364 swizzles[2] = SWIZZLE_ZERO;
365 break;
366 case GL_LUMINANCE:
367 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
368 swizzles[0] = SWIZZLE_X;
369 swizzles[1] = SWIZZLE_X;
370 swizzles[2] = SWIZZLE_X;
371 swizzles[3] = SWIZZLE_ONE;
372 }
373 break;
374 case GL_LUMINANCE_ALPHA:
375 if (datatype == GL_SIGNED_NORMALIZED) {
376 swizzles[0] = SWIZZLE_X;
377 swizzles[1] = SWIZZLE_X;
378 swizzles[2] = SWIZZLE_X;
379 swizzles[3] = SWIZZLE_W;
380 }
381 break;
382 case GL_INTENSITY:
383 if (datatype == GL_SIGNED_NORMALIZED) {
384 swizzles[0] = SWIZZLE_X;
385 swizzles[1] = SWIZZLE_X;
386 swizzles[2] = SWIZZLE_X;
387 swizzles[3] = SWIZZLE_X;
388 }
389 break;
390 case GL_RED:
391 case GL_RG:
392 case GL_RGB:
393 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
394 swizzles[3] = SWIZZLE_ONE;
395 break;
396 }
397
398 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
399 swizzles[GET_SWZ(t->_Swizzle, 1)],
400 swizzles[GET_SWZ(t->_Swizzle, 2)],
401 swizzles[GET_SWZ(t->_Swizzle, 3)]);
402 }
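
/* Worked example of the swizzle composition above (illustrative only):
 * a GL_DEPTH_COMPONENT texture with an effective depth mode of GL_LUMINANCE
 * yields swizzles[] = { X, X, X, ONE }.  If the application also sets a
 * texture swizzle of (R, R, R, A), t->_Swizzle selects swizzles[0], [0],
 * [0] and [3], so the final result is MAKE_SWIZZLE4(X, X, X, ONE).
 */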
403
404 /**
405  * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
406  * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
407 *
408 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
409 * 0 1 2 3 4 5
410 * 4 5 6 7 0 1
411 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
412 *
413 * which is simply adding 4 then modding by 8 (or anding with 7).
414 *
415 * We then may need to apply workarounds for textureGather hardware bugs.
416 */
417 static unsigned
418 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
419 {
420 unsigned scs = (swizzle + 4) & 7;
421
422 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
423 }
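
/* Quick sanity check of the "+4, mod 8" mapping (illustrative only):
 *   swizzle_to_scs(SWIZZLE_X,    false) == (0 + 4) & 7 == 4 == HSW_SCS_RED
 *   swizzle_to_scs(SWIZZLE_ZERO, false) == (4 + 4) & 7 == 0 == HSW_SCS_ZERO
 * and with need_green_to_blue set, a HSW_SCS_GREEN result is remapped to
 * HSW_SCS_BLUE to work around the Haswell textureGather bug.
 */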
424
425 static unsigned
426 brw_find_matching_rb(const struct gl_framebuffer *fb,
427 const struct intel_mipmap_tree *mt)
428 {
429 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
430 const struct intel_renderbuffer *irb =
431 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
432
433 if (irb && irb->mt == mt)
434 return i;
435 }
436
437 return fb->_NumColorDrawBuffers;
438 }
439
440 static inline bool
441 brw_texture_view_sane(const struct brw_context *brw,
442 const struct intel_mipmap_tree *mt, unsigned format)
443 {
444 /* There are special cases only for lossless compression. */
445 if (!intel_miptree_is_lossless_compressed(brw, mt))
446 return true;
447
448 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
449 format))
450 return true;
451
452 /* Logic elsewhere needs to take care to resolve the color buffer prior
453 * to sampling it as non-compressed.
454 */
455 if (mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)
456 return false;
457
458 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
459 const unsigned rb_index = brw_find_matching_rb(fb, mt);
460
461 if (rb_index == fb->_NumColorDrawBuffers)
462 return true;
463
464    /* The underlying surface is compressed but it is sampled using a format
465     * that the sampling engine doesn't support as compressed.  Compression
466     * must be disabled for both the sampling engine and the data port in case
467     * the same surface is also used as a render target.
468 */
469 return brw->draw_aux_buffer_disabled[rb_index];
470 }
471
472 static bool
473 brw_disable_aux_surface(const struct brw_context *brw,
474 const struct intel_mipmap_tree *mt)
475 {
476 /* Nothing to disable. */
477 if (!mt->mcs_buf)
478 return false;
479
480 /* There are special cases only for lossless compression. */
481 if (!intel_miptree_is_lossless_compressed(brw, mt))
482 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
483
484 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
485 const unsigned rb_index = brw_find_matching_rb(fb, mt);
486
487 /* If we are drawing into this with compression enabled, then we must also
488 * enable compression when texturing from it regardless of
489     * fast_clear_state.  If we don't, then after the first draw call with
490 * this setup, there will be data in the CCS which won't get picked up by
491 * subsequent texturing operations as required by ARB_texture_barrier.
492 * Since we don't want to re-emit the binding table or do a resolve
493 * operation every draw call, the easiest thing to do is just enable
494 * compression on the texturing side. This is completely safe to do
495 * since, if compressed texturing weren't allowed, we would have disabled
496 * compression of render targets in whatever_that_function_is_called().
497 */
498 if (rb_index < fb->_NumColorDrawBuffers) {
499 if (brw->draw_aux_buffer_disabled[rb_index]) {
500 assert(mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED);
501 }
502
503 return brw->draw_aux_buffer_disabled[rb_index];
504 }
505
506 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
507 }
508
509 void
510 brw_update_texture_surface(struct gl_context *ctx,
511 unsigned unit,
512 uint32_t *surf_offset,
513 bool for_gather,
514 uint32_t plane)
515 {
516 struct brw_context *brw = brw_context(ctx);
517 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
518
519 if (obj->Target == GL_TEXTURE_BUFFER) {
520 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
521
522 } else {
523 struct intel_texture_object *intel_obj = intel_texture_object(obj);
524 struct intel_mipmap_tree *mt = intel_obj->mt;
525
526 if (plane > 0) {
527 if (mt->plane[plane - 1] == NULL)
528 return;
529 mt = mt->plane[plane - 1];
530 }
531
532 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
533 /* If this is a view with restricted NumLayers, then our effective depth
534 * is not just the miptree depth.
535 */
536 const unsigned view_num_layers =
537 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
538 mt->logical_depth0;
539
540 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
541 * texturing functions that return a float, as our code generation always
542 * selects the .x channel (which would always be 0).
543 */
544 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
545 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
546 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
547 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
548 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
549 brw_get_texture_swizzle(&brw->ctx, obj));
550
551 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
552 unsigned format = translate_tex_format(brw, mesa_fmt,
553 sampler->sRGBDecode);
554
555 /* Implement gen6 and gen7 gather work-around */
556 bool need_green_to_blue = false;
557 if (for_gather) {
558 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
559 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
560 need_green_to_blue = brw->is_haswell;
561 } else if (brw->gen == 6) {
562 /* Sandybridge's gather4 message is broken for integer formats.
563 * To work around this, we pretend the surface is UNORM for
564 * 8 or 16-bit formats, and emit shader instructions to recover
565 * the real INT/UINT value. For 32-bit formats, we pretend
566 * the surface is FLOAT, and simply reinterpret the resulting
567 * bits.
568 */
569 switch (format) {
570 case BRW_SURFACEFORMAT_R8_SINT:
571 case BRW_SURFACEFORMAT_R8_UINT:
572 format = BRW_SURFACEFORMAT_R8_UNORM;
573 break;
574
575 case BRW_SURFACEFORMAT_R16_SINT:
576 case BRW_SURFACEFORMAT_R16_UINT:
577 format = BRW_SURFACEFORMAT_R16_UNORM;
578 break;
579
580 case BRW_SURFACEFORMAT_R32_SINT:
581 case BRW_SURFACEFORMAT_R32_UINT:
582 format = BRW_SURFACEFORMAT_R32_FLOAT;
583 break;
584
585 default:
586 break;
587 }
588 }
589 }
590
591 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
592 if (brw->gen <= 7) {
593 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
594 mt = mt->r8stencil_mt;
595 } else {
596 mt = mt->stencil_mt;
597 }
598 format = BRW_SURFACEFORMAT_R8_UINT;
599 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
600 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
601 mt = mt->r8stencil_mt;
602 format = BRW_SURFACEFORMAT_R8_UINT;
603 }
604
605 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
606
607 struct isl_view view = {
608 .format = format,
609 .base_level = obj->MinLevel + obj->BaseLevel,
610 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
611 .base_array_layer = obj->MinLayer,
612 .array_len = view_num_layers,
613 .swizzle = {
614 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
615 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
616 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
617 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
618 },
619 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
620 };
621
622 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
623 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
624 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
625
626 assert(brw_texture_view_sane(brw, mt, format));
627
628 const int flags =
629 brw_disable_aux_surface(brw, mt) ? INTEL_AUX_BUFFER_DISABLED : 0;
630 brw_emit_surface_state(brw, mt, flags, mt->target, view,
631 surface_state_infos[brw->gen].tex_mocs,
632 surf_offset, surf_index,
633 I915_GEM_DOMAIN_SAMPLER, 0);
634 }
635 }
636
637 void
638 brw_emit_buffer_surface_state(struct brw_context *brw,
639 uint32_t *out_offset,
640 drm_intel_bo *bo,
641 unsigned buffer_offset,
642 unsigned surface_format,
643 unsigned buffer_size,
644 unsigned pitch,
645 bool rw)
646 {
647 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
648
649 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
650 ss_info.num_dwords * 4, ss_info.ss_align,
651 out_offset);
652
653 isl_buffer_fill_state(&brw->isl_dev, dw,
654 .address = (bo ? bo->offset64 : 0) + buffer_offset,
655 .size = buffer_size,
656 .format = surface_format,
657 .stride = pitch,
658 .mocs = ss_info.tex_mocs);
659
660 if (bo) {
661 drm_intel_bo_emit_reloc(brw->batch.bo,
662 *out_offset + 4 * ss_info.reloc_dw,
663 bo, buffer_offset,
664 I915_GEM_DOMAIN_SAMPLER,
665 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
666 }
667 }
668
669 void
670 brw_update_buffer_texture_surface(struct gl_context *ctx,
671 unsigned unit,
672 uint32_t *surf_offset)
673 {
674 struct brw_context *brw = brw_context(ctx);
675 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
676 struct intel_buffer_object *intel_obj =
677 intel_buffer_object(tObj->BufferObject);
678 uint32_t size = tObj->BufferSize;
679 drm_intel_bo *bo = NULL;
680 mesa_format format = tObj->_BufferObjectFormat;
681 uint32_t brw_format = brw_format_for_mesa_format(format);
682 int texel_size = _mesa_get_format_bytes(format);
683
684 if (intel_obj) {
685 size = MIN2(size, intel_obj->Base.Size);
686 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
687 }
688
689 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
690 _mesa_problem(NULL, "bad format %s for texture buffer\n",
691 _mesa_get_format_name(format));
692 }
693
694 brw_emit_buffer_surface_state(brw, surf_offset, bo,
695 tObj->BufferOffset,
696 brw_format,
697 size,
698 texel_size,
699 false /* rw */);
700 }
701
702 /**
703 * Create the constant buffer surface. Vertex/fragment shader constants will be
704 * read from this buffer with Data Port Read instructions/messages.
705 */
706 void
707 brw_create_constant_surface(struct brw_context *brw,
708 drm_intel_bo *bo,
709 uint32_t offset,
710 uint32_t size,
711 uint32_t *out_offset)
712 {
713 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
714 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
715 size, 1, false);
716 }
717
718 /**
719 * Create the buffer surface. Shader buffer variables will be
720  * read from / written to this buffer with Data Port Read/Write
721 * instructions/messages.
722 */
723 void
724 brw_create_buffer_surface(struct brw_context *brw,
725 drm_intel_bo *bo,
726 uint32_t offset,
727 uint32_t size,
728 uint32_t *out_offset)
729 {
730 /* Use a raw surface so we can reuse existing untyped read/write/atomic
731 * messages. We need these specifically for the fragment shader since they
732 * include a pixel mask header that we need to ensure correct behavior
733 * with helper invocations, which cannot write to the buffer.
734 */
735 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
736 BRW_SURFACEFORMAT_RAW,
737 size, 1, true);
738 }
739
740 /**
741 * Set up a binding table entry for use by stream output logic (transform
742 * feedback).
743 *
744 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
745 */
746 void
747 brw_update_sol_surface(struct brw_context *brw,
748 struct gl_buffer_object *buffer_obj,
749 uint32_t *out_offset, unsigned num_vector_components,
750 unsigned stride_dwords, unsigned offset_dwords)
751 {
752 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
753 uint32_t offset_bytes = 4 * offset_dwords;
754 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
755 offset_bytes,
756 buffer_obj->Size - offset_bytes);
757 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
758 out_offset);
759 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
760 size_t size_dwords = buffer_obj->Size / 4;
761 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
762
763 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
764 * too big to map using a single binding table entry?
765 */
766 assert((size_dwords - offset_dwords) / stride_dwords
767 <= BRW_MAX_NUM_BUFFER_ENTRIES);
768
769 if (size_dwords > offset_dwords + num_vector_components) {
770 /* There is room for at least 1 transform feedback output in the buffer.
771 * Compute the number of additional transform feedback outputs the
772 * buffer has room for.
773 */
774 buffer_size_minus_1 =
775 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
776 } else {
777 /* There isn't even room for a single transform feedback output in the
778 * buffer. We can't configure the binding table entry to prevent output
779 * entirely; we'll have to rely on the geometry shader to detect
780 * overflow. But to minimize the damage in case of a bug, set up the
781 * binding table entry to just allow a single output.
782 */
783 buffer_size_minus_1 = 0;
784 }
785 width = buffer_size_minus_1 & 0x7f;
786 height = (buffer_size_minus_1 & 0xfff80) >> 7;
787 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
788
789 switch (num_vector_components) {
790 case 1:
791 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
792 break;
793 case 2:
794 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
795 break;
796 case 3:
797 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
798 break;
799 case 4:
800 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
801 break;
802 default:
803 unreachable("Invalid vector size for transform feedback output");
804 }
805
806 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
807 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
808 surface_format << BRW_SURFACE_FORMAT_SHIFT |
809 BRW_SURFACE_RC_READ_WRITE;
810 surf[1] = bo->offset64 + offset_bytes; /* reloc */
811 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
812 height << BRW_SURFACE_HEIGHT_SHIFT);
813 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
814 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
815 surf[4] = 0;
816 surf[5] = 0;
817
818 /* Emit relocation to surface contents. */
819 drm_intel_bo_emit_reloc(brw->batch.bo,
820 *out_offset + 4,
821 bo, offset_bytes,
822 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
823 }
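
/* Illustration of the size packing above (annotation, not driver logic):
 * suppose a hypothetical transform feedback buffer of 4096 dwords with
 * offset_dwords = 0, stride_dwords = 4 and num_vector_components = 4.  Then
 *
 *   buffer_size_minus_1 = (4096 - 0 - 4) / 4 = 1023
 *   width  = 1023 & 0x7f               = 127   (bits  6:0)
 *   height = (1023 & 0xfff80) >> 7     = 7     (bits 19:7)
 *   depth  = (1023 & 0x7f00000) >> 20  = 0     (bits 26:20)
 *
 * i.e. the entry count is simply split across the Width/Height/Depth fields
 * of the buffer SURFACE_STATE.
 */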
824
825 /* Creates a new WM constant buffer reflecting the current fragment program's
826 * constants, if needed by the fragment program.
827 *
828 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
829 * state atom.
830 */
831 static void
832 brw_upload_wm_pull_constants(struct brw_context *brw)
833 {
834 struct brw_stage_state *stage_state = &brw->wm.base;
835 /* BRW_NEW_FRAGMENT_PROGRAM */
836 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
837 /* BRW_NEW_FS_PROG_DATA */
838 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
839
840 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
841 /* _NEW_PROGRAM_CONSTANTS */
842 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
843 stage_state, prog_data);
844 }
845
846 const struct brw_tracked_state brw_wm_pull_constants = {
847 .dirty = {
848 .mesa = _NEW_PROGRAM_CONSTANTS,
849 .brw = BRW_NEW_BATCH |
850 BRW_NEW_BLORP |
851 BRW_NEW_FRAGMENT_PROGRAM |
852 BRW_NEW_FS_PROG_DATA,
853 },
854 .emit = brw_upload_wm_pull_constants,
855 };
856
857 /**
858 * Creates a null renderbuffer surface.
859 *
860 * This is used when the shader doesn't write to any color output. An FB
861 * write to target 0 will still be emitted, because that's how the thread is
862 * terminated (and computed depth is returned), so we need to have the
863  * hardware discard the target 0 color output.
864 */
865 static void
866 brw_emit_null_surface_state(struct brw_context *brw,
867 unsigned width,
868 unsigned height,
869 unsigned samples,
870 uint32_t *out_offset)
871 {
872 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
873 * Notes):
874 *
875 * A null surface will be used in instances where an actual surface is
876 * not bound. When a write message is generated to a null surface, no
877 * actual surface is written to. When a read message (including any
878 * sampling engine message) is generated to a null surface, the result
879 * is all zeros. Note that a null surface type is allowed to be used
880  *     with all messages, even if it is not specifically indicated as
881 * supported. All of the remaining fields in surface state are ignored
882 * for null surfaces, with the following exceptions:
883 *
884 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
885 * depth buffer’s corresponding state for all render target surfaces,
886 * including null.
887 *
888 * - Surface Format must be R8G8B8A8_UNORM.
889 */
890 unsigned surface_type = BRW_SURFACE_NULL;
891 drm_intel_bo *bo = NULL;
892 unsigned pitch_minus_1 = 0;
893 uint32_t multisampling_state = 0;
894 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
895 out_offset);
896
897 if (samples > 1) {
898 /* On Gen6, null render targets seem to cause GPU hangs when
899        * multisampling.  So work around this problem by rendering into a dummy
900 * color buffer.
901 *
902 * To decrease the amount of memory needed by the workaround buffer, we
903 * set its pitch to 128 bytes (the width of a Y tile). This means that
904 * the amount of memory needed for the workaround buffer is
905 * (width_in_tiles + height_in_tiles - 1) tiles.
906 *
907 * Note that since the workaround buffer will be interpreted by the
908 * hardware as an interleaved multisampled buffer, we need to compute
909 * width_in_tiles and height_in_tiles by dividing the width and height
910 * by 16 rather than the normal Y-tile size of 32.
911 */
912 unsigned width_in_tiles = ALIGN(width, 16) / 16;
913 unsigned height_in_tiles = ALIGN(height, 16) / 16;
914 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
915 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
916 size_needed);
917 bo = brw->wm.multisampled_null_render_target_bo;
918 surface_type = BRW_SURFACE_2D;
919 pitch_minus_1 = 127;
920 multisampling_state = brw_get_surface_num_multisamples(samples);
921 }
922
923 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
924 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
925 if (brw->gen < 6) {
926 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
927 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
928 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
929 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
930 }
931 surf[1] = bo ? bo->offset64 : 0;
932 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
933 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
934
935 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
936 * Notes):
937 *
938 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
939 */
940 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
941 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
942 surf[4] = multisampling_state;
943 surf[5] = 0;
944
945 if (bo) {
946 drm_intel_bo_emit_reloc(brw->batch.bo,
947 *out_offset + 4,
948 bo, 0,
949 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
950 }
951 }
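
/* Example of the workaround-buffer sizing above (illustrative only): for a
 * hypothetical 1920x1080 multisampled null target,
 *   width_in_tiles  = ALIGN(1920, 16) / 16 = 120
 *   height_in_tiles = ALIGN(1080, 16) / 16 = 68
 *   size_needed     = (120 + 68 - 1) * 4096 = 765952 bytes,
 * far smaller than a full-size dummy color buffer would be.
 */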
952
953 /**
954 * Sets up a surface state structure to point at the given region.
955 * While it is only used for the front/back buffer currently, it should be
956  * usable for further buffers when doing ARB_draw_buffers support.
957 */
958 static uint32_t
959 gen4_update_renderbuffer_surface(struct brw_context *brw,
960 struct gl_renderbuffer *rb,
961 uint32_t flags, unsigned unit,
962 uint32_t surf_index)
963 {
964 struct gl_context *ctx = &brw->ctx;
965 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
966 struct intel_mipmap_tree *mt = irb->mt;
967 uint32_t *surf;
968 uint32_t tile_x, tile_y;
969 uint32_t format = 0;
970 uint32_t offset;
971 /* _NEW_BUFFERS */
972 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
973 /* BRW_NEW_FS_PROG_DATA */
974
975 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
976 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
977
978 if (rb->TexImage && !brw->has_surface_tile_offset) {
979 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
980
981 if (tile_x != 0 || tile_y != 0) {
982 /* Original gen4 hardware couldn't draw to a non-tile-aligned
983         * destination in a miptree unless you actually set up your renderbuffer
984 * as a miptree and used the fragile lod/array_index/etc. controls to
985 * select the image. So, instead, we just make a new single-level
986 * miptree and render into that.
987 */
988 intel_renderbuffer_move_to_temp(brw, irb, false);
989 mt = irb->mt;
990 }
991 }
992
993 intel_miptree_used_for_rendering(irb->mt);
994
995 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
996
997 format = brw->render_target_format[rb_format];
998 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
999 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1000 __func__, _mesa_get_format_name(rb_format));
1001 }
1002
1003 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1004 format << BRW_SURFACE_FORMAT_SHIFT);
1005
1006 /* reloc */
1007 assert(mt->offset % mt->cpp == 0);
1008 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1009 mt->bo->offset64 + mt->offset);
1010
1011 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1012 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1013
1014 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1015 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1016
1017 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1018
1019 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1020    /* Note that the low bits of these fields are missing (only tile_x / 4 and
1021     * tile_y / 2 are stored), so insufficiently aligned offsets can't be expressed.
1022     */
1023 assert(tile_x % 4 == 0);
1024 assert(tile_y % 2 == 0);
1025 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1026 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1027 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1028
1029 if (brw->gen < 6) {
1030 /* _NEW_COLOR */
1031 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1032 (ctx->Color.BlendEnabled & (1 << unit)))
1033 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1034
1035 if (!ctx->Color.ColorMask[unit][0])
1036 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1037 if (!ctx->Color.ColorMask[unit][1])
1038 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1039 if (!ctx->Color.ColorMask[unit][2])
1040 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1041
1042       /* Disable writes to the alpha component when the
1043        * renderbuffer is XRGB.
1044 */
1045 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1046 !ctx->Color.ColorMask[unit][3]) {
1047 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1048 }
1049 }
1050
1051 drm_intel_bo_emit_reloc(brw->batch.bo,
1052 offset + 4,
1053 mt->bo,
1054 surf[1] - mt->bo->offset64,
1055 I915_GEM_DOMAIN_RENDER,
1056 I915_GEM_DOMAIN_RENDER);
1057
1058 return offset;
1059 }
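
/* Illustration of the tile offset encoding in surf[5] above (annotation,
 * not driver logic): a hypothetical intra-tile offset of tile_x = 32,
 * tile_y = 4 is stored as 32 / 4 = 8 and 4 / 2 = 2 in the X/Y offset
 * fields, which is why the asserts require tile_x to be a multiple of 4
 * and tile_y a multiple of 2; the low bits simply have nowhere to go.
 */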
1060
1061 /**
1062 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1063 */
1064 void
1065 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1066 const struct gl_framebuffer *fb,
1067 uint32_t render_target_start,
1068 uint32_t *surf_offset)
1069 {
1070 GLuint i;
1071 const unsigned int w = _mesa_geometric_width(fb);
1072 const unsigned int h = _mesa_geometric_height(fb);
1073 const unsigned int s = _mesa_geometric_samples(fb);
1074
1075 /* Update surfaces for drawing buffers */
1076 if (fb->_NumColorDrawBuffers >= 1) {
1077 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1078 const uint32_t surf_index = render_target_start + i;
1079 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1080 INTEL_RENDERBUFFER_LAYERED : 0) |
1081 (brw->draw_aux_buffer_disabled[i] ?
1082 INTEL_AUX_BUFFER_DISABLED : 0);
1083
1084 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1085 surf_offset[surf_index] =
1086 brw->vtbl.update_renderbuffer_surface(
1087 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1088 } else {
1089 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1090 &surf_offset[surf_index]);
1091 }
1092 }
1093 } else {
1094 const uint32_t surf_index = render_target_start;
1095 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1096 &surf_offset[surf_index]);
1097 }
1098 }
1099
1100 static void
1101 update_renderbuffer_surfaces(struct brw_context *brw)
1102 {
1103 const struct gl_context *ctx = &brw->ctx;
1104
1105 /* BRW_NEW_FS_PROG_DATA */
1106 const struct brw_wm_prog_data *wm_prog_data =
1107 brw_wm_prog_data(brw->wm.base.prog_data);
1108
1109 /* _NEW_BUFFERS | _NEW_COLOR */
1110 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1111 brw_update_renderbuffer_surfaces(
1112 brw, fb,
1113 wm_prog_data->binding_table.render_target_start,
1114 brw->wm.base.surf_offset);
1115 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1116 }
1117
1118 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1119 .dirty = {
1120 .mesa = _NEW_BUFFERS |
1121 _NEW_COLOR,
1122 .brw = BRW_NEW_BATCH |
1123 BRW_NEW_BLORP |
1124 BRW_NEW_FS_PROG_DATA,
1125 },
1126 .emit = update_renderbuffer_surfaces,
1127 };
1128
1129 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1130 .dirty = {
1131 .mesa = _NEW_BUFFERS,
1132 .brw = BRW_NEW_BATCH |
1133 BRW_NEW_BLORP,
1134 },
1135 .emit = update_renderbuffer_surfaces,
1136 };
1137
1138 static void
1139 update_renderbuffer_read_surfaces(struct brw_context *brw)
1140 {
1141 const struct gl_context *ctx = &brw->ctx;
1142
1143 /* BRW_NEW_FS_PROG_DATA */
1144 const struct brw_wm_prog_data *wm_prog_data =
1145 brw_wm_prog_data(brw->wm.base.prog_data);
1146
1147 /* BRW_NEW_FRAGMENT_PROGRAM */
1148 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1149 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1150 /* _NEW_BUFFERS */
1151 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1152
1153 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1154 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1155 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1156 const unsigned surf_index =
1157 wm_prog_data->binding_table.render_target_read_start + i;
1158 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1159
1160 if (irb) {
1161 const unsigned format = brw->render_target_format[
1162 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1163 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1164 format));
1165
1166 /* Override the target of the texture if the render buffer is a
1167 * single slice of a 3D texture (since the minimum array element
1168 * field of the surface state structure is ignored by the sampler
1169 * unit for 3D textures on some hardware), or if the render buffer
1170 * is a 1D array (since shaders always provide the array index
1171 * coordinate at the Z component to avoid state-dependent
1172 * recompiles when changing the texture target of the
1173 * framebuffer).
1174 */
1175 const GLenum target =
1176 (irb->mt->target == GL_TEXTURE_3D &&
1177 irb->layer_count == 1) ? GL_TEXTURE_2D :
1178 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1179 irb->mt->target;
1180
1181 /* intel_renderbuffer::mt_layer is expressed in sample units for
1182 * the UMS and CMS multisample layouts, but
1183 * intel_renderbuffer::layer_count is expressed in units of whole
1184 * logical layers regardless of the multisample layout.
1185 */
1186 const unsigned mt_layer_unit =
1187 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1188 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1189 MAX2(irb->mt->num_samples, 1) : 1;
1190
1191 const struct isl_view view = {
1192 .format = format,
1193 .base_level = irb->mt_level - irb->mt->first_level,
1194 .levels = 1,
1195 .base_array_layer = irb->mt_layer / mt_layer_unit,
1196 .array_len = irb->layer_count,
1197 .swizzle = ISL_SWIZZLE_IDENTITY,
1198 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1199 };
1200
1201 const int flags = brw->draw_aux_buffer_disabled[i] ?
1202 INTEL_AUX_BUFFER_DISABLED : 0;
1203 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1204 surface_state_infos[brw->gen].tex_mocs,
1205 surf_offset, surf_index,
1206 I915_GEM_DOMAIN_SAMPLER, 0);
1207
1208 } else {
1209 brw->vtbl.emit_null_surface_state(
1210 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1211 _mesa_geometric_samples(fb), surf_offset);
1212 }
1213 }
1214
1215 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1216 }
1217 }
1218
1219 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1220 .dirty = {
1221 .mesa = _NEW_BUFFERS,
1222 .brw = BRW_NEW_BATCH |
1223 BRW_NEW_FRAGMENT_PROGRAM |
1224 BRW_NEW_FS_PROG_DATA,
1225 },
1226 .emit = update_renderbuffer_read_surfaces,
1227 };
1228
1229 static void
1230 update_stage_texture_surfaces(struct brw_context *brw,
1231 const struct gl_program *prog,
1232 struct brw_stage_state *stage_state,
1233 bool for_gather, uint32_t plane)
1234 {
1235 if (!prog)
1236 return;
1237
1238 struct gl_context *ctx = &brw->ctx;
1239
1240 uint32_t *surf_offset = stage_state->surf_offset;
1241
1242 /* BRW_NEW_*_PROG_DATA */
1243 if (for_gather)
1244 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1245 else
1246 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1247
1248 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1249 for (unsigned s = 0; s < num_samplers; s++) {
1250 surf_offset[s] = 0;
1251
1252 if (prog->SamplersUsed & (1 << s)) {
1253 const unsigned unit = prog->SamplerUnits[s];
1254
1255 /* _NEW_TEXTURE */
1256 if (ctx->Texture.Unit[unit]._Current) {
1257 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1258 }
1259 }
1260 }
1261 }
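
/* Illustration of the sampler loop above (annotation, not driver logic):
 * with a hypothetical prog->SamplersUsed == 0b101, util_last_bit() returns
 * 3, so surf_offset[0..2] are first cleared and surface state is then
 * emitted only for samplers 0 and 2, and only if the corresponding texture
 * unit actually has a texture bound.
 */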
1262
1263
1264 /**
1265 * Construct SURFACE_STATE objects for enabled textures.
1266 */
1267 static void
1268 brw_update_texture_surfaces(struct brw_context *brw)
1269 {
1270 /* BRW_NEW_VERTEX_PROGRAM */
1271 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1272
1273 /* BRW_NEW_TESS_PROGRAMS */
1274 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1275 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1276
1277 /* BRW_NEW_GEOMETRY_PROGRAM */
1278 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1279
1280 /* BRW_NEW_FRAGMENT_PROGRAM */
1281 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1282
1283 /* _NEW_TEXTURE */
1284 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1285 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1286 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1287 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1288 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1289
1290    /* Emit an alternate set of surface state for gather.  This
1291     * allows the surface format to be overridden for only the
1292     * gather4 messages. */
1293 if (brw->gen < 8) {
1294 if (vs && vs->nir->info->uses_texture_gather)
1295 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1296 if (tcs && tcs->nir->info->uses_texture_gather)
1297 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1298 if (tes && tes->nir->info->uses_texture_gather)
1299 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1300 if (gs && gs->nir->info->uses_texture_gather)
1301 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1302 if (fs && fs->nir->info->uses_texture_gather)
1303 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1304 }
1305
1306 if (fs) {
1307 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1308 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1309 }
1310
1311 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1312 }
1313
1314 const struct brw_tracked_state brw_texture_surfaces = {
1315 .dirty = {
1316 .mesa = _NEW_TEXTURE,
1317 .brw = BRW_NEW_BATCH |
1318 BRW_NEW_BLORP |
1319 BRW_NEW_FRAGMENT_PROGRAM |
1320 BRW_NEW_FS_PROG_DATA |
1321 BRW_NEW_GEOMETRY_PROGRAM |
1322 BRW_NEW_GS_PROG_DATA |
1323 BRW_NEW_TESS_PROGRAMS |
1324 BRW_NEW_TCS_PROG_DATA |
1325 BRW_NEW_TES_PROG_DATA |
1326 BRW_NEW_TEXTURE_BUFFER |
1327 BRW_NEW_VERTEX_PROGRAM |
1328 BRW_NEW_VS_PROG_DATA,
1329 },
1330 .emit = brw_update_texture_surfaces,
1331 };
1332
1333 static void
1334 brw_update_cs_texture_surfaces(struct brw_context *brw)
1335 {
1336 /* BRW_NEW_COMPUTE_PROGRAM */
1337 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1338
1339 /* _NEW_TEXTURE */
1340 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1341
1342    /* Emit an alternate set of surface state for gather.  This
1343     * allows the surface format to be overridden for only the
1344     * gather4 messages.
1345 */
1346 if (brw->gen < 8) {
1347 if (cs && cs->nir->info->uses_texture_gather)
1348 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1349 }
1350
1351 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1352 }
1353
1354 const struct brw_tracked_state brw_cs_texture_surfaces = {
1355 .dirty = {
1356 .mesa = _NEW_TEXTURE,
1357 .brw = BRW_NEW_BATCH |
1358 BRW_NEW_BLORP |
1359 BRW_NEW_COMPUTE_PROGRAM,
1360 },
1361 .emit = brw_update_cs_texture_surfaces,
1362 };
1363
1364
1365 void
1366 brw_upload_ubo_surfaces(struct brw_context *brw,
1367 struct gl_linked_shader *shader,
1368 struct brw_stage_state *stage_state,
1369 struct brw_stage_prog_data *prog_data)
1370 {
1371 struct gl_context *ctx = &brw->ctx;
1372
1373 if (!shader)
1374 return;
1375
1376 uint32_t *ubo_surf_offsets =
1377 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1378
1379 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1380 struct gl_uniform_buffer_binding *binding =
1381 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1382
1383 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1384 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1385 } else {
1386 struct intel_buffer_object *intel_bo =
1387 intel_buffer_object(binding->BufferObject);
1388 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1389 if (!binding->AutomaticSize)
1390 size = MIN2(size, binding->Size);
1391 drm_intel_bo *bo =
1392 intel_bufferobj_buffer(brw, intel_bo,
1393 binding->Offset,
1394 size);
1395 brw_create_constant_surface(brw, bo, binding->Offset,
1396 size,
1397 &ubo_surf_offsets[i]);
1398 }
1399 }
1400
1401 uint32_t *ssbo_surf_offsets =
1402 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1403
1404 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1405 struct gl_shader_storage_buffer_binding *binding =
1406 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1407
1408 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1409 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1410 } else {
1411 struct intel_buffer_object *intel_bo =
1412 intel_buffer_object(binding->BufferObject);
1413 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1414 if (!binding->AutomaticSize)
1415 size = MIN2(size, binding->Size);
1416 drm_intel_bo *bo =
1417 intel_bufferobj_buffer(brw, intel_bo,
1418 binding->Offset,
1419 size);
1420 brw_create_buffer_surface(brw, bo, binding->Offset,
1421 size,
1422 &ssbo_surf_offsets[i]);
1423 }
1424 }
1425
1426 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1427 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1428 }
1429
1430 static void
1431 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1432 {
1433 struct gl_context *ctx = &brw->ctx;
1434 /* _NEW_PROGRAM */
1435 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1436
1437 if (!prog)
1438 return;
1439
1440 /* BRW_NEW_FS_PROG_DATA */
1441 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1442 &brw->wm.base, brw->wm.base.prog_data);
1443 }
1444
1445 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1446 .dirty = {
1447 .mesa = _NEW_PROGRAM,
1448 .brw = BRW_NEW_BATCH |
1449 BRW_NEW_BLORP |
1450 BRW_NEW_FS_PROG_DATA |
1451 BRW_NEW_UNIFORM_BUFFER,
1452 },
1453 .emit = brw_upload_wm_ubo_surfaces,
1454 };
1455
1456 static void
1457 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1458 {
1459 struct gl_context *ctx = &brw->ctx;
1460 /* _NEW_PROGRAM */
1461 struct gl_shader_program *prog =
1462 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1463
1464 if (!prog)
1465 return;
1466
1467 /* BRW_NEW_CS_PROG_DATA */
1468 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1469 &brw->cs.base, brw->cs.base.prog_data);
1470 }
1471
1472 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1473 .dirty = {
1474 .mesa = _NEW_PROGRAM,
1475 .brw = BRW_NEW_BATCH |
1476 BRW_NEW_BLORP |
1477 BRW_NEW_CS_PROG_DATA |
1478 BRW_NEW_UNIFORM_BUFFER,
1479 },
1480 .emit = brw_upload_cs_ubo_surfaces,
1481 };
1482
1483 void
1484 brw_upload_abo_surfaces(struct brw_context *brw,
1485 struct gl_linked_shader *shader,
1486 struct brw_stage_state *stage_state,
1487 struct brw_stage_prog_data *prog_data)
1488 {
1489 struct gl_context *ctx = &brw->ctx;
1490 uint32_t *surf_offsets =
1491 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1492
1493 if (shader && shader->NumAtomicBuffers) {
1494 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1495 struct gl_atomic_buffer_binding *binding =
1496 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1497 struct intel_buffer_object *intel_bo =
1498 intel_buffer_object(binding->BufferObject);
1499 drm_intel_bo *bo = intel_bufferobj_buffer(
1500 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1501
1502 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1503 binding->Offset, BRW_SURFACEFORMAT_RAW,
1504 bo->size - binding->Offset, 1, true);
1505 }
1506
1507 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1508 }
1509 }
1510
1511 static void
1512 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1513 {
1514 struct gl_context *ctx = &brw->ctx;
1515 /* _NEW_PROGRAM */
1516 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1517
1518 if (prog) {
1519 /* BRW_NEW_FS_PROG_DATA */
1520 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1521 &brw->wm.base, brw->wm.base.prog_data);
1522 }
1523 }
1524
1525 const struct brw_tracked_state brw_wm_abo_surfaces = {
1526 .dirty = {
1527 .mesa = _NEW_PROGRAM,
1528 .brw = BRW_NEW_ATOMIC_BUFFER |
1529 BRW_NEW_BLORP |
1530 BRW_NEW_BATCH |
1531 BRW_NEW_FS_PROG_DATA,
1532 },
1533 .emit = brw_upload_wm_abo_surfaces,
1534 };
1535
1536 static void
1537 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1538 {
1539 struct gl_context *ctx = &brw->ctx;
1540 /* _NEW_PROGRAM */
1541 struct gl_shader_program *prog =
1542 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1543
1544 if (prog) {
1545 /* BRW_NEW_CS_PROG_DATA */
1546 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1547 &brw->cs.base, brw->cs.base.prog_data);
1548 }
1549 }
1550
1551 const struct brw_tracked_state brw_cs_abo_surfaces = {
1552 .dirty = {
1553 .mesa = _NEW_PROGRAM,
1554 .brw = BRW_NEW_ATOMIC_BUFFER |
1555 BRW_NEW_BLORP |
1556 BRW_NEW_BATCH |
1557 BRW_NEW_CS_PROG_DATA,
1558 },
1559 .emit = brw_upload_cs_abo_surfaces,
1560 };
1561
1562 static void
1563 brw_upload_cs_image_surfaces(struct brw_context *brw)
1564 {
1565 struct gl_context *ctx = &brw->ctx;
1566 /* _NEW_PROGRAM */
1567 struct gl_shader_program *prog =
1568 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1569
1570 if (prog) {
1571 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1572 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1573 &brw->cs.base, brw->cs.base.prog_data);
1574 }
1575 }
1576
1577 const struct brw_tracked_state brw_cs_image_surfaces = {
1578 .dirty = {
1579 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1580 .brw = BRW_NEW_BATCH |
1581 BRW_NEW_BLORP |
1582 BRW_NEW_CS_PROG_DATA |
1583 BRW_NEW_IMAGE_UNITS
1584 },
1585 .emit = brw_upload_cs_image_surfaces,
1586 };
1587
1588 static uint32_t
1589 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1590 {
1591 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1592 uint32_t hw_format = brw_format_for_mesa_format(format);
1593 if (access == GL_WRITE_ONLY) {
1594 return hw_format;
1595 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1596 /* Typed surface reads support a very limited subset of the shader
1597 * image formats. Translate it into the closest format the
1598 * hardware supports.
1599 */
1600 return isl_lower_storage_image_format(devinfo, hw_format);
1601 } else {
1602       /* The hardware doesn't actually support a typed format that we can use,
1603 * so we have to fall back to untyped read/write messages.
1604 */
1605 return BRW_SURFACEFORMAT_RAW;
1606 }
1607 }
1608
1609 static void
1610 update_default_image_param(struct brw_context *brw,
1611 struct gl_image_unit *u,
1612 unsigned surface_idx,
1613 struct brw_image_param *param)
1614 {
1615 memset(param, 0, sizeof(*param));
1616 param->surface_idx = surface_idx;
1617 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1618 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1619 * detailed explanation of these parameters.
1620 */
1621 param->swizzling[0] = 0xff;
1622 param->swizzling[1] = 0xff;
1623 }
1624
1625 static void
1626 update_buffer_image_param(struct brw_context *brw,
1627 struct gl_image_unit *u,
1628 unsigned surface_idx,
1629 struct brw_image_param *param)
1630 {
1631 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1632 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1633 update_default_image_param(brw, u, surface_idx, param);
1634
1635 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1636 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1637 }
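
/* Worked example for the buffer image parameters above (illustrative only):
 * a hypothetical 4096-byte buffer image with a 16-bytes-per-texel RGBA32F
 * format yields
 *   param->size[0]   = 4096 / 16 = 256 texels
 *   param->stride[0] = 16 bytes
 */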
1638
1639 static void
1640 update_texture_image_param(struct brw_context *brw,
1641 struct gl_image_unit *u,
1642 unsigned surface_idx,
1643 struct brw_image_param *param)
1644 {
1645 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1646
1647 update_default_image_param(brw, u, surface_idx, param);
1648
1649 param->size[0] = minify(mt->logical_width0, u->Level);
1650 param->size[1] = minify(mt->logical_height0, u->Level);
1651 param->size[2] = (!u->Layered ? 1 :
1652 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1653 u->TexObj->Target == GL_TEXTURE_3D ?
1654 minify(mt->logical_depth0, u->Level) :
1655 mt->logical_depth0);
1656
1657 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1658 &param->offset[0],
1659 &param->offset[1]);
1660
1661 param->stride[0] = mt->cpp;
1662 param->stride[1] = mt->pitch / mt->cpp;
1663 param->stride[2] =
1664 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1665 param->stride[3] =
1666 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1667
1668 if (mt->tiling == I915_TILING_X) {
1669 /* An X tile is a rectangular block of 512x8 bytes. */
1670 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1671 param->tiling[1] = _mesa_logbase2(8);
1672
1673 if (brw->has_swizzling) {
1674 /* Right shifts required to swizzle bits 9 and 10 of the memory
1675 * address with bit 6.
1676 */
1677 param->swizzling[0] = 3;
1678 param->swizzling[1] = 4;
1679 }
1680 } else if (mt->tiling == I915_TILING_Y) {
1681       /* The layout of a Y-tiled surface in memory isn't really fundamentally
1682        * different from the layout of an X-tiled surface; we simply pretend that
1683        * the surface is broken up into a number of smaller 16Bx32 tiles, each
1684        * one arranged in X-major order just as is the case for X-tiling.
1685 */
1686 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1687 param->tiling[1] = _mesa_logbase2(32);
1688
1689 if (brw->has_swizzling) {
1690 /* Right shift required to swizzle bit 9 of the memory address with
1691 * bit 6.
1692 */
1693 param->swizzling[0] = 3;
1694 }
1695 }
1696
1697 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1698 * address calculation algorithm (emit_address_calculation() in
1699 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1700 * modulus equal to the LOD.
1701 */
1702 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1703 0);
1704 }
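
/* Worked example for the tiling parameters above (illustrative only),
 * assuming a hypothetical surface with mt->cpp == 4:
 *   X-tiled: param->tiling[0] = log2(512 / 4) = 7, param->tiling[1] = log2(8) = 3
 *   Y-tiled: param->tiling[0] = log2(16 / 4)  = 2, param->tiling[1] = log2(32) = 5
 * i.e. a 128x8-texel block for X tiles and a pretend 4x32-texel block for
 * the Y-tiled case.
 */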
1705
1706 static void
1707 update_image_surface(struct brw_context *brw,
1708 struct gl_image_unit *u,
1709 GLenum access,
1710 unsigned surface_idx,
1711 uint32_t *surf_offset,
1712 struct brw_image_param *param)
1713 {
1714 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1715 struct gl_texture_object *obj = u->TexObj;
1716 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1717
1718 if (obj->Target == GL_TEXTURE_BUFFER) {
1719 struct intel_buffer_object *intel_obj =
1720 intel_buffer_object(obj->BufferObject);
1721 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1722 _mesa_get_format_bytes(u->_ActualFormat));
1723
1724 brw_emit_buffer_surface_state(
1725 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1726 format, intel_obj->Base.Size, texel_size,
1727 access != GL_READ_ONLY);
1728
1729 update_buffer_image_param(brw, u, surface_idx, param);
1730
1731 } else {
1732 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1733 struct intel_mipmap_tree *mt = intel_obj->mt;
1734
1735 if (format == BRW_SURFACEFORMAT_RAW) {
1736 brw_emit_buffer_surface_state(
1737 brw, surf_offset, mt->bo, mt->offset,
1738 format, mt->bo->size - mt->offset, 1 /* pitch */,
1739 access != GL_READ_ONLY);
1740
1741 } else {
1742 const unsigned num_layers = (!u->Layered ? 1 :
1743 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1744 mt->logical_depth0);
1745
1746 struct isl_view view = {
1747 .format = format,
1748 .base_level = obj->MinLevel + u->Level,
1749 .levels = 1,
1750 .base_array_layer = obj->MinLayer + u->_Layer,
1751 .array_len = num_layers,
1752 .swizzle = ISL_SWIZZLE_IDENTITY,
1753 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1754 };
1755
1756 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1757 const int flags =
1758 mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED ?
1759 INTEL_AUX_BUFFER_DISABLED : 0;
1760 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1761 surface_state_infos[brw->gen].tex_mocs,
1762 surf_offset, surf_index,
1763 I915_GEM_DOMAIN_SAMPLER,
1764 access == GL_READ_ONLY ? 0 :
1765 I915_GEM_DOMAIN_SAMPLER);
1766 }
1767
1768 update_texture_image_param(brw, u, surface_idx, param);
1769 }
1770
1771 } else {
1772 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1773 update_default_image_param(brw, u, surface_idx, param);
1774 }
1775 }
1776
1777 void
1778 brw_upload_image_surfaces(struct brw_context *brw,
1779 struct gl_linked_shader *shader,
1780 struct brw_stage_state *stage_state,
1781 struct brw_stage_prog_data *prog_data)
1782 {
1783 struct gl_context *ctx = &brw->ctx;
1784
1785 if (shader && shader->NumImages) {
1786 for (unsigned i = 0; i < shader->NumImages; i++) {
1787 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1788 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1789
1790 update_image_surface(brw, u, shader->ImageAccess[i],
1791 surf_idx,
1792 &stage_state->surf_offset[surf_idx],
1793 &prog_data->image_param[i]);
1794 }
1795
1796 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1797 /* This may have changed the image metadata dependent on the context
1798      * image unit state and passed to the program as uniforms, so make sure
1799 * that push and pull constants are reuploaded.
1800 */
1801 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1802 }
1803 }
1804
1805 static void
1806 brw_upload_wm_image_surfaces(struct brw_context *brw)
1807 {
1808 struct gl_context *ctx = &brw->ctx;
1809 /* BRW_NEW_FRAGMENT_PROGRAM */
1810 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1811
1812 if (prog) {
1813 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1814 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1815 &brw->wm.base, brw->wm.base.prog_data);
1816 }
1817 }
1818
1819 const struct brw_tracked_state brw_wm_image_surfaces = {
1820 .dirty = {
1821 .mesa = _NEW_TEXTURE,
1822 .brw = BRW_NEW_BATCH |
1823 BRW_NEW_BLORP |
1824 BRW_NEW_FRAGMENT_PROGRAM |
1825 BRW_NEW_FS_PROG_DATA |
1826 BRW_NEW_IMAGE_UNITS
1827 },
1828 .emit = brw_upload_wm_image_surfaces,
1829 };
1830
1831 void
1832 gen4_init_vtable_surface_functions(struct brw_context *brw)
1833 {
1834 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1835 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1836 }
1837
1838 void
1839 gen6_init_vtable_surface_functions(struct brw_context *brw)
1840 {
1841 gen4_init_vtable_surface_functions(brw);
1842 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1843 }
1844
1845 static void
1846 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1847 {
1848 struct gl_context *ctx = &brw->ctx;
1849 /* _NEW_PROGRAM */
1850 struct gl_shader_program *prog =
1851 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1852 /* BRW_NEW_CS_PROG_DATA */
1853 const struct brw_cs_prog_data *cs_prog_data =
1854 brw_cs_prog_data(brw->cs.base.prog_data);
1855
1856 if (prog && cs_prog_data->uses_num_work_groups) {
1857 const unsigned surf_idx =
1858 cs_prog_data->binding_table.work_groups_start;
1859 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1860 drm_intel_bo *bo;
1861 uint32_t bo_offset;
1862
1863 if (brw->compute.num_work_groups_bo == NULL) {
1864 bo = NULL;
1865 intel_upload_data(brw,
1866 (void *)brw->compute.num_work_groups,
1867 3 * sizeof(GLuint),
1868 sizeof(GLuint),
1869 &bo,
1870 &bo_offset);
1871 } else {
1872 bo = brw->compute.num_work_groups_bo;
1873 bo_offset = brw->compute.num_work_groups_offset;
1874 }
1875
1876 brw_emit_buffer_surface_state(brw, surf_offset,
1877 bo, bo_offset,
1878 BRW_SURFACEFORMAT_RAW,
1879 3 * sizeof(GLuint), 1, true);
1880 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1881 }
1882 }
1883
1884 const struct brw_tracked_state brw_cs_work_groups_surface = {
1885 .dirty = {
1886 .brw = BRW_NEW_BLORP |
1887 BRW_NEW_CS_PROG_DATA |
1888 BRW_NEW_CS_WORK_GROUPS
1889 },
1890 .emit = brw_upload_cs_work_groups_surface,
1891 };