i965: get uses texture gather from nir info
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 struct surface_state_info {
64 unsigned num_dwords;
65 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
66 unsigned reloc_dw;      /* DWORD index of the surface base address (relocation target) */
67 unsigned aux_reloc_dw;  /* DWORD index of the auxiliary (MCS/CCS) surface address */
68 unsigned tex_mocs;      /* MOCS value used when texturing from the surface */
69 unsigned rb_mocs;       /* MOCS value used when rendering to the surface */
70 };
71
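/* RENDER_SURFACE_STATE layout parameters, indexed by hardware generation
* (brw->gen). Entries before gen7 leave the MOCS values at zero, since
* surface state on those generations has no MOCS field.
*/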
72 static const struct surface_state_info surface_state_infos[] = {
73 [4] = {6, 32, 1, 0},
74 [5] = {6, 32, 1, 0},
75 [6] = {6, 32, 1, 0},
76 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
77 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
78 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
79 };
80
81 static void
82 brw_emit_surface_state(struct brw_context *brw,
83 struct intel_mipmap_tree *mt, uint32_t flags,
84 GLenum target, struct isl_view view,
85 uint32_t mocs, uint32_t *surf_offset, int surf_index,
86 unsigned read_domains, unsigned write_domains)
87 {
88 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
89 uint32_t tile_x = 0, tile_y = 0;
90 uint32_t offset = mt->offset;
91
92 struct isl_surf surf;
93 intel_miptree_get_isl_surf(brw, mt, &surf);
94
95 surf.dim = get_isl_surf_dim(target);
96
97 const enum isl_dim_layout dim_layout =
98 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
99
100 if (surf.dim_layout != dim_layout) {
101 /* The layout of the specified texture target is not compatible with the
102 * actual layout of the miptree structure in memory -- You're entering
103 * dangerous territory: this can only work if you only intend
104 * to access a single level and slice of the texture, and the hardware
105 * supports the tile offset feature in order to allow non-tile-aligned
106 * base offsets, since we'll have to point the hardware to the first
107 * texel of the level instead of relying on the usual base level/layer
108 * controls.
109 */
110 assert(brw->has_surface_tile_offset);
111 assert(view.levels == 1 && view.array_len == 1);
112
113 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
114 view.base_array_layer,
115 &tile_x, &tile_y);
116
117 /* Minify the logical dimensions of the texture. */
118 const unsigned l = view.base_level - mt->first_level;
119 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
120 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
121 minify(surf.logical_level0_px.height, l);
122 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
123 minify(surf.logical_level0_px.depth, l);
124
125 /* Only the base level and layer can be addressed with the overridden
126 * layout.
127 */
128 surf.logical_level0_px.array_len = 1;
129 surf.levels = 1;
130 surf.dim_layout = dim_layout;
131
132 /* The requested slice of the texture is now at the base level and
133 * layer.
134 */
135 view.base_level = 0;
136 view.base_array_layer = 0;
137 }
138
139 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
140
141 struct isl_surf *aux_surf = NULL, aux_surf_s;
142 uint64_t aux_offset = 0;
143 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
144 if (mt->mcs_mt && !(flags & INTEL_AUX_BUFFER_DISABLED)) {
145 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
146 aux_surf = &aux_surf_s;
147 assert(mt->mcs_mt->offset == 0);
148 aux_offset = mt->mcs_mt->bo->offset64;
149
150 /* We only really need a clear color if we also have an auxiliary
151 * surface. Without one, it does nothing.
152 */
153 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
154 }
155
156 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
157 ss_info.num_dwords * 4, ss_info.ss_align,
158 surf_index, surf_offset);
159
160 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
161 .address = mt->bo->offset64 + offset,
162 .aux_surf = aux_surf, .aux_usage = aux_usage,
163 .aux_address = aux_offset,
164 .mocs = mocs, .clear_color = clear_color,
165 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
166
167 drm_intel_bo_emit_reloc(brw->batch.bo,
168 *surf_offset + 4 * ss_info.reloc_dw,
169 mt->bo, offset,
170 read_domains, write_domains);
171
172 if (aux_surf) {
173 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
174 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
175 * contain other control information. Since buffer addresses are always
176 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
177 * an ordinary reloc to do the necessary address translation.
178 */
179 assert((aux_offset & 0xfff) == 0);
180 drm_intel_bo_emit_reloc(brw->batch.bo,
181 *surf_offset + 4 * ss_info.aux_reloc_dw,
182 mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
183 read_domains, write_domains);
184 }
185 }
186
187 uint32_t
188 brw_update_renderbuffer_surface(struct brw_context *brw,
189 struct gl_renderbuffer *rb,
190 uint32_t flags, unsigned unit /* unused */,
191 uint32_t surf_index)
192 {
193 struct gl_context *ctx = &brw->ctx;
194 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
195 struct intel_mipmap_tree *mt = irb->mt;
196
197 if (brw->gen < 9) {
198 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
199 }
200
201 assert(brw_render_target_supported(brw, rb));
202 intel_miptree_used_for_rendering(mt);
203
204 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
205 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
206 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
207 __func__, _mesa_get_format_name(rb_format));
208 }
209
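/* For the UMS and CMS multisample layouts irb->mt_layer is expressed in
* sample units, so divide by the sample count to get back to whole logical
* layers (see the matching comment in update_renderbuffer_read_surfaces()).
*/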
210 const unsigned layer_multiplier =
211 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
212 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
213 MAX2(irb->mt->num_samples, 1) : 1;
214
215 struct isl_view view = {
216 .format = brw->render_target_format[rb_format],
217 .base_level = irb->mt_level - irb->mt->first_level,
218 .levels = 1,
219 .base_array_layer = irb->mt_layer / layer_multiplier,
220 .array_len = MAX2(irb->layer_count, 1),
221 .swizzle = ISL_SWIZZLE_IDENTITY,
222 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
223 };
224
225 uint32_t offset;
226 brw_emit_surface_state(brw, mt, flags, mt->target, view,
227 surface_state_infos[brw->gen].rb_mocs,
228 &offset, surf_index,
229 I915_GEM_DOMAIN_RENDER,
230 I915_GEM_DOMAIN_RENDER);
231 return offset;
232 }
233
234 GLuint
235 translate_tex_target(GLenum target)
236 {
237 switch (target) {
238 case GL_TEXTURE_1D:
239 case GL_TEXTURE_1D_ARRAY_EXT:
240 return BRW_SURFACE_1D;
241
242 case GL_TEXTURE_RECTANGLE_NV:
243 return BRW_SURFACE_2D;
244
245 case GL_TEXTURE_2D:
246 case GL_TEXTURE_2D_ARRAY_EXT:
247 case GL_TEXTURE_EXTERNAL_OES:
248 case GL_TEXTURE_2D_MULTISAMPLE:
249 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
250 return BRW_SURFACE_2D;
251
252 case GL_TEXTURE_3D:
253 return BRW_SURFACE_3D;
254
255 case GL_TEXTURE_CUBE_MAP:
256 case GL_TEXTURE_CUBE_MAP_ARRAY:
257 return BRW_SURFACE_CUBE;
258
259 default:
260 unreachable("not reached");
261 }
262 }
263
264 uint32_t
265 brw_get_surface_tiling_bits(uint32_t tiling)
266 {
267 switch (tiling) {
268 case I915_TILING_X:
269 return BRW_SURFACE_TILED;
270 case I915_TILING_Y:
271 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
272 default:
273 return 0;
274 }
275 }
276
277
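/* Pre-gen7 surface state only distinguishes between single-sampled and 4x
* multisampled surfaces (gen6 supports at most 4x MSAA), so anything with
* more than one sample is reported as MULTISAMPLECOUNT_4 here.
*/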
278 uint32_t
279 brw_get_surface_num_multisamples(unsigned num_samples)
280 {
281 if (num_samples > 1)
282 return BRW_SURFACE_MULTISAMPLECOUNT_4;
283 else
284 return BRW_SURFACE_MULTISAMPLECOUNT_1;
285 }
286
287 /**
288 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
289 * swizzling.
290 */
291 int
292 brw_get_texture_swizzle(const struct gl_context *ctx,
293 const struct gl_texture_object *t)
294 {
295 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
296
297 int swizzles[SWIZZLE_NIL + 1] = {
298 SWIZZLE_X,
299 SWIZZLE_Y,
300 SWIZZLE_Z,
301 SWIZZLE_W,
302 SWIZZLE_ZERO,
303 SWIZZLE_ONE,
304 SWIZZLE_NIL
305 };
306
307 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
308 img->_BaseFormat == GL_DEPTH_STENCIL) {
309 GLenum depth_mode = t->DepthMode;
310
311 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
312 * with depth component data specified with a sized internal format.
313 * Otherwise, it's left at the old default, GL_LUMINANCE.
314 */
315 if (_mesa_is_gles3(ctx) &&
316 img->InternalFormat != GL_DEPTH_COMPONENT &&
317 img->InternalFormat != GL_DEPTH_STENCIL) {
318 depth_mode = GL_RED;
319 }
320
321 switch (depth_mode) {
322 case GL_ALPHA:
323 swizzles[0] = SWIZZLE_ZERO;
324 swizzles[1] = SWIZZLE_ZERO;
325 swizzles[2] = SWIZZLE_ZERO;
326 swizzles[3] = SWIZZLE_X;
327 break;
328 case GL_LUMINANCE:
329 swizzles[0] = SWIZZLE_X;
330 swizzles[1] = SWIZZLE_X;
331 swizzles[2] = SWIZZLE_X;
332 swizzles[3] = SWIZZLE_ONE;
333 break;
334 case GL_INTENSITY:
335 swizzles[0] = SWIZZLE_X;
336 swizzles[1] = SWIZZLE_X;
337 swizzles[2] = SWIZZLE_X;
338 swizzles[3] = SWIZZLE_X;
339 break;
340 case GL_RED:
341 swizzles[0] = SWIZZLE_X;
342 swizzles[1] = SWIZZLE_ZERO;
343 swizzles[2] = SWIZZLE_ZERO;
344 swizzles[3] = SWIZZLE_ONE;
345 break;
346 }
347 }
348
349 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
350
351 /* If the texture's format is alpha-only, force R, G, and B to
352 * 0.0. Similarly, if the texture's format has no alpha channel,
353 * force the alpha value read to 1.0. This allows for the
354 * implementation to use an RGBA texture for any of these formats
355 * without leaking any unexpected values.
356 */
357 switch (img->_BaseFormat) {
358 case GL_ALPHA:
359 swizzles[0] = SWIZZLE_ZERO;
360 swizzles[1] = SWIZZLE_ZERO;
361 swizzles[2] = SWIZZLE_ZERO;
362 break;
363 case GL_LUMINANCE:
364 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
365 swizzles[0] = SWIZZLE_X;
366 swizzles[1] = SWIZZLE_X;
367 swizzles[2] = SWIZZLE_X;
368 swizzles[3] = SWIZZLE_ONE;
369 }
370 break;
371 case GL_LUMINANCE_ALPHA:
372 if (datatype == GL_SIGNED_NORMALIZED) {
373 swizzles[0] = SWIZZLE_X;
374 swizzles[1] = SWIZZLE_X;
375 swizzles[2] = SWIZZLE_X;
376 swizzles[3] = SWIZZLE_W;
377 }
378 break;
379 case GL_INTENSITY:
380 if (datatype == GL_SIGNED_NORMALIZED) {
381 swizzles[0] = SWIZZLE_X;
382 swizzles[1] = SWIZZLE_X;
383 swizzles[2] = SWIZZLE_X;
384 swizzles[3] = SWIZZLE_X;
385 }
386 break;
387 case GL_RED:
388 case GL_RG:
389 case GL_RGB:
390 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
391 swizzles[3] = SWIZZLE_ONE;
392 break;
393 }
394
395 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
396 swizzles[GET_SWZ(t->_Swizzle, 1)],
397 swizzles[GET_SWZ(t->_Swizzle, 2)],
398 swizzles[GET_SWZ(t->_Swizzle, 3)]);
399 }
400
401 /**
402 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
403 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
404 *
405 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
406 * 0 1 2 3 4 5
407 * 4 5 6 7 0 1
408 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
409 *
410 * which is simply adding 4 then modding by 8 (or anding with 7).
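* For example, SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 = SCS_RED, and
* SWIZZLE_ZERO (4) becomes (4 + 4) & 7 = 0 = SCS_ZERO.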
411 *
412 * We then may need to apply workarounds for textureGather hardware bugs.
413 */
414 static unsigned
415 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
416 {
417 unsigned scs = (swizzle + 4) & 7;
418
419 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
420 }
421
422 static unsigned
423 brw_find_matching_rb(const struct gl_framebuffer *fb,
424 const struct intel_mipmap_tree *mt)
425 {
426 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
427 const struct intel_renderbuffer *irb =
428 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
429
430 if (irb && irb->mt == mt)
431 return i;
432 }
433
434 return fb->_NumColorDrawBuffers;
435 }
436
437 static inline bool
438 brw_texture_view_sane(const struct brw_context *brw,
439 const struct intel_mipmap_tree *mt, unsigned format)
440 {
441 /* There are special cases only for lossless compression. */
442 if (!intel_miptree_is_lossless_compressed(brw, mt))
443 return true;
444
445 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
446 format))
447 return true;
448
449 /* Logic elsewhere needs to take care to resolve the color buffer prior
450 * to sampling it as non-compressed.
451 */
452 if (mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)
453 return false;
454
455 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
456 const unsigned rb_index = brw_find_matching_rb(fb, mt);
457
458 if (rb_index == fb->_NumColorDrawBuffers)
459 return true;
460
461 /* Underlying surface is compressed but it is sampled using a format that
462 * the sampling engine doesn't support as compressed. Compression must be
463 * disabled for both the sampling engine and the data port in case the same
464 * surface is also used as a render target.
465 */
466 return brw->draw_aux_buffer_disabled[rb_index];
467 }
468
469 static bool
470 brw_disable_aux_surface(const struct brw_context *brw,
471 const struct intel_mipmap_tree *mt)
472 {
473 /* Nothing to disable. */
474 if (!mt->mcs_mt)
475 return false;
476
477 /* There are special cases only for lossless compression. */
478 if (!intel_miptree_is_lossless_compressed(brw, mt))
479 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
480
481 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
482 const unsigned rb_index = brw_find_matching_rb(fb, mt);
483
484 /* If we are drawing into this with compression enabled, then we must also
485 * enable compression when texturing from it regardless of
486 * fast_clear_state. If we don't, then after the first draw call with
487 * this setup, there will be data in the CCS which won't get picked up by
488 * subsequent texturing operations as required by ARB_texture_barrier.
489 * Since we don't want to re-emit the binding table or do a resolve
490 * operation every draw call, the easiest thing to do is just enable
491 * compression on the texturing side. This is completely safe to do
492 * since, if compressed texturing weren't allowed, we would have disabled
493 * compression of render targets in whatever_that_function_is_called().
494 */
495 if (rb_index < fb->_NumColorDrawBuffers) {
496 if (brw->draw_aux_buffer_disabled[rb_index]) {
497 assert(mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED);
498 }
499
500 return brw->draw_aux_buffer_disabled[rb_index];
501 }
502
503 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
504 }
505
506 void
507 brw_update_texture_surface(struct gl_context *ctx,
508 unsigned unit,
509 uint32_t *surf_offset,
510 bool for_gather,
511 uint32_t plane)
512 {
513 struct brw_context *brw = brw_context(ctx);
514 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
515
516 if (obj->Target == GL_TEXTURE_BUFFER) {
517 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
518
519 } else {
520 struct intel_texture_object *intel_obj = intel_texture_object(obj);
521 struct intel_mipmap_tree *mt = intel_obj->mt;
522
523 if (plane > 0) {
524 if (mt->plane[plane - 1] == NULL)
525 return;
526 mt = mt->plane[plane - 1];
527 }
528
529 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
530 /* If this is a view with restricted NumLayers, then our effective depth
531 * is not just the miptree depth.
532 */
533 const unsigned view_num_layers =
534 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
535 mt->logical_depth0;
536
537 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
538 * texturing functions that return a float, as our code generation always
539 * selects the .x channel (which would always be 0).
540 */
541 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
542 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
543 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
544 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
545 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
546 brw_get_texture_swizzle(&brw->ctx, obj));
547
548 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
549 unsigned format = translate_tex_format(brw, mesa_fmt,
550 sampler->sRGBDecode);
551
552 /* Implement gen6 and gen7 gather work-around */
553 bool need_green_to_blue = false;
554 if (for_gather) {
555 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
556 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
557 need_green_to_blue = brw->is_haswell;
558 } else if (brw->gen == 6) {
559 /* Sandybridge's gather4 message is broken for integer formats.
560 * To work around this, we pretend the surface is UNORM for
561 * 8 or 16-bit formats, and emit shader instructions to recover
562 * the real INT/UINT value. For 32-bit formats, we pretend
563 * the surface is FLOAT, and simply reinterpret the resulting
564 * bits.
565 */
566 switch (format) {
567 case BRW_SURFACEFORMAT_R8_SINT:
568 case BRW_SURFACEFORMAT_R8_UINT:
569 format = BRW_SURFACEFORMAT_R8_UNORM;
570 break;
571
572 case BRW_SURFACEFORMAT_R16_SINT:
573 case BRW_SURFACEFORMAT_R16_UINT:
574 format = BRW_SURFACEFORMAT_R16_UNORM;
575 break;
576
577 case BRW_SURFACEFORMAT_R32_SINT:
578 case BRW_SURFACEFORMAT_R32_UINT:
579 format = BRW_SURFACEFORMAT_R32_FLOAT;
580 break;
581
582 default:
583 break;
584 }
585 }
586 }
587
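/* Hardware before gen8 cannot sample the W-tiled stencil buffer directly,
* so stencil texturing reads from the R8_UINT shadow copy (r8stencil_mt)
* that is kept in sync with the real stencil miptree.
*/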
588 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
589 if (brw->gen <= 7) {
590 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
591 mt = mt->r8stencil_mt;
592 } else {
593 mt = mt->stencil_mt;
594 }
595 format = BRW_SURFACEFORMAT_R8_UINT;
596 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
597 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
598 mt = mt->r8stencil_mt;
599 format = BRW_SURFACEFORMAT_R8_UINT;
600 }
601
602 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
603
604 struct isl_view view = {
605 .format = format,
606 .base_level = obj->MinLevel + obj->BaseLevel,
607 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
608 .base_array_layer = obj->MinLayer,
609 .array_len = view_num_layers,
610 .swizzle = {
611 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
612 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
613 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
614 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
615 },
616 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
617 };
618
619 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
620 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
621 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
622
623 assert(brw_texture_view_sane(brw, mt, format));
624
625 const int flags =
626 brw_disable_aux_surface(brw, mt) ? INTEL_AUX_BUFFER_DISABLED : 0;
627 brw_emit_surface_state(brw, mt, flags, mt->target, view,
628 surface_state_infos[brw->gen].tex_mocs,
629 surf_offset, surf_index,
630 I915_GEM_DOMAIN_SAMPLER, 0);
631 }
632 }
633
634 void
635 brw_emit_buffer_surface_state(struct brw_context *brw,
636 uint32_t *out_offset,
637 drm_intel_bo *bo,
638 unsigned buffer_offset,
639 unsigned surface_format,
640 unsigned buffer_size,
641 unsigned pitch,
642 bool rw)
643 {
644 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
645
646 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
647 ss_info.num_dwords * 4, ss_info.ss_align,
648 out_offset);
649
650 isl_buffer_fill_state(&brw->isl_dev, dw,
651 .address = (bo ? bo->offset64 : 0) + buffer_offset,
652 .size = buffer_size,
653 .format = surface_format,
654 .stride = pitch,
655 .mocs = ss_info.tex_mocs);
656
657 if (bo) {
658 drm_intel_bo_emit_reloc(brw->batch.bo,
659 *out_offset + 4 * ss_info.reloc_dw,
660 bo, buffer_offset,
661 I915_GEM_DOMAIN_SAMPLER,
662 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
663 }
664 }
665
666 void
667 brw_update_buffer_texture_surface(struct gl_context *ctx,
668 unsigned unit,
669 uint32_t *surf_offset)
670 {
671 struct brw_context *brw = brw_context(ctx);
672 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
673 struct intel_buffer_object *intel_obj =
674 intel_buffer_object(tObj->BufferObject);
675 uint32_t size = tObj->BufferSize;
676 drm_intel_bo *bo = NULL;
677 mesa_format format = tObj->_BufferObjectFormat;
678 uint32_t brw_format = brw_format_for_mesa_format(format);
679 int texel_size = _mesa_get_format_bytes(format);
680
681 if (intel_obj) {
682 size = MIN2(size, intel_obj->Base.Size);
683 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
684 }
685
686 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
687 _mesa_problem(NULL, "bad format %s for texture buffer\n",
688 _mesa_get_format_name(format));
689 }
690
691 brw_emit_buffer_surface_state(brw, surf_offset, bo,
692 tObj->BufferOffset,
693 brw_format,
694 size,
695 texel_size,
696 false /* rw */);
697 }
698
699 /**
700 * Create the constant buffer surface. Vertex/fragment shader constants will be
701 * read from this buffer with Data Port Read instructions/messages.
702 */
703 void
704 brw_create_constant_surface(struct brw_context *brw,
705 drm_intel_bo *bo,
706 uint32_t offset,
707 uint32_t size,
708 uint32_t *out_offset)
709 {
710 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
711 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
712 size, 1, false);
713 }
714
715 /**
716 * Create the buffer surface. Shader buffer variables will be
717 * read from / written to this buffer with Data Port Read/Write
718 * instructions/messages.
719 */
720 void
721 brw_create_buffer_surface(struct brw_context *brw,
722 drm_intel_bo *bo,
723 uint32_t offset,
724 uint32_t size,
725 uint32_t *out_offset)
726 {
727 /* Use a raw surface so we can reuse existing untyped read/write/atomic
728 * messages. We need these specifically for the fragment shader since they
729 * include a pixel mask header that we need in order to ensure correct
730 * behavior with helper invocations, which must not write to the buffer.
731 */
732 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
733 BRW_SURFACEFORMAT_RAW,
734 size, 1, true);
735 }
736
737 /**
738 * Set up a binding table entry for use by stream output logic (transform
739 * feedback).
740 *
741 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
742 */
743 void
744 brw_update_sol_surface(struct brw_context *brw,
745 struct gl_buffer_object *buffer_obj,
746 uint32_t *out_offset, unsigned num_vector_components,
747 unsigned stride_dwords, unsigned offset_dwords)
748 {
749 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
750 uint32_t offset_bytes = 4 * offset_dwords;
751 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
752 offset_bytes,
753 buffer_obj->Size - offset_bytes);
754 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
755 out_offset);
756 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
757 size_t size_dwords = buffer_obj->Size / 4;
758 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
759
760 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
761 * too big to map using a single binding table entry?
762 */
763 assert((size_dwords - offset_dwords) / stride_dwords
764 <= BRW_MAX_NUM_BUFFER_ENTRIES);
765
766 if (size_dwords > offset_dwords + num_vector_components) {
767 /* There is room for at least 1 transform feedback output in the buffer.
768 * Compute the number of additional transform feedback outputs the
769 * buffer has room for.
770 */
771 buffer_size_minus_1 =
772 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
773 } else {
774 /* There isn't even room for a single transform feedback output in the
775 * buffer. We can't configure the binding table entry to prevent output
776 * entirely; we'll have to rely on the geometry shader to detect
777 * overflow. But to minimize the damage in case of a bug, set up the
778 * binding table entry to just allow a single output.
779 */
780 buffer_size_minus_1 = 0;
781 }
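/* SURFTYPE_BUFFER surfaces encode their size minus one split across the
* Width (bits 6:0), Height (bits 19:7) and Depth (bits 26:20) fields of
* the surface state.
*/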
782 width = buffer_size_minus_1 & 0x7f;
783 height = (buffer_size_minus_1 & 0xfff80) >> 7;
784 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
785
786 switch (num_vector_components) {
787 case 1:
788 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
789 break;
790 case 2:
791 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
792 break;
793 case 3:
794 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
795 break;
796 case 4:
797 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
798 break;
799 default:
800 unreachable("Invalid vector size for transform feedback output");
801 }
802
803 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
804 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
805 surface_format << BRW_SURFACE_FORMAT_SHIFT |
806 BRW_SURFACE_RC_READ_WRITE;
807 surf[1] = bo->offset64 + offset_bytes; /* reloc */
808 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
809 height << BRW_SURFACE_HEIGHT_SHIFT);
810 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
811 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
812 surf[4] = 0;
813 surf[5] = 0;
814
815 /* Emit relocation to surface contents. */
816 drm_intel_bo_emit_reloc(brw->batch.bo,
817 *out_offset + 4,
818 bo, offset_bytes,
819 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
820 }
821
822 /* Creates a new WM constant buffer reflecting the current fragment program's
823 * constants, if needed by the fragment program.
824 *
825 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
826 * state atom.
827 */
828 static void
829 brw_upload_wm_pull_constants(struct brw_context *brw)
830 {
831 struct brw_stage_state *stage_state = &brw->wm.base;
832 /* BRW_NEW_FRAGMENT_PROGRAM */
833 struct brw_fragment_program *fp =
834 (struct brw_fragment_program *) brw->fragment_program;
835 /* BRW_NEW_FS_PROG_DATA */
836 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
837
838 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
839 /* _NEW_PROGRAM_CONSTANTS */
840 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
841 stage_state, prog_data);
842 }
843
844 const struct brw_tracked_state brw_wm_pull_constants = {
845 .dirty = {
846 .mesa = _NEW_PROGRAM_CONSTANTS,
847 .brw = BRW_NEW_BATCH |
848 BRW_NEW_BLORP |
849 BRW_NEW_FRAGMENT_PROGRAM |
850 BRW_NEW_FS_PROG_DATA,
851 },
852 .emit = brw_upload_wm_pull_constants,
853 };
854
855 /**
856 * Creates a null renderbuffer surface.
857 *
858 * This is used when the shader doesn't write to any color output. An FB
859 * write to target 0 will still be emitted, because that's how the thread is
860 * terminated (and computed depth is returned), so we need to have the
861 * hardware discard the target 0 color output.
862 */
863 static void
864 brw_emit_null_surface_state(struct brw_context *brw,
865 unsigned width,
866 unsigned height,
867 unsigned samples,
868 uint32_t *out_offset)
869 {
870 /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
871 * Notes):
872 *
873 * A null surface will be used in instances where an actual surface is
874 * not bound. When a write message is generated to a null surface, no
875 * actual surface is written to. When a read message (including any
876 * sampling engine message) is generated to a null surface, the result
877 * is all zeros. Note that a null surface type is allowed to be used
878 * with all messages, even if it is not specifically indicated as
879 * supported. All of the remaining fields in surface state are ignored
880 * for null surfaces, with the following exceptions:
881 *
882 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
883 * depth buffer’s corresponding state for all render target surfaces,
884 * including null.
885 *
886 * - Surface Format must be R8G8B8A8_UNORM.
887 */
888 unsigned surface_type = BRW_SURFACE_NULL;
889 drm_intel_bo *bo = NULL;
890 unsigned pitch_minus_1 = 0;
891 uint32_t multisampling_state = 0;
892 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
893 out_offset);
894
895 if (samples > 1) {
896 /* On Gen6, null render targets seem to cause GPU hangs when
897 * multisampling. So work around this problem by rendering into a dummy
898 * color buffer.
899 *
900 * To decrease the amount of memory needed by the workaround buffer, we
901 * set its pitch to 128 bytes (the width of a Y tile). This means that
902 * the amount of memory needed for the workaround buffer is
903 * (width_in_tiles + height_in_tiles - 1) tiles.
904 *
905 * Note that since the workaround buffer will be interpreted by the
906 * hardware as an interleaved multisampled buffer, we need to compute
907 * width_in_tiles and height_in_tiles by dividing the width and height
908 * by 16 rather than the normal Y-tile size of 32.
909 */
910 unsigned width_in_tiles = ALIGN(width, 16) / 16;
911 unsigned height_in_tiles = ALIGN(height, 16) / 16;
912 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
913 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
914 size_needed);
915 bo = brw->wm.multisampled_null_render_target_bo;
916 surface_type = BRW_SURFACE_2D;
917 pitch_minus_1 = 127;
918 multisampling_state = brw_get_surface_num_multisamples(samples);
919 }
920
921 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
922 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
923 if (brw->gen < 6) {
924 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
925 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
926 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
927 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
928 }
929 surf[1] = bo ? bo->offset64 : 0;
930 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
931 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
932
933 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
934 * Notes):
935 *
936 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
937 */
938 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
939 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
940 surf[4] = multisampling_state;
941 surf[5] = 0;
942
943 if (bo) {
944 drm_intel_bo_emit_reloc(brw->batch.bo,
945 *out_offset + 4,
946 bo, 0,
947 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
948 }
949 }
950
951 /**
952 * Sets up a surface state structure to point at the given region.
953 * While it is only used for the front/back buffer currently, it should be
954 * usable for further buffers when doing ARB_draw_buffers support.
955 */
956 static uint32_t
957 gen4_update_renderbuffer_surface(struct brw_context *brw,
958 struct gl_renderbuffer *rb,
959 uint32_t flags, unsigned unit,
960 uint32_t surf_index)
961 {
962 struct gl_context *ctx = &brw->ctx;
963 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
964 struct intel_mipmap_tree *mt = irb->mt;
965 uint32_t *surf;
966 uint32_t tile_x, tile_y;
967 uint32_t format = 0;
968 uint32_t offset;
969 /* _NEW_BUFFERS */
970 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
971 /* BRW_NEW_FS_PROG_DATA */
972
973 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
974 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
975
976 if (rb->TexImage && !brw->has_surface_tile_offset) {
977 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
978
979 if (tile_x != 0 || tile_y != 0) {
980 /* Original gen4 hardware couldn't draw to a non-tile-aligned
981 * destination in a miptree unless you actually set up your renderbuffer
982 * as a miptree and used the fragile lod/array_index/etc. controls to
983 * select the image. So, instead, we just make a new single-level
984 * miptree and render into that.
985 */
986 intel_renderbuffer_move_to_temp(brw, irb, false);
987 mt = irb->mt;
988 }
989 }
990
991 intel_miptree_used_for_rendering(irb->mt);
992
993 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
994
995 format = brw->render_target_format[rb_format];
996 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
997 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
998 __func__, _mesa_get_format_name(rb_format));
999 }
1000
1001 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1002 format << BRW_SURFACE_FORMAT_SHIFT);
1003
1004 /* reloc */
1005 assert(mt->offset % mt->cpp == 0);
1006 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1007 mt->bo->offset64 + mt->offset);
1008
1009 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1010 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1011
1012 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1013 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1014
1015 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1016
1017 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1018 /* Note that the low bits of these fields are missing, so
1019 * there's the possibility of getting in trouble.
1020 */
1021 assert(tile_x % 4 == 0);
1022 assert(tile_y % 2 == 0);
1023 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1024 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1025 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1026
1027 if (brw->gen < 6) {
1028 /* _NEW_COLOR */
1029 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1030 (ctx->Color.BlendEnabled & (1 << unit)))
1031 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1032
1033 if (!ctx->Color.ColorMask[unit][0])
1034 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1035 if (!ctx->Color.ColorMask[unit][1])
1036 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1037 if (!ctx->Color.ColorMask[unit][2])
1038 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1039
1040 /* Disable writes to the alpha component when the
1041 * renderbuffer is XRGB.
1042 */
1043 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1044 !ctx->Color.ColorMask[unit][3]) {
1045 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1046 }
1047 }
1048
1049 drm_intel_bo_emit_reloc(brw->batch.bo,
1050 offset + 4,
1051 mt->bo,
1052 surf[1] - mt->bo->offset64,
1053 I915_GEM_DOMAIN_RENDER,
1054 I915_GEM_DOMAIN_RENDER);
1055
1056 return offset;
1057 }
1058
1059 /**
1060 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1061 */
1062 void
1063 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1064 const struct gl_framebuffer *fb,
1065 uint32_t render_target_start,
1066 uint32_t *surf_offset)
1067 {
1068 GLuint i;
1069 const unsigned int w = _mesa_geometric_width(fb);
1070 const unsigned int h = _mesa_geometric_height(fb);
1071 const unsigned int s = _mesa_geometric_samples(fb);
1072
1073 /* Update surfaces for drawing buffers */
1074 if (fb->_NumColorDrawBuffers >= 1) {
1075 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1076 const uint32_t surf_index = render_target_start + i;
1077 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1078 INTEL_RENDERBUFFER_LAYERED : 0) |
1079 (brw->draw_aux_buffer_disabled[i] ?
1080 INTEL_AUX_BUFFER_DISABLED : 0);
1081
1082 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1083 surf_offset[surf_index] =
1084 brw->vtbl.update_renderbuffer_surface(
1085 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1086 } else {
1087 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1088 &surf_offset[surf_index]);
1089 }
1090 }
1091 } else {
1092 const uint32_t surf_index = render_target_start;
1093 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1094 &surf_offset[surf_index]);
1095 }
1096 }
1097
1098 static void
1099 update_renderbuffer_surfaces(struct brw_context *brw)
1100 {
1101 const struct gl_context *ctx = &brw->ctx;
1102
1103 /* BRW_NEW_FS_PROG_DATA */
1104 const struct brw_wm_prog_data *wm_prog_data =
1105 brw_wm_prog_data(brw->wm.base.prog_data);
1106
1107 /* _NEW_BUFFERS | _NEW_COLOR */
1108 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1109 brw_update_renderbuffer_surfaces(
1110 brw, fb,
1111 wm_prog_data->binding_table.render_target_start,
1112 brw->wm.base.surf_offset);
1113 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1114 }
1115
1116 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1117 .dirty = {
1118 .mesa = _NEW_BUFFERS |
1119 _NEW_COLOR,
1120 .brw = BRW_NEW_BATCH |
1121 BRW_NEW_BLORP |
1122 BRW_NEW_FS_PROG_DATA,
1123 },
1124 .emit = update_renderbuffer_surfaces,
1125 };
1126
1127 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1128 .dirty = {
1129 .mesa = _NEW_BUFFERS,
1130 .brw = BRW_NEW_BATCH |
1131 BRW_NEW_BLORP,
1132 },
1133 .emit = update_renderbuffer_surfaces,
1134 };
1135
1136 static void
1137 update_renderbuffer_read_surfaces(struct brw_context *brw)
1138 {
1139 const struct gl_context *ctx = &brw->ctx;
1140
1141 /* BRW_NEW_FS_PROG_DATA */
1142 const struct brw_wm_prog_data *wm_prog_data =
1143 brw_wm_prog_data(brw->wm.base.prog_data);
1144
1145 /* BRW_NEW_FRAGMENT_PROGRAM */
1146 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1147 brw->fragment_program &&
1148 brw->fragment_program->Base.OutputsRead) {
1149 /* _NEW_BUFFERS */
1150 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1151
1152 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1153 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1154 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1155 const unsigned surf_index =
1156 wm_prog_data->binding_table.render_target_read_start + i;
1157 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1158
1159 if (irb) {
1160 const unsigned format = brw->render_target_format[
1161 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1162 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1163 format));
1164
1165 /* Override the target of the texture if the render buffer is a
1166 * single slice of a 3D texture (since the minimum array element
1167 * field of the surface state structure is ignored by the sampler
1168 * unit for 3D textures on some hardware), or if the render buffer
1169 * is a 1D array (since shaders always provide the array index
1170 * coordinate at the Z component to avoid state-dependent
1171 * recompiles when changing the texture target of the
1172 * framebuffer).
1173 */
1174 const GLenum target =
1175 (irb->mt->target == GL_TEXTURE_3D &&
1176 irb->layer_count == 1) ? GL_TEXTURE_2D :
1177 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1178 irb->mt->target;
1179
1180 /* intel_renderbuffer::mt_layer is expressed in sample units for
1181 * the UMS and CMS multisample layouts, but
1182 * intel_renderbuffer::layer_count is expressed in units of whole
1183 * logical layers regardless of the multisample layout.
1184 */
1185 const unsigned mt_layer_unit =
1186 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1187 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1188 MAX2(irb->mt->num_samples, 1) : 1;
1189
1190 const struct isl_view view = {
1191 .format = format,
1192 .base_level = irb->mt_level - irb->mt->first_level,
1193 .levels = 1,
1194 .base_array_layer = irb->mt_layer / mt_layer_unit,
1195 .array_len = irb->layer_count,
1196 .swizzle = ISL_SWIZZLE_IDENTITY,
1197 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1198 };
1199
1200 const int flags = brw->draw_aux_buffer_disabled[i] ?
1201 INTEL_AUX_BUFFER_DISABLED : 0;
1202 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1203 surface_state_infos[brw->gen].tex_mocs,
1204 surf_offset, surf_index,
1205 I915_GEM_DOMAIN_SAMPLER, 0);
1206
1207 } else {
1208 brw->vtbl.emit_null_surface_state(
1209 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1210 _mesa_geometric_samples(fb), surf_offset);
1211 }
1212 }
1213
1214 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1215 }
1216 }
1217
1218 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1219 .dirty = {
1220 .mesa = _NEW_BUFFERS,
1221 .brw = BRW_NEW_BATCH |
1222 BRW_NEW_FRAGMENT_PROGRAM |
1223 BRW_NEW_FS_PROG_DATA,
1224 },
1225 .emit = update_renderbuffer_read_surfaces,
1226 };
1227
1228 static void
1229 update_stage_texture_surfaces(struct brw_context *brw,
1230 const struct gl_program *prog,
1231 struct brw_stage_state *stage_state,
1232 bool for_gather, uint32_t plane)
1233 {
1234 if (!prog)
1235 return;
1236
1237 struct gl_context *ctx = &brw->ctx;
1238
1239 uint32_t *surf_offset = stage_state->surf_offset;
1240
1241 /* BRW_NEW_*_PROG_DATA */
1242 if (for_gather)
1243 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1244 else
1245 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1246
1247 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1248 for (unsigned s = 0; s < num_samplers; s++) {
1249 surf_offset[s] = 0;
1250
1251 if (prog->SamplersUsed & (1 << s)) {
1252 const unsigned unit = prog->SamplerUnits[s];
1253
1254 /* _NEW_TEXTURE */
1255 if (ctx->Texture.Unit[unit]._Current) {
1256 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1257 }
1258 }
1259 }
1260 }
1261
1262
1263 /**
1264 * Construct SURFACE_STATE objects for enabled textures.
1265 */
1266 static void
1267 brw_update_texture_surfaces(struct brw_context *brw)
1268 {
1269 /* BRW_NEW_VERTEX_PROGRAM */
1270 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1271
1272 /* BRW_NEW_TESS_PROGRAMS */
1273 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1274 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1275
1276 /* BRW_NEW_GEOMETRY_PROGRAM */
1277 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1278
1279 /* BRW_NEW_FRAGMENT_PROGRAM */
1280 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1281
1282 /* _NEW_TEXTURE */
1283 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1284 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1285 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1286 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1287 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1288
1289 /* Emit an alternate set of surface state for gather. This
1290 * allows the surface format to be overridden for only the
1291 * gather4 messages. */
1292 if (brw->gen < 8) {
1293 if (vs && vs->nir->info.uses_texture_gather)
1294 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1295 if (tcs && tcs->nir->info.uses_texture_gather)
1296 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1297 if (tes && tes->nir->info.uses_texture_gather)
1298 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1299 if (gs && gs->nir->info.uses_texture_gather)
1300 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1301 if (fs && fs->nir->info.uses_texture_gather)
1302 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1303 }
1304
1305 if (fs) {
1306 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1307 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1308 }
1309
1310 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1311 }
1312
1313 const struct brw_tracked_state brw_texture_surfaces = {
1314 .dirty = {
1315 .mesa = _NEW_TEXTURE,
1316 .brw = BRW_NEW_BATCH |
1317 BRW_NEW_BLORP |
1318 BRW_NEW_FRAGMENT_PROGRAM |
1319 BRW_NEW_FS_PROG_DATA |
1320 BRW_NEW_GEOMETRY_PROGRAM |
1321 BRW_NEW_GS_PROG_DATA |
1322 BRW_NEW_TESS_PROGRAMS |
1323 BRW_NEW_TCS_PROG_DATA |
1324 BRW_NEW_TES_PROG_DATA |
1325 BRW_NEW_TEXTURE_BUFFER |
1326 BRW_NEW_VERTEX_PROGRAM |
1327 BRW_NEW_VS_PROG_DATA,
1328 },
1329 .emit = brw_update_texture_surfaces,
1330 };
1331
1332 static void
1333 brw_update_cs_texture_surfaces(struct brw_context *brw)
1334 {
1335 /* BRW_NEW_COMPUTE_PROGRAM */
1336 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1337
1338 /* _NEW_TEXTURE */
1339 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1340
1341 /* Emit an alternate set of surface state for gather. This
1342 * allows the surface format to be overridden for only the
1343 * gather4 messages.
1344 */
1345 if (brw->gen < 8) {
1346 if (cs && cs->nir->info.uses_texture_gather)
1347 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1348 }
1349
1350 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1351 }
1352
1353 const struct brw_tracked_state brw_cs_texture_surfaces = {
1354 .dirty = {
1355 .mesa = _NEW_TEXTURE,
1356 .brw = BRW_NEW_BATCH |
1357 BRW_NEW_BLORP |
1358 BRW_NEW_COMPUTE_PROGRAM,
1359 },
1360 .emit = brw_update_cs_texture_surfaces,
1361 };
1362
1363
1364 void
1365 brw_upload_ubo_surfaces(struct brw_context *brw,
1366 struct gl_linked_shader *shader,
1367 struct brw_stage_state *stage_state,
1368 struct brw_stage_prog_data *prog_data)
1369 {
1370 struct gl_context *ctx = &brw->ctx;
1371
1372 if (!shader)
1373 return;
1374
1375 uint32_t *ubo_surf_offsets =
1376 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1377
1378 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1379 struct gl_uniform_buffer_binding *binding =
1380 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1381
1382 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1383 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1384 } else {
1385 struct intel_buffer_object *intel_bo =
1386 intel_buffer_object(binding->BufferObject);
1387 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1388 if (!binding->AutomaticSize)
1389 size = MIN2(size, binding->Size);
1390 drm_intel_bo *bo =
1391 intel_bufferobj_buffer(brw, intel_bo,
1392 binding->Offset,
1393 size);
1394 brw_create_constant_surface(brw, bo, binding->Offset,
1395 size,
1396 &ubo_surf_offsets[i]);
1397 }
1398 }
1399
1400 uint32_t *ssbo_surf_offsets =
1401 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1402
1403 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1404 struct gl_shader_storage_buffer_binding *binding =
1405 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1406
1407 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1408 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1409 } else {
1410 struct intel_buffer_object *intel_bo =
1411 intel_buffer_object(binding->BufferObject);
1412 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1413 if (!binding->AutomaticSize)
1414 size = MIN2(size, binding->Size);
1415 drm_intel_bo *bo =
1416 intel_bufferobj_buffer(brw, intel_bo,
1417 binding->Offset,
1418 size);
1419 brw_create_buffer_surface(brw, bo, binding->Offset,
1420 size,
1421 &ssbo_surf_offsets[i]);
1422 }
1423 }
1424
1425 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1426 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1427 }
1428
1429 static void
1430 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1431 {
1432 struct gl_context *ctx = &brw->ctx;
1433 /* _NEW_PROGRAM */
1434 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1435
1436 if (!prog)
1437 return;
1438
1439 /* BRW_NEW_FS_PROG_DATA */
1440 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1441 &brw->wm.base, brw->wm.base.prog_data);
1442 }
1443
1444 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1445 .dirty = {
1446 .mesa = _NEW_PROGRAM,
1447 .brw = BRW_NEW_BATCH |
1448 BRW_NEW_BLORP |
1449 BRW_NEW_FS_PROG_DATA |
1450 BRW_NEW_UNIFORM_BUFFER,
1451 },
1452 .emit = brw_upload_wm_ubo_surfaces,
1453 };
1454
1455 static void
1456 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1457 {
1458 struct gl_context *ctx = &brw->ctx;
1459 /* _NEW_PROGRAM */
1460 struct gl_shader_program *prog =
1461 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1462
1463 if (!prog)
1464 return;
1465
1466 /* BRW_NEW_CS_PROG_DATA */
1467 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1468 &brw->cs.base, brw->cs.base.prog_data);
1469 }
1470
1471 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1472 .dirty = {
1473 .mesa = _NEW_PROGRAM,
1474 .brw = BRW_NEW_BATCH |
1475 BRW_NEW_BLORP |
1476 BRW_NEW_CS_PROG_DATA |
1477 BRW_NEW_UNIFORM_BUFFER,
1478 },
1479 .emit = brw_upload_cs_ubo_surfaces,
1480 };
1481
1482 void
1483 brw_upload_abo_surfaces(struct brw_context *brw,
1484 struct gl_linked_shader *shader,
1485 struct brw_stage_state *stage_state,
1486 struct brw_stage_prog_data *prog_data)
1487 {
1488 struct gl_context *ctx = &brw->ctx;
1489 uint32_t *surf_offsets =
1490 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1491
1492 if (shader && shader->NumAtomicBuffers) {
1493 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1494 struct gl_atomic_buffer_binding *binding =
1495 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1496 struct intel_buffer_object *intel_bo =
1497 intel_buffer_object(binding->BufferObject);
1498 drm_intel_bo *bo = intel_bufferobj_buffer(
1499 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1500
1501 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1502 binding->Offset, BRW_SURFACEFORMAT_RAW,
1503 bo->size - binding->Offset, 1, true);
1504 }
1505
1506 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1507 }
1508 }
1509
1510 static void
1511 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1512 {
1513 struct gl_context *ctx = &brw->ctx;
1514 /* _NEW_PROGRAM */
1515 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1516
1517 if (prog) {
1518 /* BRW_NEW_FS_PROG_DATA */
1519 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1520 &brw->wm.base, brw->wm.base.prog_data);
1521 }
1522 }
1523
1524 const struct brw_tracked_state brw_wm_abo_surfaces = {
1525 .dirty = {
1526 .mesa = _NEW_PROGRAM,
1527 .brw = BRW_NEW_ATOMIC_BUFFER |
1528 BRW_NEW_BLORP |
1529 BRW_NEW_BATCH |
1530 BRW_NEW_FS_PROG_DATA,
1531 },
1532 .emit = brw_upload_wm_abo_surfaces,
1533 };
1534
1535 static void
1536 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1537 {
1538 struct gl_context *ctx = &brw->ctx;
1539 /* _NEW_PROGRAM */
1540 struct gl_shader_program *prog =
1541 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1542
1543 if (prog) {
1544 /* BRW_NEW_CS_PROG_DATA */
1545 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1546 &brw->cs.base, brw->cs.base.prog_data);
1547 }
1548 }
1549
1550 const struct brw_tracked_state brw_cs_abo_surfaces = {
1551 .dirty = {
1552 .mesa = _NEW_PROGRAM,
1553 .brw = BRW_NEW_ATOMIC_BUFFER |
1554 BRW_NEW_BLORP |
1555 BRW_NEW_BATCH |
1556 BRW_NEW_CS_PROG_DATA,
1557 },
1558 .emit = brw_upload_cs_abo_surfaces,
1559 };
1560
1561 static void
1562 brw_upload_cs_image_surfaces(struct brw_context *brw)
1563 {
1564 struct gl_context *ctx = &brw->ctx;
1565 /* _NEW_PROGRAM */
1566 struct gl_shader_program *prog =
1567 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1568
1569 if (prog) {
1570 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1571 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1572 &brw->cs.base, brw->cs.base.prog_data);
1573 }
1574 }
1575
1576 const struct brw_tracked_state brw_cs_image_surfaces = {
1577 .dirty = {
1578 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1579 .brw = BRW_NEW_BATCH |
1580 BRW_NEW_BLORP |
1581 BRW_NEW_CS_PROG_DATA |
1582 BRW_NEW_IMAGE_UNITS
1583 },
1584 .emit = brw_upload_cs_image_surfaces,
1585 };
1586
1587 static uint32_t
1588 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1589 {
1590 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1591 uint32_t hw_format = brw_format_for_mesa_format(format);
1592 if (access == GL_WRITE_ONLY) {
1593 return hw_format;
1594 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1595 /* Typed surface reads support a very limited subset of the shader
1596 * image formats. Translate it into the closest format the
1597 * hardware supports.
1598 */
1599 return isl_lower_storage_image_format(devinfo, hw_format);
1600 } else {
1601 /* The hardware doesn't actually support a typed format that we can use
1602 * so we have to fall back to untyped read/write messages.
1603 */
1604 return BRW_SURFACEFORMAT_RAW;
1605 }
1606 }
1607
1608 static void
1609 update_default_image_param(struct brw_context *brw,
1610 struct gl_image_unit *u,
1611 unsigned surface_idx,
1612 struct brw_image_param *param)
1613 {
1614 memset(param, 0, sizeof(*param));
1615 param->surface_idx = surface_idx;
1616 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1617 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1618 * detailed explanation of these parameters.
1619 */
1620 param->swizzling[0] = 0xff;
1621 param->swizzling[1] = 0xff;
1622 }
1623
1624 static void
1625 update_buffer_image_param(struct brw_context *brw,
1626 struct gl_image_unit *u,
1627 unsigned surface_idx,
1628 struct brw_image_param *param)
1629 {
1630 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1631 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1632 update_default_image_param(brw, u, surface_idx, param);
1633
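/* For a buffer image the only meaningful dimension is the texel count;
* the stride is the size of a single texel in bytes.
*/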
1634 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1635 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1636 }
1637
1638 static void
1639 update_texture_image_param(struct brw_context *brw,
1640 struct gl_image_unit *u,
1641 unsigned surface_idx,
1642 struct brw_image_param *param)
1643 {
1644 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1645
1646 update_default_image_param(brw, u, surface_idx, param);
1647
1648 param->size[0] = minify(mt->logical_width0, u->Level);
1649 param->size[1] = minify(mt->logical_height0, u->Level);
1650 param->size[2] = (!u->Layered ? 1 :
1651 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1652 u->TexObj->Target == GL_TEXTURE_3D ?
1653 minify(mt->logical_depth0, u->Level) :
1654 mt->logical_depth0);
1655
1656 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1657 &param->offset[0],
1658 &param->offset[1]);
1659
1660 param->stride[0] = mt->cpp;
1661 param->stride[1] = mt->pitch / mt->cpp;
1662 param->stride[2] =
1663 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1664 param->stride[3] =
1665 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1666
1667 if (mt->tiling == I915_TILING_X) {
1668 /* An X tile is a rectangular block of 512x8 bytes. */
1669 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1670 param->tiling[1] = _mesa_logbase2(8);
1671
1672 if (brw->has_swizzling) {
1673 /* Right shifts required to swizzle bits 9 and 10 of the memory
1674 * address with bit 6.
1675 */
1676 param->swizzling[0] = 3;
1677 param->swizzling[1] = 4;
1678 }
1679 } else if (mt->tiling == I915_TILING_Y) {
1680 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1681 * different from the layout of an X-tiled surface; we simply pretend that
1682 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1683 * one arranged in X-major order just as is the case for X-tiling.
1684 */
1685 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1686 param->tiling[1] = _mesa_logbase2(32);
1687
1688 if (brw->has_swizzling) {
1689 /* Right shift required to swizzle bit 9 of the memory address with
1690 * bit 6.
1691 */
1692 param->swizzling[0] = 3;
1693 }
1694 }
1695
1696 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1697 * address calculation algorithm (emit_address_calculation() in
1698 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1699 * modulus equal to the LOD.
1700 */
1701 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1702 0);
1703 }
1704
1705 static void
1706 update_image_surface(struct brw_context *brw,
1707 struct gl_image_unit *u,
1708 GLenum access,
1709 unsigned surface_idx,
1710 uint32_t *surf_offset,
1711 struct brw_image_param *param)
1712 {
1713 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1714 struct gl_texture_object *obj = u->TexObj;
1715 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1716
1717 if (obj->Target == GL_TEXTURE_BUFFER) {
1718 struct intel_buffer_object *intel_obj =
1719 intel_buffer_object(obj->BufferObject);
1720 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1721 _mesa_get_format_bytes(u->_ActualFormat));
1722
1723 brw_emit_buffer_surface_state(
1724 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1725 format, intel_obj->Base.Size, texel_size,
1726 access != GL_READ_ONLY);
1727
1728 update_buffer_image_param(brw, u, surface_idx, param);
1729
1730 } else {
1731 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1732 struct intel_mipmap_tree *mt = intel_obj->mt;
1733
1734 if (format == BRW_SURFACEFORMAT_RAW) {
1735 brw_emit_buffer_surface_state(
1736 brw, surf_offset, mt->bo, mt->offset,
1737 format, mt->bo->size - mt->offset, 1 /* pitch */,
1738 access != GL_READ_ONLY);
1739
1740 } else {
1741 const unsigned num_layers = (!u->Layered ? 1 :
1742 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1743 mt->logical_depth0);
1744
1745 struct isl_view view = {
1746 .format = format,
1747 .base_level = obj->MinLevel + u->Level,
1748 .levels = 1,
1749 .base_array_layer = obj->MinLayer + u->_Layer,
1750 .array_len = num_layers,
1751 .swizzle = ISL_SWIZZLE_IDENTITY,
1752 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1753 };
1754
1755 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1756 const int flags =
1757 mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED ?
1758 INTEL_AUX_BUFFER_DISABLED : 0;
1759 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1760 surface_state_infos[brw->gen].tex_mocs,
1761 surf_offset, surf_index,
1762 I915_GEM_DOMAIN_SAMPLER,
1763 access == GL_READ_ONLY ? 0 :
1764 I915_GEM_DOMAIN_SAMPLER);
1765 }
1766
1767 update_texture_image_param(brw, u, surface_idx, param);
1768 }
1769
1770 } else {
1771 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1772 update_default_image_param(brw, u, surface_idx, param);
1773 }
1774 }
1775
1776 void
1777 brw_upload_image_surfaces(struct brw_context *brw,
1778 struct gl_linked_shader *shader,
1779 struct brw_stage_state *stage_state,
1780 struct brw_stage_prog_data *prog_data)
1781 {
1782 struct gl_context *ctx = &brw->ctx;
1783
1784 if (shader && shader->NumImages) {
1785 for (unsigned i = 0; i < shader->NumImages; i++) {
1786 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1787 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1788
1789 update_image_surface(brw, u, shader->ImageAccess[i],
1790 surf_idx,
1791 &stage_state->surf_offset[surf_idx],
1792 &prog_data->image_param[i]);
1793 }
1794
1795 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1796 /* This may have changed the image metadata dependent on the context
1797 * image unit state and passed to the program as uniforms, so make sure
1798 * that push and pull constants are re-uploaded.
1799 */
1800 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1801 }
1802 }
1803
1804 static void
1805 brw_upload_wm_image_surfaces(struct brw_context *brw)
1806 {
1807 struct gl_context *ctx = &brw->ctx;
1808 /* BRW_NEW_FRAGMENT_PROGRAM */
1809 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1810
1811 if (prog) {
1812 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1813 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1814 &brw->wm.base, brw->wm.base.prog_data);
1815 }
1816 }
1817
1818 const struct brw_tracked_state brw_wm_image_surfaces = {
1819 .dirty = {
1820 .mesa = _NEW_TEXTURE,
1821 .brw = BRW_NEW_BATCH |
1822 BRW_NEW_BLORP |
1823 BRW_NEW_FRAGMENT_PROGRAM |
1824 BRW_NEW_FS_PROG_DATA |
1825 BRW_NEW_IMAGE_UNITS
1826 },
1827 .emit = brw_upload_wm_image_surfaces,
1828 };
1829
1830 void
1831 gen4_init_vtable_surface_functions(struct brw_context *brw)
1832 {
1833 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1834 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1835 }
1836
1837 void
1838 gen6_init_vtable_surface_functions(struct brw_context *brw)
1839 {
1840 gen4_init_vtable_surface_functions(brw);
1841 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1842 }
1843
1844 static void
1845 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1846 {
1847 struct gl_context *ctx = &brw->ctx;
1848 /* _NEW_PROGRAM */
1849 struct gl_shader_program *prog =
1850 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1851 /* BRW_NEW_CS_PROG_DATA */
1852 const struct brw_cs_prog_data *cs_prog_data =
1853 brw_cs_prog_data(brw->cs.base.prog_data);
1854
1855 if (prog && cs_prog_data->uses_num_work_groups) {
1856 const unsigned surf_idx =
1857 cs_prog_data->binding_table.work_groups_start;
1858 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1859 drm_intel_bo *bo;
1860 uint32_t bo_offset;
1861
1862 if (brw->compute.num_work_groups_bo == NULL) {
1863 bo = NULL;
1864 intel_upload_data(brw,
1865 (void *)brw->compute.num_work_groups,
1866 3 * sizeof(GLuint),
1867 sizeof(GLuint),
1868 &bo,
1869 &bo_offset);
1870 } else {
1871 bo = brw->compute.num_work_groups_bo;
1872 bo_offset = brw->compute.num_work_groups_offset;
1873 }
1874
1875 brw_emit_buffer_surface_state(brw, surf_offset,
1876 bo, bo_offset,
1877 BRW_SURFACEFORMAT_RAW,
1878 3 * sizeof(GLuint), 1, true);
1879 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1880 }
1881 }
1882
1883 const struct brw_tracked_state brw_cs_work_groups_surface = {
1884 .dirty = {
1885 .brw = BRW_NEW_BLORP |
1886 BRW_NEW_CS_PROG_DATA |
1887 BRW_NEW_CS_WORK_GROUPS
1888 },
1889 .emit = brw_upload_cs_work_groups_surface,
1890 };