mesa: make _CurrentFragmentProgram a gl_program struct pointer
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
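/* These flags are passed down to brw_emit_surface_state() and the
 * renderbuffer/texture update helpers below: INTEL_RENDERBUFFER_LAYERED marks
 * a layered (array) render target, while INTEL_AUX_BUFFER_DISABLED forces the
 * surface to be emitted without its MCS/HiZ auxiliary buffer (see
 * brw_disable_aux_surface()).
 */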
62
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
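/* MOCS (Memory Object Control State) values select the caching behavior the
 * hardware uses when accessing a surface.  Roughly (the rationale is not
 * spelled out in this file): sampled textures use a write-back (WB) entry,
 * while render targets on gen8+ use the PTE-based entry so that cacheability
 * follows the buffer's page-table mapping, e.g. for scanout surfaces.
 */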
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96 * actual layout of the miptree structure in memory -- you're entering
97 * dangerous territory.  This can only possibly work if you only intended
98 * to access a single level and slice of the texture, and if the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 drm_intel_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 assert(mt->mcs_buf->offset == 0);
147 aux_bo = mt->mcs_buf->bo;
148 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
149 } else {
150 aux_bo = mt->hiz_buf->aux_base.bo;
151 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
152 }
153
154 /* We only really need a clear color if we also have an auxiliary
155 * surface. Without one, it does nothing.
156 */
157 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
158 }
159
160 void *state = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
161 brw->isl_dev.ss.size,
162 brw->isl_dev.ss.align,
163 surf_index, surf_offset);
164
165 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
166 .address = mt->bo->offset64 + offset,
167 .aux_surf = aux_surf, .aux_usage = aux_usage,
168 .aux_address = aux_offset,
169 .mocs = mocs, .clear_color = clear_color,
170 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
171
172 drm_intel_bo_emit_reloc(brw->batch.bo,
173 *surf_offset + brw->isl_dev.ss.addr_offset,
174 mt->bo, offset,
175 read_domains, write_domains);
176
177 if (aux_surf) {
178 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
179 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
180 * contain other control information. Since buffer addresses are always
181 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
182 * an ordinary reloc to do the necessary address translation.
183 */
184 assert((aux_offset & 0xfff) == 0);
185 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
186 drm_intel_bo_emit_reloc(brw->batch.bo,
187 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
188 aux_bo, *aux_addr & 0xfff,
189 read_domains, write_domains);
190 }
191 }
192
193 uint32_t
194 brw_update_renderbuffer_surface(struct brw_context *brw,
195 struct gl_renderbuffer *rb,
196 uint32_t flags, unsigned unit /* unused */,
197 uint32_t surf_index)
198 {
199 struct gl_context *ctx = &brw->ctx;
200 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
201 struct intel_mipmap_tree *mt = irb->mt;
202
203 if (brw->gen < 9) {
204 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
205 }
206
207 assert(brw_render_target_supported(brw, rb));
208
209 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
210 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
211 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
212 __func__, _mesa_get_format_name(rb_format));
213 }
214
215 const unsigned layer_multiplier =
216 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
217 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
218 MAX2(irb->mt->num_samples, 1) : 1;
219
220 struct isl_view view = {
221 .format = brw->render_target_format[rb_format],
222 .base_level = irb->mt_level - irb->mt->first_level,
223 .levels = 1,
224 .base_array_layer = irb->mt_layer / layer_multiplier,
225 .array_len = MAX2(irb->layer_count, 1),
226 .swizzle = ISL_SWIZZLE_IDENTITY,
227 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
228 };
229
230 uint32_t offset;
231 brw_emit_surface_state(brw, mt, flags, mt->target, view,
232 rb_mocs[brw->gen],
233 &offset, surf_index,
234 I915_GEM_DOMAIN_RENDER,
235 I915_GEM_DOMAIN_RENDER);
236 return offset;
237 }
238
239 GLuint
240 translate_tex_target(GLenum target)
241 {
242 switch (target) {
243 case GL_TEXTURE_1D:
244 case GL_TEXTURE_1D_ARRAY_EXT:
245 return BRW_SURFACE_1D;
246
247 case GL_TEXTURE_RECTANGLE_NV:
248 return BRW_SURFACE_2D;
249
250 case GL_TEXTURE_2D:
251 case GL_TEXTURE_2D_ARRAY_EXT:
252 case GL_TEXTURE_EXTERNAL_OES:
253 case GL_TEXTURE_2D_MULTISAMPLE:
254 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
255 return BRW_SURFACE_2D;
256
257 case GL_TEXTURE_3D:
258 return BRW_SURFACE_3D;
259
260 case GL_TEXTURE_CUBE_MAP:
261 case GL_TEXTURE_CUBE_MAP_ARRAY:
262 return BRW_SURFACE_CUBE;
263
264 default:
265 unreachable("not reached");
266 }
267 }
268
269 uint32_t
270 brw_get_surface_tiling_bits(uint32_t tiling)
271 {
272 switch (tiling) {
273 case I915_TILING_X:
274 return BRW_SURFACE_TILED;
275 case I915_TILING_Y:
276 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
277 default:
278 return 0;
279 }
280 }
281
282
283 uint32_t
284 brw_get_surface_num_multisamples(unsigned num_samples)
285 {
286 if (num_samples > 1)
287 return BRW_SURFACE_MULTISAMPLECOUNT_4;
288 else
289 return BRW_SURFACE_MULTISAMPLECOUNT_1;
290 }
291
292 /**
293 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
294 * swizzling.
295 */
296 int
297 brw_get_texture_swizzle(const struct gl_context *ctx,
298 const struct gl_texture_object *t)
299 {
300 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
301
302 int swizzles[SWIZZLE_NIL + 1] = {
303 SWIZZLE_X,
304 SWIZZLE_Y,
305 SWIZZLE_Z,
306 SWIZZLE_W,
307 SWIZZLE_ZERO,
308 SWIZZLE_ONE,
309 SWIZZLE_NIL
310 };
311
312 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
313 img->_BaseFormat == GL_DEPTH_STENCIL) {
314 GLenum depth_mode = t->DepthMode;
315
316 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
317 * with depth component data specified with a sized internal format.
318 * Otherwise, it's left at the old default, GL_LUMINANCE.
319 */
320 if (_mesa_is_gles3(ctx) &&
321 img->InternalFormat != GL_DEPTH_COMPONENT &&
322 img->InternalFormat != GL_DEPTH_STENCIL) {
323 depth_mode = GL_RED;
324 }
325
326 switch (depth_mode) {
327 case GL_ALPHA:
328 swizzles[0] = SWIZZLE_ZERO;
329 swizzles[1] = SWIZZLE_ZERO;
330 swizzles[2] = SWIZZLE_ZERO;
331 swizzles[3] = SWIZZLE_X;
332 break;
333 case GL_LUMINANCE:
334 swizzles[0] = SWIZZLE_X;
335 swizzles[1] = SWIZZLE_X;
336 swizzles[2] = SWIZZLE_X;
337 swizzles[3] = SWIZZLE_ONE;
338 break;
339 case GL_INTENSITY:
340 swizzles[0] = SWIZZLE_X;
341 swizzles[1] = SWIZZLE_X;
342 swizzles[2] = SWIZZLE_X;
343 swizzles[3] = SWIZZLE_X;
344 break;
345 case GL_RED:
346 swizzles[0] = SWIZZLE_X;
347 swizzles[1] = SWIZZLE_ZERO;
348 swizzles[2] = SWIZZLE_ZERO;
349 swizzles[3] = SWIZZLE_ONE;
350 break;
351 }
352 }
353
354 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
355
356 /* If the texture's format is alpha-only, force R, G, and B to
357 * 0.0. Similarly, if the texture's format has no alpha channel,
358 * force the alpha value read to 1.0. This allows the
359 * implementation to use an RGBA texture for any of these formats
360 * without leaking any unexpected values.
361 */
362 switch (img->_BaseFormat) {
363 case GL_ALPHA:
364 swizzles[0] = SWIZZLE_ZERO;
365 swizzles[1] = SWIZZLE_ZERO;
366 swizzles[2] = SWIZZLE_ZERO;
367 break;
368 case GL_LUMINANCE:
369 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
370 swizzles[0] = SWIZZLE_X;
371 swizzles[1] = SWIZZLE_X;
372 swizzles[2] = SWIZZLE_X;
373 swizzles[3] = SWIZZLE_ONE;
374 }
375 break;
376 case GL_LUMINANCE_ALPHA:
377 if (datatype == GL_SIGNED_NORMALIZED) {
378 swizzles[0] = SWIZZLE_X;
379 swizzles[1] = SWIZZLE_X;
380 swizzles[2] = SWIZZLE_X;
381 swizzles[3] = SWIZZLE_W;
382 }
383 break;
384 case GL_INTENSITY:
385 if (datatype == GL_SIGNED_NORMALIZED) {
386 swizzles[0] = SWIZZLE_X;
387 swizzles[1] = SWIZZLE_X;
388 swizzles[2] = SWIZZLE_X;
389 swizzles[3] = SWIZZLE_X;
390 }
391 break;
392 case GL_RED:
393 case GL_RG:
394 case GL_RGB:
395 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
396 swizzles[3] = SWIZZLE_ONE;
397 break;
398 }
399
400 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
401 swizzles[GET_SWZ(t->_Swizzle, 1)],
402 swizzles[GET_SWZ(t->_Swizzle, 2)],
403 swizzles[GET_SWZ(t->_Swizzle, 3)]);
404 }
405
406 /**
407 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
408 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
409 *
410 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
411 * 0 1 2 3 4 5
412 * 4 5 6 7 0 1
413 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
414 *
415 * which is simply adding 4 then modding by 8 (or anding with 7).
416 *
417 * We then may need to apply workarounds for textureGather hardware bugs.
418 */
419 static unsigned
420 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
421 {
422 unsigned scs = (swizzle + 4) & 7;
423
424 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
425 }
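/* Illustrative examples of the mapping above:
 *   swizzle_to_scs(SWIZZLE_X, false)    -> (0 + 4) & 7 = 4 = HSW_SCS_RED
 *   swizzle_to_scs(SWIZZLE_ZERO, false) -> (4 + 4) & 7 = 0 = HSW_SCS_ZERO
 *   swizzle_to_scs(SWIZZLE_Y, true)     -> HSW_SCS_GREEN, remapped to
 *                                          HSW_SCS_BLUE by the gather workaround
 */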
426
427 static unsigned
428 brw_find_matching_rb(const struct gl_framebuffer *fb,
429 const struct intel_mipmap_tree *mt)
430 {
431 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
432 const struct intel_renderbuffer *irb =
433 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
434
435 if (irb && irb->mt == mt)
436 return i;
437 }
438
439 return fb->_NumColorDrawBuffers;
440 }
441
442 static inline bool
443 brw_texture_view_sane(const struct brw_context *brw,
444 const struct intel_mipmap_tree *mt,
445 const struct isl_view *view)
446 {
447 /* There are special cases only for lossless compression. */
448 if (!intel_miptree_is_lossless_compressed(brw, mt))
449 return true;
450
451 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
452 view->format))
453 return true;
454
455 /* Logic elsewhere needs to take care to resolve the color buffer prior
456 * to sampling it as non-compressed.
457 */
458 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
459 view->base_array_layer,
460 view->array_len))
461 return false;
462
463 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
464 const unsigned rb_index = brw_find_matching_rb(fb, mt);
465
466 if (rb_index == fb->_NumColorDrawBuffers)
467 return true;
468
469 /* The underlying surface is compressed, but it is sampled using a format
470 * that the sampling engine doesn't support as compressed. Compression must
471 * be disabled for both the sampling engine and the data port in case the
472 * same surface is also used as a render target.
473 */
474 return brw->draw_aux_buffer_disabled[rb_index];
475 }
476
477 static bool
478 brw_disable_aux_surface(const struct brw_context *brw,
479 const struct intel_mipmap_tree *mt,
480 const struct isl_view *view)
481 {
482 /* Nothing to disable. */
483 if (!mt->mcs_buf)
484 return false;
485
486 const bool is_unresolved = intel_miptree_has_color_unresolved(
487 mt, view->base_level, view->levels,
488 view->base_array_layer, view->array_len);
489
490 /* There are special cases only for lossless compression. */
491 if (!intel_miptree_is_lossless_compressed(brw, mt))
492 return !is_unresolved;
493
494 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
495 const unsigned rb_index = brw_find_matching_rb(fb, mt);
496
497 /* If we are drawing into this with compression enabled, then we must also
498 * enable compression when texturing from it regardless of
499 * fast_clear_state. If we don't, then after the first draw call with
500 * this setup, there will be data in the CCS which won't get picked up by
501 * subsequent texturing operations as required by ARB_texture_barrier.
502 * Since we don't want to re-emit the binding table or do a resolve
503 * operation every draw call, the easiest thing to do is just enable
504 * compression on the texturing side. This is completely safe to do
505 * since, if compressed texturing weren't allowed, we would have disabled
506 * compression of render targets in whatever_that_function_is_called().
507 */
508 if (rb_index < fb->_NumColorDrawBuffers) {
509 if (brw->draw_aux_buffer_disabled[rb_index]) {
510 assert(!is_unresolved);
511 }
512
513 return brw->draw_aux_buffer_disabled[rb_index];
514 }
515
516 return !is_unresolved;
517 }
518
519 void
520 brw_update_texture_surface(struct gl_context *ctx,
521 unsigned unit,
522 uint32_t *surf_offset,
523 bool for_gather,
524 uint32_t plane)
525 {
526 struct brw_context *brw = brw_context(ctx);
527 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
528
529 if (obj->Target == GL_TEXTURE_BUFFER) {
530 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
531
532 } else {
533 struct intel_texture_object *intel_obj = intel_texture_object(obj);
534 struct intel_mipmap_tree *mt = intel_obj->mt;
535
536 if (plane > 0) {
537 if (mt->plane[plane - 1] == NULL)
538 return;
539 mt = mt->plane[plane - 1];
540 }
541
542 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
543 /* If this is a view with restricted NumLayers, then our effective depth
544 * is not just the miptree depth.
545 */
546 const unsigned view_num_layers =
547 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
548 mt->logical_depth0;
549
550 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
551 * texturing functions that return a float, as our code generation always
552 * selects the .x channel (which would always be 0).
553 */
554 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
555 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
556 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
557 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
558 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
559 brw_get_texture_swizzle(&brw->ctx, obj));
560
561 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
562 unsigned format = translate_tex_format(brw, mesa_fmt,
563 sampler->sRGBDecode);
564
565 /* Implement gen6 and gen7 gather work-around */
566 bool need_green_to_blue = false;
567 if (for_gather) {
568 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
569 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
570 need_green_to_blue = brw->is_haswell;
571 } else if (brw->gen == 6) {
572 /* Sandybridge's gather4 message is broken for integer formats.
573 * To work around this, we pretend the surface is UNORM for
574 * 8 or 16-bit formats, and emit shader instructions to recover
575 * the real INT/UINT value. For 32-bit formats, we pretend
576 * the surface is FLOAT, and simply reinterpret the resulting
577 * bits.
578 */
579 switch (format) {
580 case BRW_SURFACEFORMAT_R8_SINT:
581 case BRW_SURFACEFORMAT_R8_UINT:
582 format = BRW_SURFACEFORMAT_R8_UNORM;
583 break;
584
585 case BRW_SURFACEFORMAT_R16_SINT:
586 case BRW_SURFACEFORMAT_R16_UINT:
587 format = BRW_SURFACEFORMAT_R16_UNORM;
588 break;
589
590 case BRW_SURFACEFORMAT_R32_SINT:
591 case BRW_SURFACEFORMAT_R32_UINT:
592 format = BRW_SURFACEFORMAT_R32_FLOAT;
593 break;
594
595 default:
596 break;
597 }
598 }
599 }
600
601 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
602 if (brw->gen <= 7) {
603 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
604 mt = mt->r8stencil_mt;
605 } else {
606 mt = mt->stencil_mt;
607 }
608 format = BRW_SURFACEFORMAT_R8_UINT;
609 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
610 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
611 mt = mt->r8stencil_mt;
612 format = BRW_SURFACEFORMAT_R8_UINT;
613 }
614
615 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
616
617 struct isl_view view = {
618 .format = format,
619 .base_level = obj->MinLevel + obj->BaseLevel,
620 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
621 .base_array_layer = obj->MinLayer,
622 .array_len = view_num_layers,
623 .swizzle = {
624 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
625 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
626 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
627 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
628 },
629 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
630 };
631
632 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
633 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
634 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
635
636 assert(brw_texture_view_sane(brw, mt, &view));
637
638 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
639 INTEL_AUX_BUFFER_DISABLED : 0;
640 brw_emit_surface_state(brw, mt, flags, mt->target, view,
641 tex_mocs[brw->gen],
642 surf_offset, surf_index,
643 I915_GEM_DOMAIN_SAMPLER, 0);
644 }
645 }
646
647 void
648 brw_emit_buffer_surface_state(struct brw_context *brw,
649 uint32_t *out_offset,
650 drm_intel_bo *bo,
651 unsigned buffer_offset,
652 unsigned surface_format,
653 unsigned buffer_size,
654 unsigned pitch,
655 bool rw)
656 {
657 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
658 brw->isl_dev.ss.size,
659 brw->isl_dev.ss.align,
660 out_offset);
661
662 isl_buffer_fill_state(&brw->isl_dev, dw,
663 .address = (bo ? bo->offset64 : 0) + buffer_offset,
664 .size = buffer_size,
665 .format = surface_format,
666 .stride = pitch,
667 .mocs = tex_mocs[brw->gen]);
668
669 if (bo) {
670 drm_intel_bo_emit_reloc(brw->batch.bo,
671 *out_offset + brw->isl_dev.ss.addr_offset,
672 bo, buffer_offset,
673 I915_GEM_DOMAIN_SAMPLER,
674 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
675 }
676 }
677
678 void
679 brw_update_buffer_texture_surface(struct gl_context *ctx,
680 unsigned unit,
681 uint32_t *surf_offset)
682 {
683 struct brw_context *brw = brw_context(ctx);
684 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
685 struct intel_buffer_object *intel_obj =
686 intel_buffer_object(tObj->BufferObject);
687 uint32_t size = tObj->BufferSize;
688 drm_intel_bo *bo = NULL;
689 mesa_format format = tObj->_BufferObjectFormat;
690 uint32_t brw_format = brw_format_for_mesa_format(format);
691 int texel_size = _mesa_get_format_bytes(format);
692
693 if (intel_obj) {
694 size = MIN2(size, intel_obj->Base.Size);
695 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
696 }
697
698 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
699 _mesa_problem(NULL, "bad format %s for texture buffer\n",
700 _mesa_get_format_name(format));
701 }
702
703 brw_emit_buffer_surface_state(brw, surf_offset, bo,
704 tObj->BufferOffset,
705 brw_format,
706 size,
707 texel_size,
708 false /* rw */);
709 }
710
711 /**
712 * Create the constant buffer surface. Vertex/fragment shader constants will be
713 * read from this buffer with Data Port Read instructions/messages.
714 */
715 void
716 brw_create_constant_surface(struct brw_context *brw,
717 drm_intel_bo *bo,
718 uint32_t offset,
719 uint32_t size,
720 uint32_t *out_offset)
721 {
722 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
723 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
724 size, 1, false);
725 }
726
727 /**
728 * Create the buffer surface. Shader buffer variables will be
729 * read from / written to this buffer with Data Port Read/Write
730 * instructions/messages.
731 */
732 void
733 brw_create_buffer_surface(struct brw_context *brw,
734 drm_intel_bo *bo,
735 uint32_t offset,
736 uint32_t size,
737 uint32_t *out_offset)
738 {
739 /* Use a raw surface so we can reuse existing untyped read/write/atomic
740 * messages. We need these specifically for the fragment shader since they
741 * include a pixel mask header that we need in order to ensure correct
742 * behavior with helper invocations, which cannot write to the buffer.
743 */
744 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
745 BRW_SURFACEFORMAT_RAW,
746 size, 1, true);
747 }
748
749 /**
750 * Set up a binding table entry for use by stream output logic (transform
751 * feedback).
752 *
753 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
754 */
755 void
756 brw_update_sol_surface(struct brw_context *brw,
757 struct gl_buffer_object *buffer_obj,
758 uint32_t *out_offset, unsigned num_vector_components,
759 unsigned stride_dwords, unsigned offset_dwords)
760 {
761 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
762 uint32_t offset_bytes = 4 * offset_dwords;
763 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
764 offset_bytes,
765 buffer_obj->Size - offset_bytes);
766 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
767 out_offset);
768 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
769 size_t size_dwords = buffer_obj->Size / 4;
770 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
771
772 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
773 * too big to map using a single binding table entry?
774 */
775 assert((size_dwords - offset_dwords) / stride_dwords
776 <= BRW_MAX_NUM_BUFFER_ENTRIES);
777
778 if (size_dwords > offset_dwords + num_vector_components) {
779 /* There is room for at least 1 transform feedback output in the buffer.
780 * Compute the number of additional transform feedback outputs the
781 * buffer has room for.
782 */
783 buffer_size_minus_1 =
784 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
785 } else {
786 /* There isn't even room for a single transform feedback output in the
787 * buffer. We can't configure the binding table entry to prevent output
788 * entirely; we'll have to rely on the geometry shader to detect
789 * overflow. But to minimize the damage in case of a bug, set up the
790 * binding table entry to just allow a single output.
791 */
792 buffer_size_minus_1 = 0;
793 }
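/* The (up to) 27-bit entry count is split across three SURFACE_STATE fields:
 * Width takes bits 6:0, Height bits 19:7 and Depth bits 26:20, matching the
 * masks and shifts below.
 */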
794 width = buffer_size_minus_1 & 0x7f;
795 height = (buffer_size_minus_1 & 0xfff80) >> 7;
796 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
797
798 switch (num_vector_components) {
799 case 1:
800 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
801 break;
802 case 2:
803 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
804 break;
805 case 3:
806 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
807 break;
808 case 4:
809 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
810 break;
811 default:
812 unreachable("Invalid vector size for transform feedback output");
813 }
814
815 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
816 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
817 surface_format << BRW_SURFACE_FORMAT_SHIFT |
818 BRW_SURFACE_RC_READ_WRITE;
819 surf[1] = bo->offset64 + offset_bytes; /* reloc */
820 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
821 height << BRW_SURFACE_HEIGHT_SHIFT);
822 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
823 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
824 surf[4] = 0;
825 surf[5] = 0;
826
827 /* Emit relocation to surface contents. */
828 drm_intel_bo_emit_reloc(brw->batch.bo,
829 *out_offset + 4,
830 bo, offset_bytes,
831 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
832 }
833
834 /* Creates a new WM constant buffer reflecting the current fragment program's
835 * constants, if needed by the fragment program.
836 *
837 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
838 * state atom.
839 */
840 static void
841 brw_upload_wm_pull_constants(struct brw_context *brw)
842 {
843 struct brw_stage_state *stage_state = &brw->wm.base;
844 /* BRW_NEW_FRAGMENT_PROGRAM */
845 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
846 /* BRW_NEW_FS_PROG_DATA */
847 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
848
849 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
850 /* _NEW_PROGRAM_CONSTANTS */
851 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
852 stage_state, prog_data);
853 }
854
855 const struct brw_tracked_state brw_wm_pull_constants = {
856 .dirty = {
857 .mesa = _NEW_PROGRAM_CONSTANTS,
858 .brw = BRW_NEW_BATCH |
859 BRW_NEW_BLORP |
860 BRW_NEW_FRAGMENT_PROGRAM |
861 BRW_NEW_FS_PROG_DATA,
862 },
863 .emit = brw_upload_wm_pull_constants,
864 };
865
866 /**
867 * Creates a null renderbuffer surface.
868 *
869 * This is used when the shader doesn't write to any color output. An FB
870 * write to target 0 will still be emitted, because that's how the thread is
871 * terminated (and computed depth is returned), so we need to have the
872 * hardware discard the target 0 color output.
873 */
874 static void
875 brw_emit_null_surface_state(struct brw_context *brw,
876 unsigned width,
877 unsigned height,
878 unsigned samples,
879 uint32_t *out_offset)
880 {
881 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
882 * Notes):
883 *
884 * A null surface will be used in instances where an actual surface is
885 * not bound. When a write message is generated to a null surface, no
886 * actual surface is written to. When a read message (including any
887 * sampling engine message) is generated to a null surface, the result
888 * is all zeros. Note that a null surface type is allowed to be used
889 * with all messages, even if it is not specifically indicated as
890 * supported. All of the remaining fields in surface state are ignored
891 * for null surfaces, with the following exceptions:
892 *
893 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
894 * depth buffer’s corresponding state for all render target surfaces,
895 * including null.
896 *
897 * - Surface Format must be R8G8B8A8_UNORM.
898 */
899 unsigned surface_type = BRW_SURFACE_NULL;
900 drm_intel_bo *bo = NULL;
901 unsigned pitch_minus_1 = 0;
902 uint32_t multisampling_state = 0;
903 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
904 out_offset);
905
906 if (samples > 1) {
907 /* On Gen6, null render targets seem to cause GPU hangs when
908 * multisampling. So work around this problem by rendering into a dummy
909 * color buffer.
910 *
911 * To decrease the amount of memory needed by the workaround buffer, we
912 * set its pitch to 128 bytes (the width of a Y tile). This means that
913 * the amount of memory needed for the workaround buffer is
914 * (width_in_tiles + height_in_tiles - 1) tiles.
915 *
916 * Note that since the workaround buffer will be interpreted by the
917 * hardware as an interleaved multisampled buffer, we need to compute
918 * width_in_tiles and height_in_tiles by dividing the width and height
919 * by 16 rather than the normal Y-tile size of 32.
920 */
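/* Illustrative sizing example (not from the PRM): for a 1920x1080 target,
 * width_in_tiles = 120 and height_in_tiles = 68, so the workaround buffer
 * takes (120 + 68 - 1) * 4096 bytes, roughly 748 KiB, instead of a
 * full-sized dummy color buffer.
 */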
921 unsigned width_in_tiles = ALIGN(width, 16) / 16;
922 unsigned height_in_tiles = ALIGN(height, 16) / 16;
923 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
924 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
925 size_needed);
926 bo = brw->wm.multisampled_null_render_target_bo;
927 surface_type = BRW_SURFACE_2D;
928 pitch_minus_1 = 127;
929 multisampling_state = brw_get_surface_num_multisamples(samples);
930 }
931
932 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
933 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
934 if (brw->gen < 6) {
935 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
936 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
937 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
938 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
939 }
940 surf[1] = bo ? bo->offset64 : 0;
941 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
942 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
943
944 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
945 * Notes):
946 *
947 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
948 */
949 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
950 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
951 surf[4] = multisampling_state;
952 surf[5] = 0;
953
954 if (bo) {
955 drm_intel_bo_emit_reloc(brw->batch.bo,
956 *out_offset + 4,
957 bo, 0,
958 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
959 }
960 }
961
962 /**
963 * Sets up a surface state structure to point at the given region.
964 * While it is only used for the front/back buffer currently, it should be
965 * usable for further buffers when doing ARB_draw_buffers support.
966 */
967 static uint32_t
968 gen4_update_renderbuffer_surface(struct brw_context *brw,
969 struct gl_renderbuffer *rb,
970 uint32_t flags, unsigned unit,
971 uint32_t surf_index)
972 {
973 struct gl_context *ctx = &brw->ctx;
974 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
975 struct intel_mipmap_tree *mt = irb->mt;
976 uint32_t *surf;
977 uint32_t tile_x, tile_y;
978 uint32_t format = 0;
979 uint32_t offset;
980 /* _NEW_BUFFERS */
981 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
982 /* BRW_NEW_FS_PROG_DATA */
983
984 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
985 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
986
987 if (rb->TexImage && !brw->has_surface_tile_offset) {
988 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
989
990 if (tile_x != 0 || tile_y != 0) {
991 /* Original gen4 hardware couldn't draw to a non-tile-aligned
992 * destination in a miptree unless you actually set up your renderbuffer
993 * as a miptree and used the fragile lod/array_index/etc. controls to
994 * select the image. So, instead, we just make a new single-level
995 * miptree and render into that.
996 */
997 intel_renderbuffer_move_to_temp(brw, irb, false);
998 mt = irb->mt;
999 }
1000 }
1001
1002 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
1003
1004 format = brw->render_target_format[rb_format];
1005 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1006 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1007 __func__, _mesa_get_format_name(rb_format));
1008 }
1009
1010 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1011 format << BRW_SURFACE_FORMAT_SHIFT);
1012
1013 /* reloc */
1014 assert(mt->offset % mt->cpp == 0);
1015 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1016 mt->bo->offset64 + mt->offset);
1017
1018 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1019 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1020
1021 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1022 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1023
1024 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1025
1026 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1027 /* Note that the low bits of these fields are missing, so
1028 * there's the possibility of getting in trouble.
1029 */
1030 assert(tile_x % 4 == 0);
1031 assert(tile_y % 2 == 0);
1032 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1033 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1034 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1035
1036 if (brw->gen < 6) {
1037 /* _NEW_COLOR */
1038 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1039 (ctx->Color.BlendEnabled & (1 << unit)))
1040 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1041
1042 if (!ctx->Color.ColorMask[unit][0])
1043 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1044 if (!ctx->Color.ColorMask[unit][1])
1045 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1046 if (!ctx->Color.ColorMask[unit][2])
1047 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1048
1049 /* Disable writes to the alpha component when the renderbuffer is
1050 * XRGB (no alpha bits) or alpha writes are masked off.
1051 */
1052 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1053 !ctx->Color.ColorMask[unit][3]) {
1054 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1055 }
1056 }
1057
1058 drm_intel_bo_emit_reloc(brw->batch.bo,
1059 offset + 4,
1060 mt->bo,
1061 surf[1] - mt->bo->offset64,
1062 I915_GEM_DOMAIN_RENDER,
1063 I915_GEM_DOMAIN_RENDER);
1064
1065 return offset;
1066 }
1067
1068 /**
1069 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1070 */
1071 void
1072 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1073 const struct gl_framebuffer *fb,
1074 uint32_t render_target_start,
1075 uint32_t *surf_offset)
1076 {
1077 GLuint i;
1078 const unsigned int w = _mesa_geometric_width(fb);
1079 const unsigned int h = _mesa_geometric_height(fb);
1080 const unsigned int s = _mesa_geometric_samples(fb);
1081
1082 /* Update surfaces for drawing buffers */
1083 if (fb->_NumColorDrawBuffers >= 1) {
1084 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1085 const uint32_t surf_index = render_target_start + i;
1086 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1087 INTEL_RENDERBUFFER_LAYERED : 0) |
1088 (brw->draw_aux_buffer_disabled[i] ?
1089 INTEL_AUX_BUFFER_DISABLED : 0);
1090
1091 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1092 surf_offset[surf_index] =
1093 brw->vtbl.update_renderbuffer_surface(
1094 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1095 } else {
1096 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1097 &surf_offset[surf_index]);
1098 }
1099 }
1100 } else {
1101 const uint32_t surf_index = render_target_start;
1102 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1103 &surf_offset[surf_index]);
1104 }
1105 }
1106
1107 static void
1108 update_renderbuffer_surfaces(struct brw_context *brw)
1109 {
1110 const struct gl_context *ctx = &brw->ctx;
1111
1112 /* BRW_NEW_FS_PROG_DATA */
1113 const struct brw_wm_prog_data *wm_prog_data =
1114 brw_wm_prog_data(brw->wm.base.prog_data);
1115
1116 /* _NEW_BUFFERS | _NEW_COLOR */
1117 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1118 brw_update_renderbuffer_surfaces(
1119 brw, fb,
1120 wm_prog_data->binding_table.render_target_start,
1121 brw->wm.base.surf_offset);
1122 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1123 }
1124
1125 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1126 .dirty = {
1127 .mesa = _NEW_BUFFERS |
1128 _NEW_COLOR,
1129 .brw = BRW_NEW_BATCH |
1130 BRW_NEW_BLORP |
1131 BRW_NEW_FS_PROG_DATA,
1132 },
1133 .emit = update_renderbuffer_surfaces,
1134 };
1135
1136 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1137 .dirty = {
1138 .mesa = _NEW_BUFFERS,
1139 .brw = BRW_NEW_BATCH |
1140 BRW_NEW_BLORP,
1141 },
1142 .emit = update_renderbuffer_surfaces,
1143 };
1144
1145 static void
1146 update_renderbuffer_read_surfaces(struct brw_context *brw)
1147 {
1148 const struct gl_context *ctx = &brw->ctx;
1149
1150 /* BRW_NEW_FS_PROG_DATA */
1151 const struct brw_wm_prog_data *wm_prog_data =
1152 brw_wm_prog_data(brw->wm.base.prog_data);
1153
1154 /* BRW_NEW_FRAGMENT_PROGRAM */
1155 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1156 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1157 /* _NEW_BUFFERS */
1158 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1159
1160 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1161 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1162 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1163 const unsigned surf_index =
1164 wm_prog_data->binding_table.render_target_read_start + i;
1165 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1166
1167 if (irb) {
1168 const unsigned format = brw->render_target_format[
1169 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1170 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1171 format));
1172
1173 /* Override the target of the texture if the render buffer is a
1174 * single slice of a 3D texture (since the minimum array element
1175 * field of the surface state structure is ignored by the sampler
1176 * unit for 3D textures on some hardware), or if the render buffer
1177 * is a 1D array (since shaders always provide the array index
1178 * coordinate at the Z component to avoid state-dependent
1179 * recompiles when changing the texture target of the
1180 * framebuffer).
1181 */
1182 const GLenum target =
1183 (irb->mt->target == GL_TEXTURE_3D &&
1184 irb->layer_count == 1) ? GL_TEXTURE_2D :
1185 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1186 irb->mt->target;
1187
1188 /* intel_renderbuffer::mt_layer is expressed in sample units for
1189 * the UMS and CMS multisample layouts, but
1190 * intel_renderbuffer::layer_count is expressed in units of whole
1191 * logical layers regardless of the multisample layout.
1192 */
1193 const unsigned mt_layer_unit =
1194 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1195 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1196 MAX2(irb->mt->num_samples, 1) : 1;
1197
1198 const struct isl_view view = {
1199 .format = format,
1200 .base_level = irb->mt_level - irb->mt->first_level,
1201 .levels = 1,
1202 .base_array_layer = irb->mt_layer / mt_layer_unit,
1203 .array_len = irb->layer_count,
1204 .swizzle = ISL_SWIZZLE_IDENTITY,
1205 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1206 };
1207
1208 const int flags = brw->draw_aux_buffer_disabled[i] ?
1209 INTEL_AUX_BUFFER_DISABLED : 0;
1210 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1211 tex_mocs[brw->gen],
1212 surf_offset, surf_index,
1213 I915_GEM_DOMAIN_SAMPLER, 0);
1214
1215 } else {
1216 brw->vtbl.emit_null_surface_state(
1217 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1218 _mesa_geometric_samples(fb), surf_offset);
1219 }
1220 }
1221
1222 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1223 }
1224 }
1225
1226 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1227 .dirty = {
1228 .mesa = _NEW_BUFFERS,
1229 .brw = BRW_NEW_BATCH |
1230 BRW_NEW_FRAGMENT_PROGRAM |
1231 BRW_NEW_FS_PROG_DATA,
1232 },
1233 .emit = update_renderbuffer_read_surfaces,
1234 };
1235
1236 static void
1237 update_stage_texture_surfaces(struct brw_context *brw,
1238 const struct gl_program *prog,
1239 struct brw_stage_state *stage_state,
1240 bool for_gather, uint32_t plane)
1241 {
1242 if (!prog)
1243 return;
1244
1245 struct gl_context *ctx = &brw->ctx;
1246
1247 uint32_t *surf_offset = stage_state->surf_offset;
1248
1249 /* BRW_NEW_*_PROG_DATA */
1250 if (for_gather)
1251 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1252 else
1253 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1254
1255 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1256 for (unsigned s = 0; s < num_samplers; s++) {
1257 surf_offset[s] = 0;
1258
1259 if (prog->SamplersUsed & (1 << s)) {
1260 const unsigned unit = prog->SamplerUnits[s];
1261
1262 /* _NEW_TEXTURE */
1263 if (ctx->Texture.Unit[unit]._Current) {
1264 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1265 }
1266 }
1267 }
1268 }
1269
1270
1271 /**
1272 * Construct SURFACE_STATE objects for enabled textures.
1273 */
1274 static void
1275 brw_update_texture_surfaces(struct brw_context *brw)
1276 {
1277 /* BRW_NEW_VERTEX_PROGRAM */
1278 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1279
1280 /* BRW_NEW_TESS_PROGRAMS */
1281 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1282 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1283
1284 /* BRW_NEW_GEOMETRY_PROGRAM */
1285 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1286
1287 /* BRW_NEW_FRAGMENT_PROGRAM */
1288 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1289
1290 /* _NEW_TEXTURE */
1291 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1292 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1293 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1294 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1295 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1296
1297 /* Emit an alternate set of surface state for gather. This
1298 * allows the surface format to be overridden for only the
1299 * gather4 messages. */
1300 if (brw->gen < 8) {
1301 if (vs && vs->nir->info->uses_texture_gather)
1302 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1303 if (tcs && tcs->nir->info->uses_texture_gather)
1304 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1305 if (tes && tes->nir->info->uses_texture_gather)
1306 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1307 if (gs && gs->nir->info->uses_texture_gather)
1308 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1309 if (fs && fs->nir->info->uses_texture_gather)
1310 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1311 }
1312
1313 if (fs) {
1314 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1315 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1316 }
1317
1318 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1319 }
1320
1321 const struct brw_tracked_state brw_texture_surfaces = {
1322 .dirty = {
1323 .mesa = _NEW_TEXTURE,
1324 .brw = BRW_NEW_BATCH |
1325 BRW_NEW_BLORP |
1326 BRW_NEW_FRAGMENT_PROGRAM |
1327 BRW_NEW_FS_PROG_DATA |
1328 BRW_NEW_GEOMETRY_PROGRAM |
1329 BRW_NEW_GS_PROG_DATA |
1330 BRW_NEW_TESS_PROGRAMS |
1331 BRW_NEW_TCS_PROG_DATA |
1332 BRW_NEW_TES_PROG_DATA |
1333 BRW_NEW_TEXTURE_BUFFER |
1334 BRW_NEW_VERTEX_PROGRAM |
1335 BRW_NEW_VS_PROG_DATA,
1336 },
1337 .emit = brw_update_texture_surfaces,
1338 };
1339
1340 static void
1341 brw_update_cs_texture_surfaces(struct brw_context *brw)
1342 {
1343 /* BRW_NEW_COMPUTE_PROGRAM */
1344 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1345
1346 /* _NEW_TEXTURE */
1347 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1348
1349 /* Emit an alternate set of surface state for gather. This
1350 * allows the surface format to be overridden for only the
1351 * gather4 messages.
1352 */
1353 if (brw->gen < 8) {
1354 if (cs && cs->nir->info->uses_texture_gather)
1355 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1356 }
1357
1358 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1359 }
1360
1361 const struct brw_tracked_state brw_cs_texture_surfaces = {
1362 .dirty = {
1363 .mesa = _NEW_TEXTURE,
1364 .brw = BRW_NEW_BATCH |
1365 BRW_NEW_BLORP |
1366 BRW_NEW_COMPUTE_PROGRAM,
1367 },
1368 .emit = brw_update_cs_texture_surfaces,
1369 };
1370
1371
1372 void
1373 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1374 struct brw_stage_state *stage_state,
1375 struct brw_stage_prog_data *prog_data)
1376 {
1377 struct gl_context *ctx = &brw->ctx;
1378
1379 if (!prog)
1380 return;
1381
1382 uint32_t *ubo_surf_offsets =
1383 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1384
1385 for (int i = 0; i < prog->info.num_ubos; i++) {
1386 struct gl_uniform_buffer_binding *binding =
1387 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1388
1389 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1390 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1391 } else {
1392 struct intel_buffer_object *intel_bo =
1393 intel_buffer_object(binding->BufferObject);
1394 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1395 if (!binding->AutomaticSize)
1396 size = MIN2(size, binding->Size);
1397 drm_intel_bo *bo =
1398 intel_bufferobj_buffer(brw, intel_bo,
1399 binding->Offset,
1400 size);
1401 brw_create_constant_surface(brw, bo, binding->Offset,
1402 size,
1403 &ubo_surf_offsets[i]);
1404 }
1405 }
1406
1407 uint32_t *ssbo_surf_offsets =
1408 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1409
1410 for (int i = 0; i < prog->info.num_ssbos; i++) {
1411 struct gl_shader_storage_buffer_binding *binding =
1412 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1413
1414 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1415 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1416 } else {
1417 struct intel_buffer_object *intel_bo =
1418 intel_buffer_object(binding->BufferObject);
1419 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1420 if (!binding->AutomaticSize)
1421 size = MIN2(size, binding->Size);
1422 drm_intel_bo *bo =
1423 intel_bufferobj_buffer(brw, intel_bo,
1424 binding->Offset,
1425 size);
1426 brw_create_buffer_surface(brw, bo, binding->Offset,
1427 size,
1428 &ssbo_surf_offsets[i]);
1429 }
1430 }
1431
1432 if (prog->info.num_ubos || prog->info.num_ssbos)
1433 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1434 }
1435
1436 static void
1437 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1438 {
1439 struct gl_context *ctx = &brw->ctx;
1440 /* _NEW_PROGRAM */
1441 struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1442
1443 /* BRW_NEW_FS_PROG_DATA */
1444 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1445 }
1446
1447 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1448 .dirty = {
1449 .mesa = _NEW_PROGRAM,
1450 .brw = BRW_NEW_BATCH |
1451 BRW_NEW_BLORP |
1452 BRW_NEW_FS_PROG_DATA |
1453 BRW_NEW_UNIFORM_BUFFER,
1454 },
1455 .emit = brw_upload_wm_ubo_surfaces,
1456 };
1457
1458 static void
1459 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1460 {
1461 struct gl_context *ctx = &brw->ctx;
1462 /* _NEW_PROGRAM */
1463 struct gl_shader_program *prog =
1464 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1465
1466 if (!prog || !prog->_LinkedShaders[MESA_SHADER_COMPUTE])
1467 return;
1468
1469 /* BRW_NEW_CS_PROG_DATA */
1470 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE]->Program,
1471 &brw->cs.base, brw->cs.base.prog_data);
1472 }
1473
1474 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1475 .dirty = {
1476 .mesa = _NEW_PROGRAM,
1477 .brw = BRW_NEW_BATCH |
1478 BRW_NEW_BLORP |
1479 BRW_NEW_CS_PROG_DATA |
1480 BRW_NEW_UNIFORM_BUFFER,
1481 },
1482 .emit = brw_upload_cs_ubo_surfaces,
1483 };
1484
1485 void
1486 brw_upload_abo_surfaces(struct brw_context *brw,
1487 const struct gl_program *prog,
1488 struct brw_stage_state *stage_state,
1489 struct brw_stage_prog_data *prog_data)
1490 {
1491 struct gl_context *ctx = &brw->ctx;
1492 uint32_t *surf_offsets =
1493 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1494
1495 if (prog->info.num_abos) {
1496 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1497 struct gl_atomic_buffer_binding *binding =
1498 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1499 struct intel_buffer_object *intel_bo =
1500 intel_buffer_object(binding->BufferObject);
1501 drm_intel_bo *bo = intel_bufferobj_buffer(
1502 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1503
1504 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1505 binding->Offset, BRW_SURFACEFORMAT_RAW,
1506 bo->size - binding->Offset, 1, true);
1507 }
1508
1509 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1510 }
1511 }
1512
1513 static void
1514 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1515 {
1516 /* _NEW_PROGRAM */
1517 const struct gl_program *wm = brw->fragment_program;
1518
1519 if (wm) {
1520 /* BRW_NEW_FS_PROG_DATA */
1521 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1522 }
1523 }
1524
1525 const struct brw_tracked_state brw_wm_abo_surfaces = {
1526 .dirty = {
1527 .mesa = _NEW_PROGRAM,
1528 .brw = BRW_NEW_ATOMIC_BUFFER |
1529 BRW_NEW_BLORP |
1530 BRW_NEW_BATCH |
1531 BRW_NEW_FS_PROG_DATA,
1532 },
1533 .emit = brw_upload_wm_abo_surfaces,
1534 };
1535
1536 static void
1537 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1538 {
1539 /* _NEW_PROGRAM */
1540 const struct gl_program *cp = brw->compute_program;
1541
1542 if (cp) {
1543 /* BRW_NEW_CS_PROG_DATA */
1544 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1545 }
1546 }
1547
1548 const struct brw_tracked_state brw_cs_abo_surfaces = {
1549 .dirty = {
1550 .mesa = _NEW_PROGRAM,
1551 .brw = BRW_NEW_ATOMIC_BUFFER |
1552 BRW_NEW_BLORP |
1553 BRW_NEW_BATCH |
1554 BRW_NEW_CS_PROG_DATA,
1555 },
1556 .emit = brw_upload_cs_abo_surfaces,
1557 };
1558
1559 static void
1560 brw_upload_cs_image_surfaces(struct brw_context *brw)
1561 {
1562 /* _NEW_PROGRAM */
1563 const struct gl_program *cp = brw->compute_program;
1564
1565 if (cp) {
1566 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1567 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1568 brw->cs.base.prog_data);
1569 }
1570 }
1571
1572 const struct brw_tracked_state brw_cs_image_surfaces = {
1573 .dirty = {
1574 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1575 .brw = BRW_NEW_BATCH |
1576 BRW_NEW_BLORP |
1577 BRW_NEW_CS_PROG_DATA |
1578 BRW_NEW_IMAGE_UNITS
1579 },
1580 .emit = brw_upload_cs_image_surfaces,
1581 };
1582
1583 static uint32_t
1584 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1585 {
1586 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1587 uint32_t hw_format = brw_format_for_mesa_format(format);
1588 if (access == GL_WRITE_ONLY) {
1589 return hw_format;
1590 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1591 /* Typed surface reads support a very limited subset of the shader
1592 * image formats. Translate it into the closest format the
1593 * hardware supports.
1594 */
1595 return isl_lower_storage_image_format(devinfo, hw_format);
1596 } else {
1597 /* The hardware doesn't actually support a typed format that we can use
1598 * so we have to fall back to untyped read/write messages.
1599 */
1600 return BRW_SURFACEFORMAT_RAW;
1601 }
1602 }
1603
1604 static void
1605 update_default_image_param(struct brw_context *brw,
1606 struct gl_image_unit *u,
1607 unsigned surface_idx,
1608 struct brw_image_param *param)
1609 {
1610 memset(param, 0, sizeof(*param));
1611 param->surface_idx = surface_idx;
1612 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1613 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1614 * detailed explanation of these parameters.
1615 */
1616 param->swizzling[0] = 0xff;
1617 param->swizzling[1] = 0xff;
1618 }
1619
1620 static void
1621 update_buffer_image_param(struct brw_context *brw,
1622 struct gl_image_unit *u,
1623 unsigned surface_idx,
1624 struct brw_image_param *param)
1625 {
1626 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1627 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1628 update_default_image_param(brw, u, surface_idx, param);
1629
1630 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1631 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1632 }
1633
1634 static void
1635 update_texture_image_param(struct brw_context *brw,
1636 struct gl_image_unit *u,
1637 unsigned surface_idx,
1638 struct brw_image_param *param)
1639 {
1640 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1641
1642 update_default_image_param(brw, u, surface_idx, param);
1643
1644 param->size[0] = minify(mt->logical_width0, u->Level);
1645 param->size[1] = minify(mt->logical_height0, u->Level);
1646 param->size[2] = (!u->Layered ? 1 :
1647 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1648 u->TexObj->Target == GL_TEXTURE_3D ?
1649 minify(mt->logical_depth0, u->Level) :
1650 mt->logical_depth0);
1651
1652 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1653 &param->offset[0],
1654 &param->offset[1]);
1655
1656 param->stride[0] = mt->cpp;
1657 param->stride[1] = mt->pitch / mt->cpp;
1658 param->stride[2] =
1659 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1660 param->stride[3] =
1661 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1662
1663 if (mt->tiling == I915_TILING_X) {
1664 /* An X tile is a rectangular block of 512x8 bytes. */
1665 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1666 param->tiling[1] = _mesa_logbase2(8);
1667
1668 if (brw->has_swizzling) {
1669 /* Right shifts required to swizzle bits 9 and 10 of the memory
1670 * address with bit 6.
1671 */
1672 param->swizzling[0] = 3;
1673 param->swizzling[1] = 4;
1674 }
1675 } else if (mt->tiling == I915_TILING_Y) {
1676 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1677 * different from the layout of an X-tiled surface; we simply pretend that
1678 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1679 * one arranged in X-major order just as is the case for X-tiling.
1680 */
1681 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1682 param->tiling[1] = _mesa_logbase2(32);
1683
1684 if (brw->has_swizzling) {
1685 /* Right shift required to swizzle bit 9 of the memory address with
1686 * bit 6.
1687 */
1688 param->swizzling[0] = 3;
1689 }
1690 }
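/* Illustrative values, assuming a 4-byte-per-texel format such as RGBA8:
 * X-tiling yields tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3,
 * while Y-tiling yields tiling[0] = log2(16 / 4) = 2 and tiling[1] = log2(32) = 5.
 */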
1691
1692 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1693 * address calculation algorithm (emit_address_calculation() in
1694 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1695 * modulus equal to the LOD.
1696 */
1697 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1698 0);
1699 }
1700
1701 static void
1702 update_image_surface(struct brw_context *brw,
1703 struct gl_image_unit *u,
1704 GLenum access,
1705 unsigned surface_idx,
1706 uint32_t *surf_offset,
1707 struct brw_image_param *param)
1708 {
1709 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1710 struct gl_texture_object *obj = u->TexObj;
1711 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1712
1713 if (obj->Target == GL_TEXTURE_BUFFER) {
1714 struct intel_buffer_object *intel_obj =
1715 intel_buffer_object(obj->BufferObject);
1716 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1717 _mesa_get_format_bytes(u->_ActualFormat));
1718
1719 brw_emit_buffer_surface_state(
1720 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1721 format, intel_obj->Base.Size, texel_size,
1722 access != GL_READ_ONLY);
1723
1724 update_buffer_image_param(brw, u, surface_idx, param);
1725
1726 } else {
1727 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1728 struct intel_mipmap_tree *mt = intel_obj->mt;
1729
1730 if (format == BRW_SURFACEFORMAT_RAW) {
1731 brw_emit_buffer_surface_state(
1732 brw, surf_offset, mt->bo, mt->offset,
1733 format, mt->bo->size - mt->offset, 1 /* pitch */,
1734 access != GL_READ_ONLY);
1735
1736 } else {
1737 const unsigned num_layers = (!u->Layered ? 1 :
1738 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1739 mt->logical_depth0);
1740
1741 struct isl_view view = {
1742 .format = format,
1743 .base_level = obj->MinLevel + u->Level,
1744 .levels = 1,
1745 .base_array_layer = obj->MinLayer + u->_Layer,
1746 .array_len = num_layers,
1747 .swizzle = ISL_SWIZZLE_IDENTITY,
1748 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1749 };
1750
1751 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1752 const bool unresolved = intel_miptree_has_color_unresolved(
1753 mt, view.base_level, view.levels,
1754 view.base_array_layer, view.array_len);
1755 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1756 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1757 tex_mocs[brw->gen],
1758 surf_offset, surf_index,
1759 I915_GEM_DOMAIN_SAMPLER,
1760 access == GL_READ_ONLY ? 0 :
1761 I915_GEM_DOMAIN_SAMPLER);
1762 }
1763
1764 update_texture_image_param(brw, u, surface_idx, param);
1765 }
1766
1767 } else {
1768 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1769 update_default_image_param(brw, u, surface_idx, param);
1770 }
1771 }
1772
1773 void
1774 brw_upload_image_surfaces(struct brw_context *brw,
1775 const struct gl_program *prog,
1776 struct brw_stage_state *stage_state,
1777 struct brw_stage_prog_data *prog_data)
1778 {
1779 assert(prog);
1780 struct gl_context *ctx = &brw->ctx;
1781
1782 if (prog->info.num_images) {
1783 for (unsigned i = 0; i < prog->info.num_images; i++) {
1784 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1785 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1786
1787 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1788 surf_idx,
1789 &stage_state->surf_offset[surf_idx],
1790 &prog_data->image_param[i]);
1791 }
1792
1793 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1794 /* This may have changed the image metadata dependent on the context
1795 * image unit state and passed to the program as uniforms; make sure
1796 * that push and pull constants are reuploaded.
1797 */
1798 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1799 }
1800 }
1801
1802 static void
1803 brw_upload_wm_image_surfaces(struct brw_context *brw)
1804 {
1805 /* BRW_NEW_FRAGMENT_PROGRAM */
1806 const struct gl_program *wm = brw->fragment_program;
1807
1808 if (wm) {
1809 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1810 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1811 brw->wm.base.prog_data);
1812 }
1813 }
1814
1815 const struct brw_tracked_state brw_wm_image_surfaces = {
1816 .dirty = {
1817 .mesa = _NEW_TEXTURE,
1818 .brw = BRW_NEW_BATCH |
1819 BRW_NEW_BLORP |
1820 BRW_NEW_FRAGMENT_PROGRAM |
1821 BRW_NEW_FS_PROG_DATA |
1822 BRW_NEW_IMAGE_UNITS
1823 },
1824 .emit = brw_upload_wm_image_surfaces,
1825 };
1826
1827 void
1828 gen4_init_vtable_surface_functions(struct brw_context *brw)
1829 {
1830 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1831 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1832 }
1833
1834 void
1835 gen6_init_vtable_surface_functions(struct brw_context *brw)
1836 {
1837 gen4_init_vtable_surface_functions(brw);
1838 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1839 }
1840
1841 static void
1842 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1843 {
1844 struct gl_context *ctx = &brw->ctx;
1845 /* _NEW_PROGRAM */
1846 struct gl_shader_program *prog =
1847 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1848 /* BRW_NEW_CS_PROG_DATA */
1849 const struct brw_cs_prog_data *cs_prog_data =
1850 brw_cs_prog_data(brw->cs.base.prog_data);
1851
1852 if (prog && cs_prog_data->uses_num_work_groups) {
1853 const unsigned surf_idx =
1854 cs_prog_data->binding_table.work_groups_start;
1855 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1856 drm_intel_bo *bo;
1857 uint32_t bo_offset;
1858
1859 if (brw->compute.num_work_groups_bo == NULL) {
1860 bo = NULL;
1861 intel_upload_data(brw,
1862 (void *)brw->compute.num_work_groups,
1863 3 * sizeof(GLuint),
1864 sizeof(GLuint),
1865 &bo,
1866 &bo_offset);
1867 } else {
1868 bo = brw->compute.num_work_groups_bo;
1869 bo_offset = brw->compute.num_work_groups_offset;
1870 }
1871
1872 brw_emit_buffer_surface_state(brw, surf_offset,
1873 bo, bo_offset,
1874 BRW_SURFACEFORMAT_RAW,
1875 3 * sizeof(GLuint), 1, true);
1876 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1877 }
1878 }
1879
1880 const struct brw_tracked_state brw_cs_work_groups_surface = {
1881 .dirty = {
1882 .brw = BRW_NEW_BLORP |
1883 BRW_NEW_CS_PROG_DATA |
1884 BRW_NEW_CS_WORK_GROUPS
1885 },
1886 .emit = brw_upload_cs_work_groups_surface,
1887 };