1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
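/* Memory Object Control State (MOCS) values for texture and render-target
 * surfaces, indexed by hardware generation (brw->gen).  The render-target
 * entries on gen8+ use PTE-based settings, which defer cacheability to the
 * kernel's page tables (relevant for scanout buffers), while texture
 * surfaces use plain write-back caching.
 */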
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
 95       /* The layout of the specified texture target is not compatible with the
 96        * actual layout of the miptree structure in memory -- you're entering
 97        * dangerous territory. This can only work if you intend to access a
 98        * single level and slice of the texture, and the hardware
 99        * supports the tile offset feature in order to allow non-tile-aligned
 100       * base offsets, since we'll have to point the hardware at the first
 101       * texel of the level instead of relying on the usual base level/layer
 102       * controls.
 103       */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 drm_intel_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 assert(mt->mcs_buf->offset == 0);
147 aux_bo = mt->mcs_buf->bo;
148 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
149 } else {
150 aux_bo = mt->hiz_buf->aux_base.bo;
151 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
152 }
153
154 /* We only really need a clear color if we also have an auxiliary
155 * surface. Without one, it does nothing.
156 */
157 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
158 }
159
160 void *state = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
161 brw->isl_dev.ss.size,
162 brw->isl_dev.ss.align,
163 surf_index, surf_offset);
164
165 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
166 .address = mt->bo->offset64 + offset,
167 .aux_surf = aux_surf, .aux_usage = aux_usage,
168 .aux_address = aux_offset,
169 .mocs = mocs, .clear_color = clear_color,
170 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
171
172 drm_intel_bo_emit_reloc(brw->batch.bo,
173 *surf_offset + brw->isl_dev.ss.addr_offset,
174 mt->bo, offset,
175 read_domains, write_domains);
176
177 if (aux_surf) {
178 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
179 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
180 * contain other control information. Since buffer addresses are always
181 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
182 * an ordinary reloc to do the necessary address translation.
183 */
184 assert((aux_offset & 0xfff) == 0);
185 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
186 drm_intel_bo_emit_reloc(brw->batch.bo,
187 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
188 aux_bo, *aux_addr & 0xfff,
189 read_domains, write_domains);
190 }
191 }
192
193 uint32_t
194 brw_update_renderbuffer_surface(struct brw_context *brw,
195 struct gl_renderbuffer *rb,
196 uint32_t flags, unsigned unit /* unused */,
197 uint32_t surf_index)
198 {
199 struct gl_context *ctx = &brw->ctx;
200 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
201 struct intel_mipmap_tree *mt = irb->mt;
202
203 if (brw->gen < 9) {
204 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
205 }
206
207 assert(brw_render_target_supported(brw, rb));
208 intel_miptree_used_for_rendering(mt);
209
210 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
211 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
212 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
213 __func__, _mesa_get_format_name(rb_format));
214 }
215
216 const unsigned layer_multiplier =
217 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
218 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
219 MAX2(irb->mt->num_samples, 1) : 1;
220
221 struct isl_view view = {
222 .format = brw->render_target_format[rb_format],
223 .base_level = irb->mt_level - irb->mt->first_level,
224 .levels = 1,
225 .base_array_layer = irb->mt_layer / layer_multiplier,
226 .array_len = MAX2(irb->layer_count, 1),
227 .swizzle = ISL_SWIZZLE_IDENTITY,
228 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
229 };
230
231 uint32_t offset;
232 brw_emit_surface_state(brw, mt, flags, mt->target, view,
233 rb_mocs[brw->gen],
234 &offset, surf_index,
235 I915_GEM_DOMAIN_RENDER,
236 I915_GEM_DOMAIN_RENDER);
237 return offset;
238 }
239
240 GLuint
241 translate_tex_target(GLenum target)
242 {
243 switch (target) {
244 case GL_TEXTURE_1D:
245 case GL_TEXTURE_1D_ARRAY_EXT:
246 return BRW_SURFACE_1D;
247
248 case GL_TEXTURE_RECTANGLE_NV:
249 return BRW_SURFACE_2D;
250
251 case GL_TEXTURE_2D:
252 case GL_TEXTURE_2D_ARRAY_EXT:
253 case GL_TEXTURE_EXTERNAL_OES:
254 case GL_TEXTURE_2D_MULTISAMPLE:
255 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
256 return BRW_SURFACE_2D;
257
258 case GL_TEXTURE_3D:
259 return BRW_SURFACE_3D;
260
261 case GL_TEXTURE_CUBE_MAP:
262 case GL_TEXTURE_CUBE_MAP_ARRAY:
263 return BRW_SURFACE_CUBE;
264
265 default:
266 unreachable("not reached");
267 }
268 }
269
270 uint32_t
271 brw_get_surface_tiling_bits(uint32_t tiling)
272 {
273 switch (tiling) {
274 case I915_TILING_X:
275 return BRW_SURFACE_TILED;
276 case I915_TILING_Y:
277 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
278 default:
279 return 0;
280 }
281 }
282
283
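/* Any multisampled count is reported as 4x here: gen6, the only generation
 * expected to hit this legacy surface layout with multisampling enabled,
 * supports 4x MSAA exclusively.
 */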
284 uint32_t
285 brw_get_surface_num_multisamples(unsigned num_samples)
286 {
287 if (num_samples > 1)
288 return BRW_SURFACE_MULTISAMPLECOUNT_4;
289 else
290 return BRW_SURFACE_MULTISAMPLECOUNT_1;
291 }
292
293 /**
294 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
295 * swizzling.
296 */
297 int
298 brw_get_texture_swizzle(const struct gl_context *ctx,
299 const struct gl_texture_object *t)
300 {
301 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
302
303 int swizzles[SWIZZLE_NIL + 1] = {
304 SWIZZLE_X,
305 SWIZZLE_Y,
306 SWIZZLE_Z,
307 SWIZZLE_W,
308 SWIZZLE_ZERO,
309 SWIZZLE_ONE,
310 SWIZZLE_NIL
311 };
312
313 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
314 img->_BaseFormat == GL_DEPTH_STENCIL) {
315 GLenum depth_mode = t->DepthMode;
316
317 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
318 * with depth component data specified with a sized internal format.
319 * Otherwise, it's left at the old default, GL_LUMINANCE.
320 */
321 if (_mesa_is_gles3(ctx) &&
322 img->InternalFormat != GL_DEPTH_COMPONENT &&
323 img->InternalFormat != GL_DEPTH_STENCIL) {
324 depth_mode = GL_RED;
325 }
326
327 switch (depth_mode) {
328 case GL_ALPHA:
329 swizzles[0] = SWIZZLE_ZERO;
330 swizzles[1] = SWIZZLE_ZERO;
331 swizzles[2] = SWIZZLE_ZERO;
332 swizzles[3] = SWIZZLE_X;
333 break;
334 case GL_LUMINANCE:
335 swizzles[0] = SWIZZLE_X;
336 swizzles[1] = SWIZZLE_X;
337 swizzles[2] = SWIZZLE_X;
338 swizzles[3] = SWIZZLE_ONE;
339 break;
340 case GL_INTENSITY:
341 swizzles[0] = SWIZZLE_X;
342 swizzles[1] = SWIZZLE_X;
343 swizzles[2] = SWIZZLE_X;
344 swizzles[3] = SWIZZLE_X;
345 break;
346 case GL_RED:
347 swizzles[0] = SWIZZLE_X;
348 swizzles[1] = SWIZZLE_ZERO;
349 swizzles[2] = SWIZZLE_ZERO;
350 swizzles[3] = SWIZZLE_ONE;
351 break;
352 }
353 }
354
355 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
356
357 /* If the texture's format is alpha-only, force R, G, and B to
358 * 0.0. Similarly, if the texture's format has no alpha channel,
359 * force the alpha value read to 1.0. This allows for the
360 * implementation to use an RGBA texture for any of these formats
361 * without leaking any unexpected values.
362 */
363 switch (img->_BaseFormat) {
364 case GL_ALPHA:
365 swizzles[0] = SWIZZLE_ZERO;
366 swizzles[1] = SWIZZLE_ZERO;
367 swizzles[2] = SWIZZLE_ZERO;
368 break;
369 case GL_LUMINANCE:
370 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
371 swizzles[0] = SWIZZLE_X;
372 swizzles[1] = SWIZZLE_X;
373 swizzles[2] = SWIZZLE_X;
374 swizzles[3] = SWIZZLE_ONE;
375 }
376 break;
377 case GL_LUMINANCE_ALPHA:
378 if (datatype == GL_SIGNED_NORMALIZED) {
379 swizzles[0] = SWIZZLE_X;
380 swizzles[1] = SWIZZLE_X;
381 swizzles[2] = SWIZZLE_X;
382 swizzles[3] = SWIZZLE_W;
383 }
384 break;
385 case GL_INTENSITY:
386 if (datatype == GL_SIGNED_NORMALIZED) {
387 swizzles[0] = SWIZZLE_X;
388 swizzles[1] = SWIZZLE_X;
389 swizzles[2] = SWIZZLE_X;
390 swizzles[3] = SWIZZLE_X;
391 }
392 break;
393 case GL_RED:
394 case GL_RG:
395 case GL_RGB:
396 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
397 swizzles[3] = SWIZZLE_ONE;
398 break;
399 }
400
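   /* Compose the DEPTH_TEXTURE_MODE / base-format swizzle computed above with
    * the application's EXT_texture_swizzle state: GET_SWZ(t->_Swizzle, i)
    * selects which of the computed channels feeds output component i.
    */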
401 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
402 swizzles[GET_SWZ(t->_Swizzle, 1)],
403 swizzles[GET_SWZ(t->_Swizzle, 2)],
404 swizzles[GET_SWZ(t->_Swizzle, 3)]);
405 }
406
407 /**
 408  * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
 409  * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
410 *
411 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
412 * 0 1 2 3 4 5
413 * 4 5 6 7 0 1
414 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
415 *
416 * which is simply adding 4 then modding by 8 (or anding with 7).
417 *
418 * We then may need to apply workarounds for textureGather hardware bugs.
419 */
420 static unsigned
421 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
422 {
423 unsigned scs = (swizzle + 4) & 7;
424
425 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
426 }
427
428 static unsigned
429 brw_find_matching_rb(const struct gl_framebuffer *fb,
430 const struct intel_mipmap_tree *mt)
431 {
432 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
433 const struct intel_renderbuffer *irb =
434 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
435
436 if (irb && irb->mt == mt)
437 return i;
438 }
439
440 return fb->_NumColorDrawBuffers;
441 }
442
443 static inline bool
444 brw_texture_view_sane(const struct brw_context *brw,
445 const struct intel_mipmap_tree *mt, unsigned format)
446 {
447 /* There are special cases only for lossless compression. */
448 if (!intel_miptree_is_lossless_compressed(brw, mt))
449 return true;
450
451 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
452 format))
453 return true;
454
455 /* Logic elsewhere needs to take care to resolve the color buffer prior
456 * to sampling it as non-compressed.
457 */
458 if (mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)
459 return false;
460
461 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
462 const unsigned rb_index = brw_find_matching_rb(fb, mt);
463
464 if (rb_index == fb->_NumColorDrawBuffers)
465 return true;
466
467 /* Underlying surface is compressed but it is sampled using a format that
468 * the sampling engine doesn't support as compressed. Compression must be
 469    * disabled for both the sampling engine and the data port in case the same
 470    * surface is also used as a render target.
471 */
472 return brw->draw_aux_buffer_disabled[rb_index];
473 }
474
475 static bool
476 brw_disable_aux_surface(const struct brw_context *brw,
477 const struct intel_mipmap_tree *mt)
478 {
479 /* Nothing to disable. */
480 if (!mt->mcs_buf)
481 return false;
482
483 /* There are special cases only for lossless compression. */
484 if (!intel_miptree_is_lossless_compressed(brw, mt))
485 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
486
487 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
488 const unsigned rb_index = brw_find_matching_rb(fb, mt);
489
490 /* If we are drawing into this with compression enabled, then we must also
491 * enable compression when texturing from it regardless of
 492    * fast_clear_state. If we don't, then after the first draw call with
493 * this setup, there will be data in the CCS which won't get picked up by
494 * subsequent texturing operations as required by ARB_texture_barrier.
495 * Since we don't want to re-emit the binding table or do a resolve
496 * operation every draw call, the easiest thing to do is just enable
497 * compression on the texturing side. This is completely safe to do
498 * since, if compressed texturing weren't allowed, we would have disabled
499 * compression of render targets in whatever_that_function_is_called().
500 */
501 if (rb_index < fb->_NumColorDrawBuffers) {
502 if (brw->draw_aux_buffer_disabled[rb_index]) {
503 assert(mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED);
504 }
505
506 return brw->draw_aux_buffer_disabled[rb_index];
507 }
508
509 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
510 }
511
512 void
513 brw_update_texture_surface(struct gl_context *ctx,
514 unsigned unit,
515 uint32_t *surf_offset,
516 bool for_gather,
517 uint32_t plane)
518 {
519 struct brw_context *brw = brw_context(ctx);
520 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
521
522 if (obj->Target == GL_TEXTURE_BUFFER) {
523 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
524
525 } else {
526 struct intel_texture_object *intel_obj = intel_texture_object(obj);
527 struct intel_mipmap_tree *mt = intel_obj->mt;
528
529 if (plane > 0) {
530 if (mt->plane[plane - 1] == NULL)
531 return;
532 mt = mt->plane[plane - 1];
533 }
534
535 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
536 /* If this is a view with restricted NumLayers, then our effective depth
537 * is not just the miptree depth.
538 */
539 const unsigned view_num_layers =
540 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
541 mt->logical_depth0;
542
543 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
544 * texturing functions that return a float, as our code generation always
545 * selects the .x channel (which would always be 0).
546 */
547 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
548 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
549 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
550 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
551 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
552 brw_get_texture_swizzle(&brw->ctx, obj));
553
554 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
555 unsigned format = translate_tex_format(brw, mesa_fmt,
556 sampler->sRGBDecode);
557
558 /* Implement gen6 and gen7 gather work-around */
559 bool need_green_to_blue = false;
560 if (for_gather) {
561 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
562 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
563 need_green_to_blue = brw->is_haswell;
564 } else if (brw->gen == 6) {
565 /* Sandybridge's gather4 message is broken for integer formats.
566 * To work around this, we pretend the surface is UNORM for
567 * 8 or 16-bit formats, and emit shader instructions to recover
568 * the real INT/UINT value. For 32-bit formats, we pretend
569 * the surface is FLOAT, and simply reinterpret the resulting
570 * bits.
571 */
572 switch (format) {
573 case BRW_SURFACEFORMAT_R8_SINT:
574 case BRW_SURFACEFORMAT_R8_UINT:
575 format = BRW_SURFACEFORMAT_R8_UNORM;
576 break;
577
578 case BRW_SURFACEFORMAT_R16_SINT:
579 case BRW_SURFACEFORMAT_R16_UINT:
580 format = BRW_SURFACEFORMAT_R16_UNORM;
581 break;
582
583 case BRW_SURFACEFORMAT_R32_SINT:
584 case BRW_SURFACEFORMAT_R32_UINT:
585 format = BRW_SURFACEFORMAT_R32_FLOAT;
586 break;
587
588 default:
589 break;
590 }
591 }
592 }
593
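      /* Stencil texturing: before gen8 the sampler cannot read the W-tiled
       * stencil buffer directly, so we instead sample from the R8_UINT shadow
       * copy (r8stencil_mt) that the miptree code keeps up to date.
       */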
594 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
595 if (brw->gen <= 7) {
596 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
597 mt = mt->r8stencil_mt;
598 } else {
599 mt = mt->stencil_mt;
600 }
601 format = BRW_SURFACEFORMAT_R8_UINT;
602 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
603 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
604 mt = mt->r8stencil_mt;
605 format = BRW_SURFACEFORMAT_R8_UINT;
606 }
607
608 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
609
610 struct isl_view view = {
611 .format = format,
612 .base_level = obj->MinLevel + obj->BaseLevel,
613 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
614 .base_array_layer = obj->MinLayer,
615 .array_len = view_num_layers,
616 .swizzle = {
617 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
618 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
619 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
620 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
621 },
622 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
623 };
624
625 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
626 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
627 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
628
629 assert(brw_texture_view_sane(brw, mt, format));
630
631 const int flags =
632 brw_disable_aux_surface(brw, mt) ? INTEL_AUX_BUFFER_DISABLED : 0;
633 brw_emit_surface_state(brw, mt, flags, mt->target, view,
634 tex_mocs[brw->gen],
635 surf_offset, surf_index,
636 I915_GEM_DOMAIN_SAMPLER, 0);
637 }
638 }
639
640 void
641 brw_emit_buffer_surface_state(struct brw_context *brw,
642 uint32_t *out_offset,
643 drm_intel_bo *bo,
644 unsigned buffer_offset,
645 unsigned surface_format,
646 unsigned buffer_size,
647 unsigned pitch,
648 bool rw)
649 {
650 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
651 brw->isl_dev.ss.size,
652 brw->isl_dev.ss.align,
653 out_offset);
654
655 isl_buffer_fill_state(&brw->isl_dev, dw,
656 .address = (bo ? bo->offset64 : 0) + buffer_offset,
657 .size = buffer_size,
658 .format = surface_format,
659 .stride = pitch,
660 .mocs = tex_mocs[brw->gen]);
661
662 if (bo) {
663 drm_intel_bo_emit_reloc(brw->batch.bo,
664 *out_offset + brw->isl_dev.ss.addr_offset,
665 bo, buffer_offset,
666 I915_GEM_DOMAIN_SAMPLER,
667 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
668 }
669 }
670
671 void
672 brw_update_buffer_texture_surface(struct gl_context *ctx,
673 unsigned unit,
674 uint32_t *surf_offset)
675 {
676 struct brw_context *brw = brw_context(ctx);
677 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
678 struct intel_buffer_object *intel_obj =
679 intel_buffer_object(tObj->BufferObject);
680 uint32_t size = tObj->BufferSize;
681 drm_intel_bo *bo = NULL;
682 mesa_format format = tObj->_BufferObjectFormat;
683 uint32_t brw_format = brw_format_for_mesa_format(format);
684 int texel_size = _mesa_get_format_bytes(format);
685
686 if (intel_obj) {
687 size = MIN2(size, intel_obj->Base.Size);
688 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
689 }
690
691 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
692 _mesa_problem(NULL, "bad format %s for texture buffer\n",
693 _mesa_get_format_name(format));
694 }
695
696 brw_emit_buffer_surface_state(brw, surf_offset, bo,
697 tObj->BufferOffset,
698 brw_format,
699 size,
700 texel_size,
701 false /* rw */);
702 }
703
704 /**
705 * Create the constant buffer surface. Vertex/fragment shader constants will be
706 * read from this buffer with Data Port Read instructions/messages.
707 */
708 void
709 brw_create_constant_surface(struct brw_context *brw,
710 drm_intel_bo *bo,
711 uint32_t offset,
712 uint32_t size,
713 uint32_t *out_offset)
714 {
715 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
716 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
717 size, 1, false);
718 }
719
720 /**
721 * Create the buffer surface. Shader buffer variables will be
 722  * read from / written to this buffer with Data Port Read/Write
723 * instructions/messages.
724 */
725 void
726 brw_create_buffer_surface(struct brw_context *brw,
727 drm_intel_bo *bo,
728 uint32_t offset,
729 uint32_t size,
730 uint32_t *out_offset)
731 {
732 /* Use a raw surface so we can reuse existing untyped read/write/atomic
733 * messages. We need these specifically for the fragment shader since they
 734    * include a pixel mask header that we need in order to ensure correct behavior
735 * with helper invocations, which cannot write to the buffer.
736 */
737 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
738 BRW_SURFACEFORMAT_RAW,
739 size, 1, true);
740 }
741
742 /**
743 * Set up a binding table entry for use by stream output logic (transform
744 * feedback).
745 *
746 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
747 */
748 void
749 brw_update_sol_surface(struct brw_context *brw,
750 struct gl_buffer_object *buffer_obj,
751 uint32_t *out_offset, unsigned num_vector_components,
752 unsigned stride_dwords, unsigned offset_dwords)
753 {
754 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
755 uint32_t offset_bytes = 4 * offset_dwords;
756 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
757 offset_bytes,
758 buffer_obj->Size - offset_bytes);
759 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
760 out_offset);
761 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
762 size_t size_dwords = buffer_obj->Size / 4;
763 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
764
765 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
766 * too big to map using a single binding table entry?
767 */
768 assert((size_dwords - offset_dwords) / stride_dwords
769 <= BRW_MAX_NUM_BUFFER_ENTRIES);
770
771 if (size_dwords > offset_dwords + num_vector_components) {
772 /* There is room for at least 1 transform feedback output in the buffer.
773 * Compute the number of additional transform feedback outputs the
774 * buffer has room for.
775 */
776 buffer_size_minus_1 =
777 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
778 } else {
779 /* There isn't even room for a single transform feedback output in the
780 * buffer. We can't configure the binding table entry to prevent output
781 * entirely; we'll have to rely on the geometry shader to detect
782 * overflow. But to minimize the damage in case of a bug, set up the
783 * binding table entry to just allow a single output.
784 */
785 buffer_size_minus_1 = 0;
786 }
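   /* The entry count is split across the Width (bits 0..6), Height
    * (bits 7..19) and Depth (bits 20..26) fields of the BUFFER surface
    * state, matching the masks below.
    */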
787 width = buffer_size_minus_1 & 0x7f;
788 height = (buffer_size_minus_1 & 0xfff80) >> 7;
789 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
790
791 switch (num_vector_components) {
792 case 1:
793 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
794 break;
795 case 2:
796 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
797 break;
798 case 3:
799 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
800 break;
801 case 4:
802 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
803 break;
804 default:
805 unreachable("Invalid vector size for transform feedback output");
806 }
807
808 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
809 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
810 surface_format << BRW_SURFACE_FORMAT_SHIFT |
811 BRW_SURFACE_RC_READ_WRITE;
812 surf[1] = bo->offset64 + offset_bytes; /* reloc */
813 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
814 height << BRW_SURFACE_HEIGHT_SHIFT);
815 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
816 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
817 surf[4] = 0;
818 surf[5] = 0;
819
820 /* Emit relocation to surface contents. */
821 drm_intel_bo_emit_reloc(brw->batch.bo,
822 *out_offset + 4,
823 bo, offset_bytes,
824 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
825 }
826
827 /* Creates a new WM constant buffer reflecting the current fragment program's
828 * constants, if needed by the fragment program.
829 *
830 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
831 * state atom.
832 */
833 static void
834 brw_upload_wm_pull_constants(struct brw_context *brw)
835 {
836 struct brw_stage_state *stage_state = &brw->wm.base;
837 /* BRW_NEW_FRAGMENT_PROGRAM */
838 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
839 /* BRW_NEW_FS_PROG_DATA */
840 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
841
842 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
843 /* _NEW_PROGRAM_CONSTANTS */
844 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
845 stage_state, prog_data);
846 }
847
848 const struct brw_tracked_state brw_wm_pull_constants = {
849 .dirty = {
850 .mesa = _NEW_PROGRAM_CONSTANTS,
851 .brw = BRW_NEW_BATCH |
852 BRW_NEW_BLORP |
853 BRW_NEW_FRAGMENT_PROGRAM |
854 BRW_NEW_FS_PROG_DATA,
855 },
856 .emit = brw_upload_wm_pull_constants,
857 };
858
859 /**
860 * Creates a null renderbuffer surface.
861 *
862 * This is used when the shader doesn't write to any color output. An FB
863 * write to target 0 will still be emitted, because that's how the thread is
864 * terminated (and computed depth is returned), so we need to have the
 865  * hardware discard the target 0 color output.
866 */
867 static void
868 brw_emit_null_surface_state(struct brw_context *brw,
869 unsigned width,
870 unsigned height,
871 unsigned samples,
872 uint32_t *out_offset)
873 {
 874    /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
875 * Notes):
876 *
877 * A null surface will be used in instances where an actual surface is
878 * not bound. When a write message is generated to a null surface, no
879 * actual surface is written to. When a read message (including any
880 * sampling engine message) is generated to a null surface, the result
881 * is all zeros. Note that a null surface type is allowed to be used
 882     *     with all messages, even if it is not specifically indicated as
883 * supported. All of the remaining fields in surface state are ignored
884 * for null surfaces, with the following exceptions:
885 *
886 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
887 * depth buffer’s corresponding state for all render target surfaces,
888 * including null.
889 *
890 * - Surface Format must be R8G8B8A8_UNORM.
891 */
892 unsigned surface_type = BRW_SURFACE_NULL;
893 drm_intel_bo *bo = NULL;
894 unsigned pitch_minus_1 = 0;
895 uint32_t multisampling_state = 0;
896 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
897 out_offset);
898
899 if (samples > 1) {
900 /* On Gen6, null render targets seem to cause GPU hangs when
 901       * multisampling. So work around this problem by rendering into a dummy
902 * color buffer.
903 *
904 * To decrease the amount of memory needed by the workaround buffer, we
905 * set its pitch to 128 bytes (the width of a Y tile). This means that
906 * the amount of memory needed for the workaround buffer is
907 * (width_in_tiles + height_in_tiles - 1) tiles.
908 *
909 * Note that since the workaround buffer will be interpreted by the
910 * hardware as an interleaved multisampled buffer, we need to compute
911 * width_in_tiles and height_in_tiles by dividing the width and height
912 * by 16 rather than the normal Y-tile size of 32.
913 */
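      /* For example, a 1920x1080 multisampled null target needs
       * (1920/16 + ALIGN(1080, 16)/16 - 1) = 120 + 68 - 1 = 187 tiles,
       * i.e. 187 * 4096 bytes (~748 KiB) of scratch space.
       */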
914 unsigned width_in_tiles = ALIGN(width, 16) / 16;
915 unsigned height_in_tiles = ALIGN(height, 16) / 16;
916 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
917 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
918 size_needed);
919 bo = brw->wm.multisampled_null_render_target_bo;
920 surface_type = BRW_SURFACE_2D;
921 pitch_minus_1 = 127;
922 multisampling_state = brw_get_surface_num_multisamples(samples);
923 }
924
925 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
926 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
927 if (brw->gen < 6) {
928 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
929 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
930 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
931 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
932 }
933 surf[1] = bo ? bo->offset64 : 0;
934 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
935 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
936
 937    /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
938 * Notes):
939 *
940 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
941 */
942 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
943 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
944 surf[4] = multisampling_state;
945 surf[5] = 0;
946
947 if (bo) {
948 drm_intel_bo_emit_reloc(brw->batch.bo,
949 *out_offset + 4,
950 bo, 0,
951 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
952 }
953 }
954
955 /**
956 * Sets up a surface state structure to point at the given region.
957 * While it is only used for the front/back buffer currently, it should be
 958  * usable for further buffers when doing ARB_draw_buffers support.
959 */
960 static uint32_t
961 gen4_update_renderbuffer_surface(struct brw_context *brw,
962 struct gl_renderbuffer *rb,
963 uint32_t flags, unsigned unit,
964 uint32_t surf_index)
965 {
966 struct gl_context *ctx = &brw->ctx;
967 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
968 struct intel_mipmap_tree *mt = irb->mt;
969 uint32_t *surf;
970 uint32_t tile_x, tile_y;
971 uint32_t format = 0;
972 uint32_t offset;
973 /* _NEW_BUFFERS */
974 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
975 /* BRW_NEW_FS_PROG_DATA */
976
977 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
978 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
979
980 if (rb->TexImage && !brw->has_surface_tile_offset) {
981 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
982
983 if (tile_x != 0 || tile_y != 0) {
984 /* Original gen4 hardware couldn't draw to a non-tile-aligned
 985          * destination in a miptree unless you actually set up your renderbuffer
986 * as a miptree and used the fragile lod/array_index/etc. controls to
987 * select the image. So, instead, we just make a new single-level
988 * miptree and render into that.
989 */
990 intel_renderbuffer_move_to_temp(brw, irb, false);
991 mt = irb->mt;
992 }
993 }
994
995 intel_miptree_used_for_rendering(irb->mt);
996
997 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
998
999 format = brw->render_target_format[rb_format];
1000 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1001 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1002 __func__, _mesa_get_format_name(rb_format));
1003 }
1004
1005 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1006 format << BRW_SURFACE_FORMAT_SHIFT);
1007
1008 /* reloc */
1009 assert(mt->offset % mt->cpp == 0);
1010 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1011 mt->bo->offset64 + mt->offset);
1012
1013 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1014 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1015
1016 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1017 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1018
1019 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1020
1021 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
 1022    /* Note that the low bits of these fields are missing, so only x offsets
 1023     * divisible by 4 and y offsets divisible by 2 can be expressed.
1024 */
1025 assert(tile_x % 4 == 0);
1026 assert(tile_y % 2 == 0);
1027 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1028 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1029 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1030
1031 if (brw->gen < 6) {
1032 /* _NEW_COLOR */
1033 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1034 (ctx->Color.BlendEnabled & (1 << unit)))
1035 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1036
1037 if (!ctx->Color.ColorMask[unit][0])
1038 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1039 if (!ctx->Color.ColorMask[unit][1])
1040 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1041 if (!ctx->Color.ColorMask[unit][2])
1042 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1043
1044 /* As mentioned above, disable writes to the alpha component when the
1045 * renderbuffer is XRGB.
1046 */
1047 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1048 !ctx->Color.ColorMask[unit][3]) {
1049 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1050 }
1051 }
1052
1053 drm_intel_bo_emit_reloc(brw->batch.bo,
1054 offset + 4,
1055 mt->bo,
1056 surf[1] - mt->bo->offset64,
1057 I915_GEM_DOMAIN_RENDER,
1058 I915_GEM_DOMAIN_RENDER);
1059
1060 return offset;
1061 }
1062
1063 /**
1064 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1065 */
1066 void
1067 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1068 const struct gl_framebuffer *fb,
1069 uint32_t render_target_start,
1070 uint32_t *surf_offset)
1071 {
1072 GLuint i;
1073 const unsigned int w = _mesa_geometric_width(fb);
1074 const unsigned int h = _mesa_geometric_height(fb);
1075 const unsigned int s = _mesa_geometric_samples(fb);
1076
1077 /* Update surfaces for drawing buffers */
1078 if (fb->_NumColorDrawBuffers >= 1) {
1079 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1080 const uint32_t surf_index = render_target_start + i;
1081 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1082 INTEL_RENDERBUFFER_LAYERED : 0) |
1083 (brw->draw_aux_buffer_disabled[i] ?
1084 INTEL_AUX_BUFFER_DISABLED : 0);
1085
1086 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1087 surf_offset[surf_index] =
1088 brw->vtbl.update_renderbuffer_surface(
1089 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1090 } else {
1091 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1092 &surf_offset[surf_index]);
1093 }
1094 }
1095 } else {
1096 const uint32_t surf_index = render_target_start;
1097 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1098 &surf_offset[surf_index]);
1099 }
1100 }
1101
1102 static void
1103 update_renderbuffer_surfaces(struct brw_context *brw)
1104 {
1105 const struct gl_context *ctx = &brw->ctx;
1106
1107 /* BRW_NEW_FS_PROG_DATA */
1108 const struct brw_wm_prog_data *wm_prog_data =
1109 brw_wm_prog_data(brw->wm.base.prog_data);
1110
1111 /* _NEW_BUFFERS | _NEW_COLOR */
1112 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1113 brw_update_renderbuffer_surfaces(
1114 brw, fb,
1115 wm_prog_data->binding_table.render_target_start,
1116 brw->wm.base.surf_offset);
1117 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1118 }
1119
1120 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1121 .dirty = {
1122 .mesa = _NEW_BUFFERS |
1123 _NEW_COLOR,
1124 .brw = BRW_NEW_BATCH |
1125 BRW_NEW_BLORP |
1126 BRW_NEW_FS_PROG_DATA,
1127 },
1128 .emit = update_renderbuffer_surfaces,
1129 };
1130
1131 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1132 .dirty = {
1133 .mesa = _NEW_BUFFERS,
1134 .brw = BRW_NEW_BATCH |
1135 BRW_NEW_BLORP,
1136 },
1137 .emit = update_renderbuffer_surfaces,
1138 };
1139
1140 static void
1141 update_renderbuffer_read_surfaces(struct brw_context *brw)
1142 {
1143 const struct gl_context *ctx = &brw->ctx;
1144
1145 /* BRW_NEW_FS_PROG_DATA */
1146 const struct brw_wm_prog_data *wm_prog_data =
1147 brw_wm_prog_data(brw->wm.base.prog_data);
1148
1149 /* BRW_NEW_FRAGMENT_PROGRAM */
1150 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1151 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1152 /* _NEW_BUFFERS */
1153 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1154
1155 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1156 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1157 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1158 const unsigned surf_index =
1159 wm_prog_data->binding_table.render_target_read_start + i;
1160 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1161
1162 if (irb) {
1163 const unsigned format = brw->render_target_format[
1164 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1165 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1166 format));
1167
1168 /* Override the target of the texture if the render buffer is a
1169 * single slice of a 3D texture (since the minimum array element
1170 * field of the surface state structure is ignored by the sampler
1171 * unit for 3D textures on some hardware), or if the render buffer
1172 * is a 1D array (since shaders always provide the array index
1173 * coordinate at the Z component to avoid state-dependent
1174 * recompiles when changing the texture target of the
1175 * framebuffer).
1176 */
1177 const GLenum target =
1178 (irb->mt->target == GL_TEXTURE_3D &&
1179 irb->layer_count == 1) ? GL_TEXTURE_2D :
1180 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1181 irb->mt->target;
1182
1183 /* intel_renderbuffer::mt_layer is expressed in sample units for
1184 * the UMS and CMS multisample layouts, but
1185 * intel_renderbuffer::layer_count is expressed in units of whole
1186 * logical layers regardless of the multisample layout.
1187 */
1188 const unsigned mt_layer_unit =
1189 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1190 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1191 MAX2(irb->mt->num_samples, 1) : 1;
1192
1193 const struct isl_view view = {
1194 .format = format,
1195 .base_level = irb->mt_level - irb->mt->first_level,
1196 .levels = 1,
1197 .base_array_layer = irb->mt_layer / mt_layer_unit,
1198 .array_len = irb->layer_count,
1199 .swizzle = ISL_SWIZZLE_IDENTITY,
1200 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1201 };
1202
1203 const int flags = brw->draw_aux_buffer_disabled[i] ?
1204 INTEL_AUX_BUFFER_DISABLED : 0;
1205 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1206 tex_mocs[brw->gen],
1207 surf_offset, surf_index,
1208 I915_GEM_DOMAIN_SAMPLER, 0);
1209
1210 } else {
1211 brw->vtbl.emit_null_surface_state(
1212 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1213 _mesa_geometric_samples(fb), surf_offset);
1214 }
1215 }
1216
1217 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1218 }
1219 }
1220
1221 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1222 .dirty = {
1223 .mesa = _NEW_BUFFERS,
1224 .brw = BRW_NEW_BATCH |
1225 BRW_NEW_FRAGMENT_PROGRAM |
1226 BRW_NEW_FS_PROG_DATA,
1227 },
1228 .emit = update_renderbuffer_read_surfaces,
1229 };
1230
1231 static void
1232 update_stage_texture_surfaces(struct brw_context *brw,
1233 const struct gl_program *prog,
1234 struct brw_stage_state *stage_state,
1235 bool for_gather, uint32_t plane)
1236 {
1237 if (!prog)
1238 return;
1239
1240 struct gl_context *ctx = &brw->ctx;
1241
1242 uint32_t *surf_offset = stage_state->surf_offset;
1243
1244 /* BRW_NEW_*_PROG_DATA */
1245 if (for_gather)
1246 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1247 else
1248 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1249
1250 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1251 for (unsigned s = 0; s < num_samplers; s++) {
1252 surf_offset[s] = 0;
1253
1254 if (prog->SamplersUsed & (1 << s)) {
1255 const unsigned unit = prog->SamplerUnits[s];
1256
1257 /* _NEW_TEXTURE */
1258 if (ctx->Texture.Unit[unit]._Current) {
1259 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1260 }
1261 }
1262 }
1263 }
1264
1265
1266 /**
1267 * Construct SURFACE_STATE objects for enabled textures.
1268 */
1269 static void
1270 brw_update_texture_surfaces(struct brw_context *brw)
1271 {
1272 /* BRW_NEW_VERTEX_PROGRAM */
1273 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1274
1275 /* BRW_NEW_TESS_PROGRAMS */
1276 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1277 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1278
1279 /* BRW_NEW_GEOMETRY_PROGRAM */
1280 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1281
1282 /* BRW_NEW_FRAGMENT_PROGRAM */
1283 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1284
1285 /* _NEW_TEXTURE */
1286 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1287 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1288 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1289 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1290 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1291
 1292    /* Emit an alternate set of surface state for gather. This
 1293     * allows the surface format to be overridden for only the
 1294     * gather4 messages. */
1295 if (brw->gen < 8) {
1296 if (vs && vs->nir->info->uses_texture_gather)
1297 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1298 if (tcs && tcs->nir->info->uses_texture_gather)
1299 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1300 if (tes && tes->nir->info->uses_texture_gather)
1301 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1302 if (gs && gs->nir->info->uses_texture_gather)
1303 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1304 if (fs && fs->nir->info->uses_texture_gather)
1305 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1306 }
1307
1308 if (fs) {
1309 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1310 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1311 }
1312
1313 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1314 }
1315
1316 const struct brw_tracked_state brw_texture_surfaces = {
1317 .dirty = {
1318 .mesa = _NEW_TEXTURE,
1319 .brw = BRW_NEW_BATCH |
1320 BRW_NEW_BLORP |
1321 BRW_NEW_FRAGMENT_PROGRAM |
1322 BRW_NEW_FS_PROG_DATA |
1323 BRW_NEW_GEOMETRY_PROGRAM |
1324 BRW_NEW_GS_PROG_DATA |
1325 BRW_NEW_TESS_PROGRAMS |
1326 BRW_NEW_TCS_PROG_DATA |
1327 BRW_NEW_TES_PROG_DATA |
1328 BRW_NEW_TEXTURE_BUFFER |
1329 BRW_NEW_VERTEX_PROGRAM |
1330 BRW_NEW_VS_PROG_DATA,
1331 },
1332 .emit = brw_update_texture_surfaces,
1333 };
1334
1335 static void
1336 brw_update_cs_texture_surfaces(struct brw_context *brw)
1337 {
1338 /* BRW_NEW_COMPUTE_PROGRAM */
1339 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1340
1341 /* _NEW_TEXTURE */
1342 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1343
 1344    /* Emit an alternate set of surface state for gather. This
 1345     * allows the surface format to be overridden for only the
 1346     * gather4 messages.
1347 */
1348 if (brw->gen < 8) {
1349 if (cs && cs->nir->info->uses_texture_gather)
1350 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1351 }
1352
1353 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1354 }
1355
1356 const struct brw_tracked_state brw_cs_texture_surfaces = {
1357 .dirty = {
1358 .mesa = _NEW_TEXTURE,
1359 .brw = BRW_NEW_BATCH |
1360 BRW_NEW_BLORP |
1361 BRW_NEW_COMPUTE_PROGRAM,
1362 },
1363 .emit = brw_update_cs_texture_surfaces,
1364 };
1365
1366
1367 void
1368 brw_upload_ubo_surfaces(struct brw_context *brw,
1369 struct gl_linked_shader *shader,
1370 struct brw_stage_state *stage_state,
1371 struct brw_stage_prog_data *prog_data)
1372 {
1373 struct gl_context *ctx = &brw->ctx;
1374
1375 if (!shader)
1376 return;
1377
1378 uint32_t *ubo_surf_offsets =
1379 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1380
1381 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1382 struct gl_uniform_buffer_binding *binding =
1383 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1384
1385 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1386 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1387 } else {
1388 struct intel_buffer_object *intel_bo =
1389 intel_buffer_object(binding->BufferObject);
1390 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1391 if (!binding->AutomaticSize)
1392 size = MIN2(size, binding->Size);
1393 drm_intel_bo *bo =
1394 intel_bufferobj_buffer(brw, intel_bo,
1395 binding->Offset,
1396 size);
1397 brw_create_constant_surface(brw, bo, binding->Offset,
1398 size,
1399 &ubo_surf_offsets[i]);
1400 }
1401 }
1402
1403 uint32_t *ssbo_surf_offsets =
1404 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1405
1406 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1407 struct gl_shader_storage_buffer_binding *binding =
1408 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1409
1410 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1411 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1412 } else {
1413 struct intel_buffer_object *intel_bo =
1414 intel_buffer_object(binding->BufferObject);
1415 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1416 if (!binding->AutomaticSize)
1417 size = MIN2(size, binding->Size);
1418 drm_intel_bo *bo =
1419 intel_bufferobj_buffer(brw, intel_bo,
1420 binding->Offset,
1421 size);
1422 brw_create_buffer_surface(brw, bo, binding->Offset,
1423 size,
1424 &ssbo_surf_offsets[i]);
1425 }
1426 }
1427
1428 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1429 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1430 }
1431
1432 static void
1433 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1434 {
1435 struct gl_context *ctx = &brw->ctx;
1436 /* _NEW_PROGRAM */
1437 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1438
1439 if (!prog)
1440 return;
1441
1442 /* BRW_NEW_FS_PROG_DATA */
1443 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1444 &brw->wm.base, brw->wm.base.prog_data);
1445 }
1446
1447 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1448 .dirty = {
1449 .mesa = _NEW_PROGRAM,
1450 .brw = BRW_NEW_BATCH |
1451 BRW_NEW_BLORP |
1452 BRW_NEW_FS_PROG_DATA |
1453 BRW_NEW_UNIFORM_BUFFER,
1454 },
1455 .emit = brw_upload_wm_ubo_surfaces,
1456 };
1457
1458 static void
1459 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1460 {
1461 struct gl_context *ctx = &brw->ctx;
1462 /* _NEW_PROGRAM */
1463 struct gl_shader_program *prog =
1464 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1465
1466 if (!prog)
1467 return;
1468
1469 /* BRW_NEW_CS_PROG_DATA */
1470 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1471 &brw->cs.base, brw->cs.base.prog_data);
1472 }
1473
1474 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1475 .dirty = {
1476 .mesa = _NEW_PROGRAM,
1477 .brw = BRW_NEW_BATCH |
1478 BRW_NEW_BLORP |
1479 BRW_NEW_CS_PROG_DATA |
1480 BRW_NEW_UNIFORM_BUFFER,
1481 },
1482 .emit = brw_upload_cs_ubo_surfaces,
1483 };
1484
1485 void
1486 brw_upload_abo_surfaces(struct brw_context *brw,
1487 const struct gl_program *prog,
1488 struct brw_stage_state *stage_state,
1489 struct brw_stage_prog_data *prog_data)
1490 {
1491 struct gl_context *ctx = &brw->ctx;
1492 uint32_t *surf_offsets =
1493 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1494
1495 if (prog->info.num_abos) {
1496 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1497 struct gl_atomic_buffer_binding *binding =
1498 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1499 struct intel_buffer_object *intel_bo =
1500 intel_buffer_object(binding->BufferObject);
1501 drm_intel_bo *bo = intel_bufferobj_buffer(
1502 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1503
1504 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1505 binding->Offset, BRW_SURFACEFORMAT_RAW,
1506 bo->size - binding->Offset, 1, true);
1507 }
1508
1509 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1510 }
1511 }
1512
1513 static void
1514 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1515 {
1516 /* _NEW_PROGRAM */
1517 const struct gl_program *wm = brw->fragment_program;
1518
1519 if (wm) {
1520 /* BRW_NEW_FS_PROG_DATA */
1521 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1522 }
1523 }
1524
1525 const struct brw_tracked_state brw_wm_abo_surfaces = {
1526 .dirty = {
1527 .mesa = _NEW_PROGRAM,
1528 .brw = BRW_NEW_ATOMIC_BUFFER |
1529 BRW_NEW_BLORP |
1530 BRW_NEW_BATCH |
1531 BRW_NEW_FS_PROG_DATA,
1532 },
1533 .emit = brw_upload_wm_abo_surfaces,
1534 };
1535
1536 static void
1537 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1538 {
1539 /* _NEW_PROGRAM */
1540 const struct gl_program *cp = brw->compute_program;
1541
1542 if (cp) {
1543 /* BRW_NEW_CS_PROG_DATA */
1544 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1545 }
1546 }
1547
1548 const struct brw_tracked_state brw_cs_abo_surfaces = {
1549 .dirty = {
1550 .mesa = _NEW_PROGRAM,
1551 .brw = BRW_NEW_ATOMIC_BUFFER |
1552 BRW_NEW_BLORP |
1553 BRW_NEW_BATCH |
1554 BRW_NEW_CS_PROG_DATA,
1555 },
1556 .emit = brw_upload_cs_abo_surfaces,
1557 };
1558
1559 static void
1560 brw_upload_cs_image_surfaces(struct brw_context *brw)
1561 {
1562 struct gl_context *ctx = &brw->ctx;
1563 /* _NEW_PROGRAM */
1564 struct gl_shader_program *prog =
1565 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1566 const struct gl_program *cp = brw->compute_program;
1567
1568 if (cp && prog) {
1569 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1570 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1571 cp, &brw->cs.base, brw->cs.base.prog_data);
1572 }
1573 }
1574
1575 const struct brw_tracked_state brw_cs_image_surfaces = {
1576 .dirty = {
1577 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1578 .brw = BRW_NEW_BATCH |
1579 BRW_NEW_BLORP |
1580 BRW_NEW_CS_PROG_DATA |
1581 BRW_NEW_IMAGE_UNITS
1582 },
1583 .emit = brw_upload_cs_image_surfaces,
1584 };
1585
1586 static uint32_t
1587 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1588 {
1589 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1590 uint32_t hw_format = brw_format_for_mesa_format(format);
1591 if (access == GL_WRITE_ONLY) {
1592 return hw_format;
1593 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1594 /* Typed surface reads support a very limited subset of the shader
1595 * image formats. Translate it into the closest format the
1596 * hardware supports.
1597 */
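      /* For example, an RGBA8 image bound for read or read/write access is
       * typically lowered to a single 32-bit channel here (the exact mapping
       * is generation-dependent) and unpacked in the shader; write-only
       * access keeps the requested format in the branch above.
       */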
1598 return isl_lower_storage_image_format(devinfo, hw_format);
1599 } else {
1600 /* The hardware doesn't actually support a typed format that we can use
1601 * so we have to fall back to untyped read/write messages.
1602 */
1603 return BRW_SURFACEFORMAT_RAW;
1604 }
1605 }
1606
1607 static void
1608 update_default_image_param(struct brw_context *brw,
1609 struct gl_image_unit *u,
1610 unsigned surface_idx,
1611 struct brw_image_param *param)
1612 {
1613 memset(param, 0, sizeof(*param));
1614 param->surface_idx = surface_idx;
1615 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1616 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1617 * detailed explanation of these parameters.
1618 */
1619 param->swizzling[0] = 0xff;
1620 param->swizzling[1] = 0xff;
1621 }
1622
1623 static void
1624 update_buffer_image_param(struct brw_context *brw,
1625 struct gl_image_unit *u,
1626 unsigned surface_idx,
1627 struct brw_image_param *param)
1628 {
1629 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1630 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1631 update_default_image_param(brw, u, surface_idx, param);
1632
1633 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1634 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1635 }
1636
1637 static void
1638 update_texture_image_param(struct brw_context *brw,
1639 struct gl_image_unit *u,
1640 unsigned surface_idx,
1641 struct brw_image_param *param)
1642 {
1643 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1644
1645 update_default_image_param(brw, u, surface_idx, param);
1646
1647 param->size[0] = minify(mt->logical_width0, u->Level);
1648 param->size[1] = minify(mt->logical_height0, u->Level);
1649 param->size[2] = (!u->Layered ? 1 :
1650 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1651 u->TexObj->Target == GL_TEXTURE_3D ?
1652 minify(mt->logical_depth0, u->Level) :
1653 mt->logical_depth0);
1654
1655 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1656 &param->offset[0],
1657 &param->offset[1]);
1658
1659 param->stride[0] = mt->cpp;
1660 param->stride[1] = mt->pitch / mt->cpp;
1661 param->stride[2] =
1662 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1663 param->stride[3] =
1664 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1665
1666 if (mt->tiling == I915_TILING_X) {
1667 /* An X tile is a rectangular block of 512x8 bytes. */
1668 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1669 param->tiling[1] = _mesa_logbase2(8);
1670
1671 if (brw->has_swizzling) {
1672 /* Right shifts required to swizzle bits 9 and 10 of the memory
1673 * address with bit 6.
1674 */
1675 param->swizzling[0] = 3;
1676 param->swizzling[1] = 4;
1677 }
1678 } else if (mt->tiling == I915_TILING_Y) {
 1679       /* The layout of a Y-tiled surface in memory isn't really fundamentally
 1680        * different from the layout of an X-tiled surface; we simply pretend that
 1681        * the surface is broken up into a number of smaller 16Bx32 tiles, each
 1682        * one arranged in X-major order, just as is the case for X-tiling.
1683 */
1684 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1685 param->tiling[1] = _mesa_logbase2(32);
1686
1687 if (brw->has_swizzling) {
1688 /* Right shift required to swizzle bit 9 of the memory address with
1689 * bit 6.
1690 */
1691 param->swizzling[0] = 3;
1692 }
1693 }
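   /* For example, with a 4-byte texel format the X-tiled case above gives
    * tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3, i.e. a
    * 128x8-texel tile.
    */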
1694
1695 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1696 * address calculation algorithm (emit_address_calculation() in
1697 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1698 * modulus equal to the LOD.
1699 */
1700 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1701 0);
1702 }
1703
1704 static void
1705 update_image_surface(struct brw_context *brw,
1706 struct gl_image_unit *u,
1707 GLenum access,
1708 unsigned surface_idx,
1709 uint32_t *surf_offset,
1710 struct brw_image_param *param)
1711 {
1712 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1713 struct gl_texture_object *obj = u->TexObj;
1714 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1715
1716 if (obj->Target == GL_TEXTURE_BUFFER) {
1717 struct intel_buffer_object *intel_obj =
1718 intel_buffer_object(obj->BufferObject);
1719 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1720 _mesa_get_format_bytes(u->_ActualFormat));
1721
1722 brw_emit_buffer_surface_state(
1723 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1724 format, intel_obj->Base.Size, texel_size,
1725 access != GL_READ_ONLY);
1726
1727 update_buffer_image_param(brw, u, surface_idx, param);
1728
1729 } else {
1730 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1731 struct intel_mipmap_tree *mt = intel_obj->mt;
1732
1733 if (format == BRW_SURFACEFORMAT_RAW) {
1734 brw_emit_buffer_surface_state(
1735 brw, surf_offset, mt->bo, mt->offset,
1736 format, mt->bo->size - mt->offset, 1 /* pitch */,
1737 access != GL_READ_ONLY);
1738
1739 } else {
1740 const unsigned num_layers = (!u->Layered ? 1 :
1741 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1742 mt->logical_depth0);
1743
1744 struct isl_view view = {
1745 .format = format,
1746 .base_level = obj->MinLevel + u->Level,
1747 .levels = 1,
1748 .base_array_layer = obj->MinLayer + u->_Layer,
1749 .array_len = num_layers,
1750 .swizzle = ISL_SWIZZLE_IDENTITY,
1751 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1752 };
1753
1754 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1755 const int flags =
1756 mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED ?
1757 INTEL_AUX_BUFFER_DISABLED : 0;
1758 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1759 tex_mocs[brw->gen],
1760 surf_offset, surf_index,
1761 I915_GEM_DOMAIN_SAMPLER,
1762 access == GL_READ_ONLY ? 0 :
1763 I915_GEM_DOMAIN_SAMPLER);
1764 }
1765
1766 update_texture_image_param(brw, u, surface_idx, param);
1767 }
1768
1769 } else {
1770 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1771 update_default_image_param(brw, u, surface_idx, param);
1772 }
1773 }
1774
1775 void
1776 brw_upload_image_surfaces(struct brw_context *brw,
1777 struct gl_linked_shader *shader,
1778 const struct gl_program *prog,
1779 struct brw_stage_state *stage_state,
1780 struct brw_stage_prog_data *prog_data)
1781 {
1782 assert(prog);
1783 struct gl_context *ctx = &brw->ctx;
1784
1785 if (prog->info.num_images && shader) {
1786 for (unsigned i = 0; i < prog->info.num_images; i++) {
1787 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1788 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1789
1790 update_image_surface(brw, u, shader->ImageAccess[i],
1791 surf_idx,
1792 &stage_state->surf_offset[surf_idx],
1793 &prog_data->image_param[i]);
1794 }
1795
1796 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
 1797       /* This may have changed the image metadata that depends on the context
 1798        * image unit state and is passed to the program as uniforms, so make
 1799        * sure that push and pull constants are re-uploaded.
1800 */
1801 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1802 }
1803 }
1804
1805 static void
1806 brw_upload_wm_image_surfaces(struct brw_context *brw)
1807 {
1808 struct gl_context *ctx = &brw->ctx;
1809 /* BRW_NEW_FRAGMENT_PROGRAM */
1810 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1811 const struct gl_program *wm = brw->fragment_program;
1812
1813 if (wm && prog) {
1814 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1815 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1816 wm, &brw->wm.base, brw->wm.base.prog_data);
1817 }
1818 }
1819
1820 const struct brw_tracked_state brw_wm_image_surfaces = {
1821 .dirty = {
1822 .mesa = _NEW_TEXTURE,
1823 .brw = BRW_NEW_BATCH |
1824 BRW_NEW_BLORP |
1825 BRW_NEW_FRAGMENT_PROGRAM |
1826 BRW_NEW_FS_PROG_DATA |
1827 BRW_NEW_IMAGE_UNITS
1828 },
1829 .emit = brw_upload_wm_image_surfaces,
1830 };
1831
1832 void
1833 gen4_init_vtable_surface_functions(struct brw_context *brw)
1834 {
1835 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1836 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1837 }
1838
1839 void
1840 gen6_init_vtable_surface_functions(struct brw_context *brw)
1841 {
1842 gen4_init_vtable_surface_functions(brw);
1843 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1844 }
1845
1846 static void
1847 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1848 {
1849 struct gl_context *ctx = &brw->ctx;
1850 /* _NEW_PROGRAM */
1851 struct gl_shader_program *prog =
1852 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1853 /* BRW_NEW_CS_PROG_DATA */
1854 const struct brw_cs_prog_data *cs_prog_data =
1855 brw_cs_prog_data(brw->cs.base.prog_data);
1856
1857 if (prog && cs_prog_data->uses_num_work_groups) {
1858 const unsigned surf_idx =
1859 cs_prog_data->binding_table.work_groups_start;
1860 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1861 drm_intel_bo *bo;
1862 uint32_t bo_offset;
1863
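      /* For a direct glDispatchCompute the three work-group counts only exist
       * on the CPU and are uploaded here; for glDispatchComputeIndirect,
       * num_work_groups_bo already points at the indirect parameter buffer.
       */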
1864 if (brw->compute.num_work_groups_bo == NULL) {
1865 bo = NULL;
1866 intel_upload_data(brw,
1867 (void *)brw->compute.num_work_groups,
1868 3 * sizeof(GLuint),
1869 sizeof(GLuint),
1870 &bo,
1871 &bo_offset);
1872 } else {
1873 bo = brw->compute.num_work_groups_bo;
1874 bo_offset = brw->compute.num_work_groups_offset;
1875 }
1876
1877 brw_emit_buffer_surface_state(brw, surf_offset,
1878 bo, bo_offset,
1879 BRW_SURFACEFORMAT_RAW,
1880 3 * sizeof(GLuint), 1, true);
1881 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1882 }
1883 }
1884
1885 const struct brw_tracked_state brw_cs_work_groups_surface = {
1886 .dirty = {
1887 .brw = BRW_NEW_BLORP |
1888 BRW_NEW_CS_PROG_DATA |
1889 BRW_NEW_CS_WORK_GROUPS
1890 },
1891 .emit = brw_upload_cs_work_groups_surface,
1892 };