i965/miptree: Refactor isl aux usage resolver
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 [10] = CNL_MOCS_WB,
68 };
69
70 uint32_t rb_mocs[] = {
71 [7] = GEN7_MOCS_L3,
72 [8] = BDW_MOCS_PTE,
73 [9] = SKL_MOCS_PTE,
74 [10] = CNL_MOCS_PTE,
75 };
76
77 static void
78 brw_emit_surface_state(struct brw_context *brw,
79 struct intel_mipmap_tree *mt, uint32_t flags,
80 GLenum target, struct isl_view view,
81 uint32_t mocs, uint32_t *surf_offset, int surf_index,
82 unsigned read_domains, unsigned write_domains)
83 {
84 uint32_t tile_x = mt->level[0].slice[0].x_offset;
85 uint32_t tile_y = mt->level[0].slice[0].y_offset;
86 uint32_t offset = mt->offset;
87
88 struct isl_surf surf;
89 intel_miptree_get_isl_surf(brw, mt, &surf);
90
91 surf.dim = get_isl_surf_dim(target);
92
93 const enum isl_dim_layout dim_layout =
94 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target,
95 mt->array_layout);
96
97 if (surf.dim_layout != dim_layout) {
98 /* The layout of the specified texture target is not compatible with the
99 * actual layout of the miptree structure in memory -- you're entering
100 * dangerous territory. This can only possibly work if the caller only
101 * intends to access a single level and slice of the texture, and if the
102 * hardware supports the tile offset feature to allow non-tile-aligned
103 * base offsets, since we'll have to point the hardware at the first
104 * texel of the level instead of relying on the usual base level/layer
105 * controls.
106 */
107 assert(brw->has_surface_tile_offset);
108 assert(view.levels == 1 && view.array_len == 1);
109 assert(tile_x == 0 && tile_y == 0);
110
111 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
112 view.base_array_layer,
113 &tile_x, &tile_y);
114
115 /* Minify the logical dimensions of the texture. */
116 const unsigned l = view.base_level - mt->first_level;
117 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
118 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
119 minify(surf.logical_level0_px.height, l);
120 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
121 minify(surf.logical_level0_px.depth, l);
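      /* Worked example (illustrative): viewing level 2 of a 64x64 2D miptree
       * here shrinks logical_level0_px to 16x16, with depth forced to 1 for
       * the 2D case.
       */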
122
123 /* Only the base level and layer can be addressed with the overridden
124 * layout.
125 */
126 surf.logical_level0_px.array_len = 1;
127 surf.levels = 1;
128 surf.dim_layout = dim_layout;
129
130 /* The requested slice of the texture is now at the base level and
131 * layer.
132 */
133 view.base_level = 0;
134 view.base_array_layer = 0;
135 }
136
137 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
138
139 struct brw_bo *aux_bo;
140 struct isl_surf *aux_surf = NULL, aux_surf_s;
141 uint64_t aux_offset = 0;
142 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
143 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
144 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
145 aux_usage = intel_miptree_get_aux_isl_usage(brw, mt);
146 intel_miptree_get_aux_isl_surf(brw, mt, aux_usage, &aux_surf_s);
147 aux_surf = &aux_surf_s;
148
149 if (mt->mcs_buf) {
150 aux_bo = mt->mcs_buf->bo;
151 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
152 } else {
153 aux_bo = mt->hiz_buf->aux_base.bo;
154 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
155 }
156
157 /* We only really need a clear color if we also have an auxiliary
158 * surface. Without one, it does nothing.
159 */
160 clear_color = mt->fast_clear_color;
161 }
162
163 void *state = brw_state_batch(brw,
164 brw->isl_dev.ss.size,
165 brw->isl_dev.ss.align,
166 surf_offset);
167
168 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
169 .address = mt->bo->offset64 + offset,
170 .aux_surf = aux_surf, .aux_usage = aux_usage,
171 .aux_address = aux_offset,
172 .mocs = mocs, .clear_color = clear_color,
173 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
174
175 brw_emit_reloc(&brw->batch, *surf_offset + brw->isl_dev.ss.addr_offset,
176 mt->bo, offset, read_domains, write_domains);
177
178 if (aux_surf) {
179 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
180 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
181 * contain other control information. Since buffer addresses are always
182 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
183 * an ordinary reloc to do the necessary address translation.
184 */
185 assert((aux_offset & 0xfff) == 0);
186 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
187 brw_emit_reloc(&brw->batch,
188 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
189 aux_bo, *aux_addr - aux_bo->offset64,
190 read_domains, write_domains);
191 }
192 }
193
194 uint32_t
195 brw_update_renderbuffer_surface(struct brw_context *brw,
196 struct gl_renderbuffer *rb,
197 uint32_t flags, unsigned unit /* unused */,
198 uint32_t surf_index)
199 {
200 struct gl_context *ctx = &brw->ctx;
201 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
202 struct intel_mipmap_tree *mt = irb->mt;
203
204 if (brw->gen < 9) {
205 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
206 }
207
208 assert(brw_render_target_supported(brw, rb));
209
210 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
211 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
212 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
213 __func__, _mesa_get_format_name(rb_format));
214 }
215
216 const unsigned layer_multiplier =
217 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
218 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
219 MAX2(irb->mt->num_samples, 1) : 1;
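   /* Worked example (illustrative): for a 4x UMS/CMS renderbuffer the miptree
    * stores four "sample layers" per logical layer, so an irb->mt_layer of 8
    * becomes base_array_layer 8 / 4 == 2 in the view below.
    */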
220
221 struct isl_view view = {
222 .format = brw->render_target_format[rb_format],
223 .base_level = irb->mt_level - irb->mt->first_level,
224 .levels = 1,
225 .base_array_layer = irb->mt_layer / layer_multiplier,
226 .array_len = MAX2(irb->layer_count, 1),
227 .swizzle = ISL_SWIZZLE_IDENTITY,
228 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
229 };
230
231 uint32_t offset;
232 brw_emit_surface_state(brw, mt, flags, mt->target, view,
233 rb_mocs[brw->gen],
234 &offset, surf_index,
235 I915_GEM_DOMAIN_RENDER,
236 I915_GEM_DOMAIN_RENDER);
237 return offset;
238 }
239
240 GLuint
241 translate_tex_target(GLenum target)
242 {
243 switch (target) {
244 case GL_TEXTURE_1D:
245 case GL_TEXTURE_1D_ARRAY_EXT:
246 return BRW_SURFACE_1D;
247
248 case GL_TEXTURE_RECTANGLE_NV:
249 return BRW_SURFACE_2D;
250
251 case GL_TEXTURE_2D:
252 case GL_TEXTURE_2D_ARRAY_EXT:
253 case GL_TEXTURE_EXTERNAL_OES:
254 case GL_TEXTURE_2D_MULTISAMPLE:
255 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
256 return BRW_SURFACE_2D;
257
258 case GL_TEXTURE_3D:
259 return BRW_SURFACE_3D;
260
261 case GL_TEXTURE_CUBE_MAP:
262 case GL_TEXTURE_CUBE_MAP_ARRAY:
263 return BRW_SURFACE_CUBE;
264
265 default:
266 unreachable("not reached");
267 }
268 }
269
270 uint32_t
271 brw_get_surface_tiling_bits(uint32_t tiling)
272 {
273 switch (tiling) {
274 case I915_TILING_X:
275 return BRW_SURFACE_TILED;
276 case I915_TILING_Y:
277 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
278 default:
279 return 0;
280 }
281 }
282
283
284 uint32_t
285 brw_get_surface_num_multisamples(unsigned num_samples)
286 {
287 if (num_samples > 1)
288 return BRW_SURFACE_MULTISAMPLECOUNT_4;
289 else
290 return BRW_SURFACE_MULTISAMPLECOUNT_1;
291 }
292
293 /**
294 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
295 * swizzling.
296 */
297 int
298 brw_get_texture_swizzle(const struct gl_context *ctx,
299 const struct gl_texture_object *t)
300 {
301 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
302
303 int swizzles[SWIZZLE_NIL + 1] = {
304 SWIZZLE_X,
305 SWIZZLE_Y,
306 SWIZZLE_Z,
307 SWIZZLE_W,
308 SWIZZLE_ZERO,
309 SWIZZLE_ONE,
310 SWIZZLE_NIL
311 };
312
313 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
314 img->_BaseFormat == GL_DEPTH_STENCIL) {
315 GLenum depth_mode = t->DepthMode;
316
317 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
318 * with depth component data specified with a sized internal format.
319 * Otherwise, it's left at the old default, GL_LUMINANCE.
320 */
321 if (_mesa_is_gles3(ctx) &&
322 img->InternalFormat != GL_DEPTH_COMPONENT &&
323 img->InternalFormat != GL_DEPTH_STENCIL) {
324 depth_mode = GL_RED;
325 }
326
327 switch (depth_mode) {
328 case GL_ALPHA:
329 swizzles[0] = SWIZZLE_ZERO;
330 swizzles[1] = SWIZZLE_ZERO;
331 swizzles[2] = SWIZZLE_ZERO;
332 swizzles[3] = SWIZZLE_X;
333 break;
334 case GL_LUMINANCE:
335 swizzles[0] = SWIZZLE_X;
336 swizzles[1] = SWIZZLE_X;
337 swizzles[2] = SWIZZLE_X;
338 swizzles[3] = SWIZZLE_ONE;
339 break;
340 case GL_INTENSITY:
341 swizzles[0] = SWIZZLE_X;
342 swizzles[1] = SWIZZLE_X;
343 swizzles[2] = SWIZZLE_X;
344 swizzles[3] = SWIZZLE_X;
345 break;
346 case GL_RED:
347 swizzles[0] = SWIZZLE_X;
348 swizzles[1] = SWIZZLE_ZERO;
349 swizzles[2] = SWIZZLE_ZERO;
350 swizzles[3] = SWIZZLE_ONE;
351 break;
352 }
353 }
354
355 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
356
357 /* If the texture's format is alpha-only, force R, G, and B to
358 * 0.0. Similarly, if the texture's format has no alpha channel,
359 * force the alpha value read to 1.0. This allows the
360 * implementation to use an RGBA texture for any of these formats
361 * without leaking any unexpected values.
362 */
363 switch (img->_BaseFormat) {
364 case GL_ALPHA:
365 swizzles[0] = SWIZZLE_ZERO;
366 swizzles[1] = SWIZZLE_ZERO;
367 swizzles[2] = SWIZZLE_ZERO;
368 break;
369 case GL_LUMINANCE:
370 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
371 swizzles[0] = SWIZZLE_X;
372 swizzles[1] = SWIZZLE_X;
373 swizzles[2] = SWIZZLE_X;
374 swizzles[3] = SWIZZLE_ONE;
375 }
376 break;
377 case GL_LUMINANCE_ALPHA:
378 if (datatype == GL_SIGNED_NORMALIZED) {
379 swizzles[0] = SWIZZLE_X;
380 swizzles[1] = SWIZZLE_X;
381 swizzles[2] = SWIZZLE_X;
382 swizzles[3] = SWIZZLE_W;
383 }
384 break;
385 case GL_INTENSITY:
386 if (datatype == GL_SIGNED_NORMALIZED) {
387 swizzles[0] = SWIZZLE_X;
388 swizzles[1] = SWIZZLE_X;
389 swizzles[2] = SWIZZLE_X;
390 swizzles[3] = SWIZZLE_X;
391 }
392 break;
393 case GL_RED:
394 case GL_RG:
395 case GL_RGB:
396 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
397 img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
398 img->TexFormat == MESA_FORMAT_SRGB_DXT1)
399 swizzles[3] = SWIZZLE_ONE;
400 break;
401 }
402
403 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
404 swizzles[GET_SWZ(t->_Swizzle, 1)],
405 swizzles[GET_SWZ(t->_Swizzle, 2)],
406 swizzles[GET_SWZ(t->_Swizzle, 3)]);
407 }
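/* Worked example (illustrative, not part of the original code): for a texture
 * with _BaseFormat == GL_DEPTH_COMPONENT, DepthMode == GL_LUMINANCE and an
 * identity application swizzle, swizzles[] ends up as {X, X, X, ONE}, so the
 * MAKE_SWIZZLE4() above replicates depth into R, G and B and returns 1.0 for
 * alpha.
 */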
408
409 /**
410 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
411 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
412 *
413 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
414 * 0 1 2 3 4 5
415 * 4 5 6 7 0 1
416 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
417 *
418 * which is simply adding 4 then modding by 8 (or anding with 7).
419 *
420 * We then may need to apply workarounds for textureGather hardware bugs.
421 */
422 static unsigned
423 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
424 {
425 unsigned scs = (swizzle + 4) & 7;
426
427 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
428 }
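/* Sanity check of the mapping above (illustrative, using the numbering shown
 * in the comment): SWIZZLE_X == 0 gives (0 + 4) & 7 == 4 == SCS_RED,
 * SWIZZLE_ZERO == 4 gives (4 + 4) & 7 == 0 == SCS_ZERO, and
 * SWIZZLE_ONE == 5 gives (5 + 4) & 7 == 1 == SCS_ONE.
 */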
429
430 static unsigned
431 brw_find_matching_rb(const struct gl_framebuffer *fb,
432 const struct intel_mipmap_tree *mt)
433 {
434 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
435 const struct intel_renderbuffer *irb =
436 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
437
438 if (irb && irb->mt == mt)
439 return i;
440 }
441
442 return fb->_NumColorDrawBuffers;
443 }
444
445 static inline bool
446 brw_texture_view_sane(const struct brw_context *brw,
447 const struct intel_mipmap_tree *mt,
448 const struct isl_view *view)
449 {
450 /* There are special cases only for lossless compression. */
451 if (!intel_miptree_is_lossless_compressed(brw, mt))
452 return true;
453
454 if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
455 return true;
456
457 /* Logic elsewhere needs to take care to resolve the color buffer prior
458 * to sampling it as non-compressed.
459 */
460 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
461 view->base_array_layer,
462 view->array_len))
463 return false;
464
465 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
466 const unsigned rb_index = brw_find_matching_rb(fb, mt);
467
468 if (rb_index == fb->_NumColorDrawBuffers)
469 return true;
470
471 /* The underlying surface is compressed, but it is sampled using a format
472 * that the sampling engine doesn't support as compressed. Compression must
473 * be disabled for both the sampling engine and the data port in case the
474 * same surface is also used as a render target.
475 */
476 return brw->draw_aux_buffer_disabled[rb_index];
477 }
478
479 static bool
480 brw_disable_aux_surface(const struct brw_context *brw,
481 const struct intel_mipmap_tree *mt,
482 const struct isl_view *view)
483 {
484 /* Nothing to disable. */
485 if (!mt->mcs_buf)
486 return false;
487
488 const bool is_unresolved = intel_miptree_has_color_unresolved(
489 mt, view->base_level, view->levels,
490 view->base_array_layer, view->array_len);
491
492 /* There are special cases only for lossless compression. */
493 if (!intel_miptree_is_lossless_compressed(brw, mt))
494 return !is_unresolved;
495
496 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
497 const unsigned rb_index = brw_find_matching_rb(fb, mt);
498
499 /* If we are drawing into this with compression enabled, then we must also
500 * enable compression when texturing from it regardless of
501 * fast_clear_state. If we don't then, after the first draw call with
502 * this setup, there will be data in the CCS which won't get picked up by
503 * subsequent texturing operations as required by ARB_texture_barrier.
504 * Since we don't want to re-emit the binding table or do a resolve
505 * operation every draw call, the easiest thing to do is just enable
506 * compression on the texturing side. This is completely safe to do
507 * since, if compressed texturing weren't allowed, we would already have
508 * disabled compression of render targets when they were set up.
509 */
510 if (rb_index < fb->_NumColorDrawBuffers) {
511 if (brw->draw_aux_buffer_disabled[rb_index]) {
512 assert(!is_unresolved);
513 }
514
515 return brw->draw_aux_buffer_disabled[rb_index];
516 }
517
518 return !is_unresolved;
519 }
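/* Summary of the logic above (illustrative, derived from the code): with no
 * MCS buffer there is nothing to disable; for miptrees that aren't lossless
 * compressed (CCS_E), the aux buffer is disabled once the surface is fully
 * resolved; for CCS_E surfaces that are also bound as a draw buffer we follow
 * draw_aux_buffer_disabled; otherwise we again disable only when resolved.
 */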
520
521 void
522 brw_update_texture_surface(struct gl_context *ctx,
523 unsigned unit,
524 uint32_t *surf_offset,
525 bool for_gather,
526 uint32_t plane)
527 {
528 struct brw_context *brw = brw_context(ctx);
529 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
530
531 if (obj->Target == GL_TEXTURE_BUFFER) {
532 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
533
534 } else {
535 struct intel_texture_object *intel_obj = intel_texture_object(obj);
536 struct intel_mipmap_tree *mt = intel_obj->mt;
537
538 if (plane > 0) {
539 if (mt->plane[plane - 1] == NULL)
540 return;
541 mt = mt->plane[plane - 1];
542 }
543
544 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
545 /* If this is a view with restricted NumLayers, then our effective depth
546 * is not just the miptree depth.
547 */
548 const unsigned view_num_layers =
549 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
550 mt->logical_depth0;
551
552 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
553 * texturing functions that return a float, as our code generation always
554 * selects the .x channel (which would always be 0).
555 */
556 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
557 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
558 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
559 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
560 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
561 brw_get_texture_swizzle(&brw->ctx, obj));
562
563 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
564 enum isl_format format = translate_tex_format(brw, mesa_fmt,
565 sampler->sRGBDecode);
566
567 /* Implement gen6 and gen7 gather work-around */
568 bool need_green_to_blue = false;
569 if (for_gather) {
570 if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
571 format == ISL_FORMAT_R32G32_SINT ||
572 format == ISL_FORMAT_R32G32_UINT)) {
573 format = ISL_FORMAT_R32G32_FLOAT_LD;
574 need_green_to_blue = brw->is_haswell;
575 } else if (brw->gen == 6) {
576 /* Sandybridge's gather4 message is broken for integer formats.
577 * To work around this, we pretend the surface is UNORM for
578 * 8 or 16-bit formats, and emit shader instructions to recover
579 * the real INT/UINT value. For 32-bit formats, we pretend
580 * the surface is FLOAT, and simply reinterpret the resulting
581 * bits.
582 */
583 switch (format) {
584 case ISL_FORMAT_R8_SINT:
585 case ISL_FORMAT_R8_UINT:
586 format = ISL_FORMAT_R8_UNORM;
587 break;
588
589 case ISL_FORMAT_R16_SINT:
590 case ISL_FORMAT_R16_UINT:
591 format = ISL_FORMAT_R16_UNORM;
592 break;
593
594 case ISL_FORMAT_R32_SINT:
595 case ISL_FORMAT_R32_UINT:
596 format = ISL_FORMAT_R32_FLOAT;
597 break;
598
599 default:
600 break;
601 }
602 }
603 }
604
605 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
606 if (brw->gen <= 7) {
607 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
608 mt = mt->r8stencil_mt;
609 } else {
610 mt = mt->stencil_mt;
611 }
612 format = ISL_FORMAT_R8_UINT;
613 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
614 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
615 mt = mt->r8stencil_mt;
616 format = ISL_FORMAT_R8_UINT;
617 }
618
619 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
620
621 struct isl_view view = {
622 .format = format,
623 .base_level = obj->MinLevel + obj->BaseLevel,
624 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
625 .base_array_layer = obj->MinLayer,
626 .array_len = view_num_layers,
627 .swizzle = {
628 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
629 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
630 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
631 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
632 },
633 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
634 };
635
636 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
637 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
638 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
639
640 assert(brw_texture_view_sane(brw, mt, &view));
641
642 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
643 INTEL_AUX_BUFFER_DISABLED : 0;
644 brw_emit_surface_state(brw, mt, flags, mt->target, view,
645 tex_mocs[brw->gen],
646 surf_offset, surf_index,
647 I915_GEM_DOMAIN_SAMPLER, 0);
648 }
649 }
650
651 void
652 brw_emit_buffer_surface_state(struct brw_context *brw,
653 uint32_t *out_offset,
654 struct brw_bo *bo,
655 unsigned buffer_offset,
656 unsigned surface_format,
657 unsigned buffer_size,
658 unsigned pitch,
659 bool rw)
660 {
661 uint32_t *dw = brw_state_batch(brw,
662 brw->isl_dev.ss.size,
663 brw->isl_dev.ss.align,
664 out_offset);
665
666 isl_buffer_fill_state(&brw->isl_dev, dw,
667 .address = (bo ? bo->offset64 : 0) + buffer_offset,
668 .size = buffer_size,
669 .format = surface_format,
670 .stride = pitch,
671 .mocs = tex_mocs[brw->gen]);
672
673 if (bo) {
674 brw_emit_reloc(&brw->batch, *out_offset + brw->isl_dev.ss.addr_offset,
675 bo, buffer_offset,
676 I915_GEM_DOMAIN_SAMPLER,
677 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
678 }
679 }
680
681 void
682 brw_update_buffer_texture_surface(struct gl_context *ctx,
683 unsigned unit,
684 uint32_t *surf_offset)
685 {
686 struct brw_context *brw = brw_context(ctx);
687 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
688 struct intel_buffer_object *intel_obj =
689 intel_buffer_object(tObj->BufferObject);
690 uint32_t size = tObj->BufferSize;
691 struct brw_bo *bo = NULL;
692 mesa_format format = tObj->_BufferObjectFormat;
693 const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
694 int texel_size = _mesa_get_format_bytes(format);
695
696 if (intel_obj) {
697 size = MIN2(size, intel_obj->Base.Size);
698 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
699 }
700
701 /* The ARB_texture_buffer_object spec says:
702 *
703 * "The number of texels in the buffer texture's texel array is given by
704 *
705 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
706 *
707 * where <buffer_size> is the size of the buffer object, in basic
708 * machine units and <components> and <base_type> are the element count
709 * and base data type for elements, as specified in Table X.1. The
710 * number of texels in the texel array is then clamped to the
711 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
712 *
713 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
714 * so that when ISL divides by stride to obtain the number of texels, that
715 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
716 */
717 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
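   /* Worked example (illustrative numbers): with a MaxTextureBufferSize of
    * 2^27 texels and an RGBA32F buffer texture (texel_size == 16), the size
    * is clamped to 2^31 bytes, so ISL's later division by the 16-byte stride
    * yields at most 2^27 texels, matching the clamp described above.
    */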
718
719 if (isl_format == ISL_FORMAT_UNSUPPORTED) {
720 _mesa_problem(NULL, "bad format %s for texture buffer\n",
721 _mesa_get_format_name(format));
722 }
723
724 brw_emit_buffer_surface_state(brw, surf_offset, bo,
725 tObj->BufferOffset,
726 isl_format,
727 size,
728 texel_size,
729 false /* rw */);
730 }
731
732 /**
733 * Create the constant buffer surface. Vertex/fragment shader constants will be
734 * read from this buffer with Data Port Read instructions/messages.
735 */
736 void
737 brw_create_constant_surface(struct brw_context *brw,
738 struct brw_bo *bo,
739 uint32_t offset,
740 uint32_t size,
741 uint32_t *out_offset)
742 {
743 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
744 ISL_FORMAT_R32G32B32A32_FLOAT,
745 size, 1, false);
746 }
747
748 /**
749 * Create the buffer surface. Shader buffer variables will be
750 * read from / written to this buffer with Data Port Read/Write
751 * instructions/messages.
752 */
753 void
754 brw_create_buffer_surface(struct brw_context *brw,
755 struct brw_bo *bo,
756 uint32_t offset,
757 uint32_t size,
758 uint32_t *out_offset)
759 {
760 /* Use a raw surface so we can reuse existing untyped read/write/atomic
761 * messages. We need these specifically for the fragment shader since they
762 * include a pixel mask header that we need in order to ensure correct
763 * behavior with helper invocations, which must not write to the buffer.
764 */
765 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
766 ISL_FORMAT_RAW,
767 size, 1, true);
768 }
769
770 /**
771 * Set up a binding table entry for use by stream output logic (transform
772 * feedback).
773 *
774 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
775 */
776 void
777 brw_update_sol_surface(struct brw_context *brw,
778 struct gl_buffer_object *buffer_obj,
779 uint32_t *out_offset, unsigned num_vector_components,
780 unsigned stride_dwords, unsigned offset_dwords)
781 {
782 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
783 uint32_t offset_bytes = 4 * offset_dwords;
784 struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
785 offset_bytes,
786 buffer_obj->Size - offset_bytes);
787 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
788 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
789 size_t size_dwords = buffer_obj->Size / 4;
790 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
791
792 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
793 * too big to map using a single binding table entry?
794 */
795 assert((size_dwords - offset_dwords) / stride_dwords
796 <= BRW_MAX_NUM_BUFFER_ENTRIES);
797
798 if (size_dwords > offset_dwords + num_vector_components) {
799 /* There is room for at least 1 transform feedback output in the buffer.
800 * Compute the number of additional transform feedback outputs the
801 * buffer has room for.
802 */
803 buffer_size_minus_1 =
804 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
805 } else {
806 /* There isn't even room for a single transform feedback output in the
807 * buffer. We can't configure the binding table entry to prevent output
808 * entirely; we'll have to rely on the geometry shader to detect
809 * overflow. But to minimize the damage in case of a bug, set up the
810 * binding table entry to just allow a single output.
811 */
812 buffer_size_minus_1 = 0;
813 }
814 width = buffer_size_minus_1 & 0x7f;
815 height = (buffer_size_minus_1 & 0xfff80) >> 7;
816 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
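   /* Worked example (illustrative): a 1 MiB buffer (262144 dwords) written
    * with a 4-dword stride and no offset gives buffer_size_minus_1 ==
    * (262144 - 4) / 4 == 65535, which packs as width == 0x7f,
    * height == 0x1ff and depth == 0, i.e. the low 7, middle 13 and upper 7
    * bits of the value.
    */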
817
818 switch (num_vector_components) {
819 case 1:
820 surface_format = ISL_FORMAT_R32_FLOAT;
821 break;
822 case 2:
823 surface_format = ISL_FORMAT_R32G32_FLOAT;
824 break;
825 case 3:
826 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
827 break;
828 case 4:
829 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
830 break;
831 default:
832 unreachable("Invalid vector size for transform feedback output");
833 }
834
835 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
836 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
837 surface_format << BRW_SURFACE_FORMAT_SHIFT |
838 BRW_SURFACE_RC_READ_WRITE;
839 surf[1] = bo->offset64 + offset_bytes; /* reloc */
840 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
841 height << BRW_SURFACE_HEIGHT_SHIFT);
842 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
843 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
844 surf[4] = 0;
845 surf[5] = 0;
846
847 /* Emit relocation to surface contents. */
848 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, offset_bytes,
849 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
850 }
851
852 /* Creates a new WM constant buffer reflecting the current fragment program's
853 * constants, if needed by the fragment program.
854 *
855 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
856 * state atom.
857 */
858 static void
859 brw_upload_wm_pull_constants(struct brw_context *brw)
860 {
861 struct brw_stage_state *stage_state = &brw->wm.base;
862 /* BRW_NEW_FRAGMENT_PROGRAM */
863 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
864 /* BRW_NEW_FS_PROG_DATA */
865 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
866
867 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
868 /* _NEW_PROGRAM_CONSTANTS */
869 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
870 stage_state, prog_data);
871 }
872
873 const struct brw_tracked_state brw_wm_pull_constants = {
874 .dirty = {
875 .mesa = _NEW_PROGRAM_CONSTANTS,
876 .brw = BRW_NEW_BATCH |
877 BRW_NEW_BLORP |
878 BRW_NEW_FRAGMENT_PROGRAM |
879 BRW_NEW_FS_PROG_DATA,
880 },
881 .emit = brw_upload_wm_pull_constants,
882 };
883
884 /**
885 * Creates a null renderbuffer surface.
886 *
887 * This is used when the shader doesn't write to any color output. An FB
888 * write to target 0 will still be emitted, because that's how the thread is
889 * terminated (and computed depth is returned), so we need to have the
890 * hardware discard the target 0 color output.
891 */
892 static void
893 brw_emit_null_surface_state(struct brw_context *brw,
894 unsigned width,
895 unsigned height,
896 unsigned samples,
897 uint32_t *out_offset)
898 {
899 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
900 * Notes):
901 *
902 * A null surface will be used in instances where an actual surface is
903 * not bound. When a write message is generated to a null surface, no
904 * actual surface is written to. When a read message (including any
905 * sampling engine message) is generated to a null surface, the result
906 * is all zeros. Note that a null surface type is allowed to be used
907 * with all messages, even if it is not specifically indicated as
908 * supported. All of the remaining fields in surface state are ignored
909 * for null surfaces, with the following exceptions:
910 *
911 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
912 * depth buffer’s corresponding state for all render target surfaces,
913 * including null.
914 *
915 * - Surface Format must be R8G8B8A8_UNORM.
916 */
917 unsigned surface_type = BRW_SURFACE_NULL;
918 struct brw_bo *bo = NULL;
919 unsigned pitch_minus_1 = 0;
920 uint32_t multisampling_state = 0;
921 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
922
923 if (samples > 1) {
924 /* On Gen6, null render targets seem to cause GPU hangs when
925 * multisampling. So work around this problem by rendering into a dummy
926 * color buffer.
927 *
928 * To decrease the amount of memory needed by the workaround buffer, we
929 * set its pitch to 128 bytes (the width of a Y tile). This means that
930 * the amount of memory needed for the workaround buffer is
931 * (width_in_tiles + height_in_tiles - 1) tiles.
932 *
933 * Note that since the workaround buffer will be interpreted by the
934 * hardware as an interleaved multisampled buffer, we need to compute
935 * width_in_tiles and height_in_tiles by dividing the width and height
936 * by 16 rather than the normal Y-tile size of 32.
937 */
938 unsigned width_in_tiles = ALIGN(width, 16) / 16;
939 unsigned height_in_tiles = ALIGN(height, 16) / 16;
940 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
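      /* Worked example (illustrative): for a 1920x1080 multisampled
       * framebuffer, width_in_tiles == 120 and height_in_tiles == 68, so the
       * workaround buffer needs only (120 + 68 - 1) * 4096 bytes, roughly
       * 748 KiB, rather than a full-sized dummy render target.
       */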
941 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
942 size_needed);
943 bo = brw->wm.multisampled_null_render_target_bo;
944 surface_type = BRW_SURFACE_2D;
945 pitch_minus_1 = 127;
946 multisampling_state = brw_get_surface_num_multisamples(samples);
947 }
948
949 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
950 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
951 if (brw->gen < 6) {
952 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
953 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
954 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
955 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
956 }
957 surf[1] = bo ? bo->offset64 : 0;
958 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
959 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
960
961 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
962 * Notes):
963 *
964 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
965 */
966 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
967 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
968 surf[4] = multisampling_state;
969 surf[5] = 0;
970
971 if (bo) {
972 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, 0,
973 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
974 }
975 }
976
977 /**
978 * Sets up a surface state structure to point at the given region.
979 * While it is only used for the front/back buffer currently, it should be
980 * usable for further buffers when doing ARB_draw_buffers support.
981 */
982 static uint32_t
983 gen4_update_renderbuffer_surface(struct brw_context *brw,
984 struct gl_renderbuffer *rb,
985 uint32_t flags, unsigned unit,
986 uint32_t surf_index)
987 {
988 struct gl_context *ctx = &brw->ctx;
989 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
990 struct intel_mipmap_tree *mt = irb->mt;
991 uint32_t *surf;
992 uint32_t tile_x, tile_y;
993 enum isl_format format;
994 uint32_t offset;
995 /* _NEW_BUFFERS */
996 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
997 /* BRW_NEW_FS_PROG_DATA */
998
999 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
1000 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
1001
1002 if (rb->TexImage && !brw->has_surface_tile_offset) {
1003 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
1004
1005 if (tile_x != 0 || tile_y != 0) {
1006 /* Original gen4 hardware couldn't draw to a non-tile-aligned
1007 * destination in a miptree unless you actually set up your renderbuffer
1008 * as a miptree and used the fragile lod/array_index/etc. controls to
1009 * select the image. So, instead, we just make a new single-level
1010 * miptree and render into that.
1011 */
1012 intel_renderbuffer_move_to_temp(brw, irb, false);
1013 assert(irb->align_wa_mt);
1014 mt = irb->align_wa_mt;
1015 }
1016 }
1017
1018 surf = brw_state_batch(brw, 6 * 4, 32, &offset);
1019
1020 format = brw->render_target_format[rb_format];
1021 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1022 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1023 __func__, _mesa_get_format_name(rb_format));
1024 }
1025
1026 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1027 format << BRW_SURFACE_FORMAT_SHIFT);
1028
1029 /* reloc */
1030 assert(mt->offset % mt->cpp == 0);
1031 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1032 mt->bo->offset64 + mt->offset);
1033
1034 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1035 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1036
1037 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1038 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1039
1040 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1041
1042 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1043 /* Note that the low bits of these fields are missing (x is stored in units
1044 * of 4 pixels and y in units of 2 rows), so unaligned offsets are lost.
1045 */
1046 assert(tile_x % 4 == 0);
1047 assert(tile_y % 2 == 0);
1048 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1049 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1050 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1051
1052 if (brw->gen < 6) {
1053 /* _NEW_COLOR */
1054 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1055 (ctx->Color.BlendEnabled & (1 << unit)))
1056 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1057
1058 if (!ctx->Color.ColorMask[unit][0])
1059 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1060 if (!ctx->Color.ColorMask[unit][1])
1061 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1062 if (!ctx->Color.ColorMask[unit][2])
1063 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1064
1065 /* Disable writes to the alpha component when the
1066 * renderbuffer is XRGB.
1067 */
1068 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1069 !ctx->Color.ColorMask[unit][3]) {
1070 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1071 }
1072 }
1073
1074 brw_emit_reloc(&brw->batch, offset + 4, mt->bo, surf[1] - mt->bo->offset64,
1075 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
1076
1077 return offset;
1078 }
1079
1080 /**
1081 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1082 */
1083 void
1084 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1085 const struct gl_framebuffer *fb,
1086 uint32_t render_target_start,
1087 uint32_t *surf_offset)
1088 {
1089 GLuint i;
1090 const unsigned int w = _mesa_geometric_width(fb);
1091 const unsigned int h = _mesa_geometric_height(fb);
1092 const unsigned int s = _mesa_geometric_samples(fb);
1093
1094 /* Update surfaces for drawing buffers */
1095 if (fb->_NumColorDrawBuffers >= 1) {
1096 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1097 const uint32_t surf_index = render_target_start + i;
1098 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1099 INTEL_RENDERBUFFER_LAYERED : 0) |
1100 (brw->draw_aux_buffer_disabled[i] ?
1101 INTEL_AUX_BUFFER_DISABLED : 0);
1102
1103 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1104 surf_offset[surf_index] =
1105 brw->vtbl.update_renderbuffer_surface(
1106 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1107 } else {
1108 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1109 &surf_offset[surf_index]);
1110 }
1111 }
1112 } else {
1113 const uint32_t surf_index = render_target_start;
1114 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1115 &surf_offset[surf_index]);
1116 }
1117 }
1118
1119 static void
1120 update_renderbuffer_surfaces(struct brw_context *brw)
1121 {
1122 const struct gl_context *ctx = &brw->ctx;
1123
1124 /* BRW_NEW_FS_PROG_DATA */
1125 const struct brw_wm_prog_data *wm_prog_data =
1126 brw_wm_prog_data(brw->wm.base.prog_data);
1127
1128 /* _NEW_BUFFERS | _NEW_COLOR */
1129 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1130 brw_update_renderbuffer_surfaces(
1131 brw, fb,
1132 wm_prog_data->binding_table.render_target_start,
1133 brw->wm.base.surf_offset);
1134 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1135 }
1136
1137 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1138 .dirty = {
1139 .mesa = _NEW_BUFFERS |
1140 _NEW_COLOR,
1141 .brw = BRW_NEW_BATCH |
1142 BRW_NEW_BLORP |
1143 BRW_NEW_FS_PROG_DATA,
1144 },
1145 .emit = update_renderbuffer_surfaces,
1146 };
1147
1148 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1149 .dirty = {
1150 .mesa = _NEW_BUFFERS,
1151 .brw = BRW_NEW_BATCH |
1152 BRW_NEW_BLORP,
1153 },
1154 .emit = update_renderbuffer_surfaces,
1155 };
1156
1157 static void
1158 update_renderbuffer_read_surfaces(struct brw_context *brw)
1159 {
1160 const struct gl_context *ctx = &brw->ctx;
1161
1162 /* BRW_NEW_FS_PROG_DATA */
1163 const struct brw_wm_prog_data *wm_prog_data =
1164 brw_wm_prog_data(brw->wm.base.prog_data);
1165
1166 /* BRW_NEW_FRAGMENT_PROGRAM */
1167 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1168 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1169 /* _NEW_BUFFERS */
1170 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1171
1172 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1173 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1174 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1175 const unsigned surf_index =
1176 wm_prog_data->binding_table.render_target_read_start + i;
1177 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1178
1179 if (irb) {
1180 const enum isl_format format = brw->render_target_format[
1181 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1182 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1183 format));
1184
1185 /* Override the target of the texture if the render buffer is a
1186 * single slice of a 3D texture (since the minimum array element
1187 * field of the surface state structure is ignored by the sampler
1188 * unit for 3D textures on some hardware), or if the render buffer
1189 * is a 1D array (since shaders always provide the array index
1190 * coordinate at the Z component to avoid state-dependent
1191 * recompiles when changing the texture target of the
1192 * framebuffer).
1193 */
1194 const GLenum target =
1195 (irb->mt->target == GL_TEXTURE_3D &&
1196 irb->layer_count == 1) ? GL_TEXTURE_2D :
1197 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1198 irb->mt->target;
1199
1200 /* intel_renderbuffer::mt_layer is expressed in sample units for
1201 * the UMS and CMS multisample layouts, but
1202 * intel_renderbuffer::layer_count is expressed in units of whole
1203 * logical layers regardless of the multisample layout.
1204 */
1205 const unsigned mt_layer_unit =
1206 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1207 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1208 MAX2(irb->mt->num_samples, 1) : 1;
1209
1210 const struct isl_view view = {
1211 .format = format,
1212 .base_level = irb->mt_level - irb->mt->first_level,
1213 .levels = 1,
1214 .base_array_layer = irb->mt_layer / mt_layer_unit,
1215 .array_len = irb->layer_count,
1216 .swizzle = ISL_SWIZZLE_IDENTITY,
1217 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1218 };
1219
1220 const int flags = brw->draw_aux_buffer_disabled[i] ?
1221 INTEL_AUX_BUFFER_DISABLED : 0;
1222 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1223 tex_mocs[brw->gen],
1224 surf_offset, surf_index,
1225 I915_GEM_DOMAIN_SAMPLER, 0);
1226
1227 } else {
1228 brw->vtbl.emit_null_surface_state(
1229 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1230 _mesa_geometric_samples(fb), surf_offset);
1231 }
1232 }
1233
1234 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1235 }
1236 }
1237
1238 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1239 .dirty = {
1240 .mesa = _NEW_BUFFERS,
1241 .brw = BRW_NEW_BATCH |
1242 BRW_NEW_FRAGMENT_PROGRAM |
1243 BRW_NEW_FS_PROG_DATA,
1244 },
1245 .emit = update_renderbuffer_read_surfaces,
1246 };
1247
1248 static void
1249 update_stage_texture_surfaces(struct brw_context *brw,
1250 const struct gl_program *prog,
1251 struct brw_stage_state *stage_state,
1252 bool for_gather, uint32_t plane)
1253 {
1254 if (!prog)
1255 return;
1256
1257 struct gl_context *ctx = &brw->ctx;
1258
1259 uint32_t *surf_offset = stage_state->surf_offset;
1260
1261 /* BRW_NEW_*_PROG_DATA */
1262 if (for_gather)
1263 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1264 else
1265 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1266
1267 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1268 for (unsigned s = 0; s < num_samplers; s++) {
1269 surf_offset[s] = 0;
1270
1271 if (prog->SamplersUsed & (1 << s)) {
1272 const unsigned unit = prog->SamplerUnits[s];
1273
1274 /* _NEW_TEXTURE */
1275 if (ctx->Texture.Unit[unit]._Current) {
1276 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1277 }
1278 }
1279 }
1280 }
1281
1282
1283 /**
1284 * Construct SURFACE_STATE objects for enabled textures.
1285 */
1286 static void
1287 brw_update_texture_surfaces(struct brw_context *brw)
1288 {
1289 /* BRW_NEW_VERTEX_PROGRAM */
1290 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1291
1292 /* BRW_NEW_TESS_PROGRAMS */
1293 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1294 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1295
1296 /* BRW_NEW_GEOMETRY_PROGRAM */
1297 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1298
1299 /* BRW_NEW_FRAGMENT_PROGRAM */
1300 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1301
1302 /* _NEW_TEXTURE */
1303 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1304 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1305 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1306 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1307 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1308
1309 /* Emit an alternate set of surface state for gather. This
1310 * allows the surface format to be overridden for only the
1311 * gather4 messages. */
1312 if (brw->gen < 8) {
1313 if (vs && vs->nir->info.uses_texture_gather)
1314 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1315 if (tcs && tcs->nir->info.uses_texture_gather)
1316 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1317 if (tes && tes->nir->info.uses_texture_gather)
1318 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1319 if (gs && gs->nir->info.uses_texture_gather)
1320 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1321 if (fs && fs->nir->info.uses_texture_gather)
1322 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1323 }
1324
1325 if (fs) {
1326 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1327 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1328 }
1329
1330 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1331 }
1332
1333 const struct brw_tracked_state brw_texture_surfaces = {
1334 .dirty = {
1335 .mesa = _NEW_TEXTURE,
1336 .brw = BRW_NEW_BATCH |
1337 BRW_NEW_BLORP |
1338 BRW_NEW_FRAGMENT_PROGRAM |
1339 BRW_NEW_FS_PROG_DATA |
1340 BRW_NEW_GEOMETRY_PROGRAM |
1341 BRW_NEW_GS_PROG_DATA |
1342 BRW_NEW_TESS_PROGRAMS |
1343 BRW_NEW_TCS_PROG_DATA |
1344 BRW_NEW_TES_PROG_DATA |
1345 BRW_NEW_TEXTURE_BUFFER |
1346 BRW_NEW_VERTEX_PROGRAM |
1347 BRW_NEW_VS_PROG_DATA,
1348 },
1349 .emit = brw_update_texture_surfaces,
1350 };
1351
1352 static void
1353 brw_update_cs_texture_surfaces(struct brw_context *brw)
1354 {
1355 /* BRW_NEW_COMPUTE_PROGRAM */
1356 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1357
1358 /* _NEW_TEXTURE */
1359 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1360
1361 /* Emit an alternate set of surface state for gather. This
1362 * allows the surface format to be overridden for only the
1363 * gather4 messages.
1364 */
1365 if (brw->gen < 8) {
1366 if (cs && cs->nir->info.uses_texture_gather)
1367 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1368 }
1369
1370 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1371 }
1372
1373 const struct brw_tracked_state brw_cs_texture_surfaces = {
1374 .dirty = {
1375 .mesa = _NEW_TEXTURE,
1376 .brw = BRW_NEW_BATCH |
1377 BRW_NEW_BLORP |
1378 BRW_NEW_COMPUTE_PROGRAM,
1379 },
1380 .emit = brw_update_cs_texture_surfaces,
1381 };
1382
1383
1384 void
1385 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1386 struct brw_stage_state *stage_state,
1387 struct brw_stage_prog_data *prog_data)
1388 {
1389 struct gl_context *ctx = &brw->ctx;
1390
1391 if (!prog)
1392 return;
1393
1394 uint32_t *ubo_surf_offsets =
1395 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1396
1397 for (int i = 0; i < prog->info.num_ubos; i++) {
1398 struct gl_uniform_buffer_binding *binding =
1399 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1400
1401 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1402 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1403 } else {
1404 struct intel_buffer_object *intel_bo =
1405 intel_buffer_object(binding->BufferObject);
1406 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1407 if (!binding->AutomaticSize)
1408 size = MIN2(size, binding->Size);
1409 struct brw_bo *bo =
1410 intel_bufferobj_buffer(brw, intel_bo,
1411 binding->Offset,
1412 size);
1413 brw_create_constant_surface(brw, bo, binding->Offset,
1414 size,
1415 &ubo_surf_offsets[i]);
1416 }
1417 }
1418
1419 uint32_t *ssbo_surf_offsets =
1420 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1421
1422 for (int i = 0; i < prog->info.num_ssbos; i++) {
1423 struct gl_shader_storage_buffer_binding *binding =
1424 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1425
1426 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1427 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1428 } else {
1429 struct intel_buffer_object *intel_bo =
1430 intel_buffer_object(binding->BufferObject);
1431 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1432 if (!binding->AutomaticSize)
1433 size = MIN2(size, binding->Size);
1434 struct brw_bo *bo =
1435 intel_bufferobj_buffer(brw, intel_bo,
1436 binding->Offset,
1437 size);
1438 brw_create_buffer_surface(brw, bo, binding->Offset,
1439 size,
1440 &ssbo_surf_offsets[i]);
1441 }
1442 }
1443
1444 if (prog->info.num_ubos || prog->info.num_ssbos)
1445 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1446 }
1447
1448 static void
1449 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1450 {
1451 struct gl_context *ctx = &brw->ctx;
1452 /* _NEW_PROGRAM */
1453 struct gl_program *prog = ctx->FragmentProgram._Current;
1454
1455 /* BRW_NEW_FS_PROG_DATA */
1456 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1457 }
1458
1459 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1460 .dirty = {
1461 .mesa = _NEW_PROGRAM,
1462 .brw = BRW_NEW_BATCH |
1463 BRW_NEW_BLORP |
1464 BRW_NEW_FS_PROG_DATA |
1465 BRW_NEW_UNIFORM_BUFFER,
1466 },
1467 .emit = brw_upload_wm_ubo_surfaces,
1468 };
1469
1470 static void
1471 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1472 {
1473 struct gl_context *ctx = &brw->ctx;
1474 /* _NEW_PROGRAM */
1475 struct gl_program *prog =
1476 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1477
1478 /* BRW_NEW_CS_PROG_DATA */
1479 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1480 }
1481
1482 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1483 .dirty = {
1484 .mesa = _NEW_PROGRAM,
1485 .brw = BRW_NEW_BATCH |
1486 BRW_NEW_BLORP |
1487 BRW_NEW_CS_PROG_DATA |
1488 BRW_NEW_UNIFORM_BUFFER,
1489 },
1490 .emit = brw_upload_cs_ubo_surfaces,
1491 };
1492
1493 void
1494 brw_upload_abo_surfaces(struct brw_context *brw,
1495 const struct gl_program *prog,
1496 struct brw_stage_state *stage_state,
1497 struct brw_stage_prog_data *prog_data)
1498 {
1499 struct gl_context *ctx = &brw->ctx;
1500 uint32_t *surf_offsets =
1501 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1502
1503 if (prog->info.num_abos) {
1504 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1505 struct gl_atomic_buffer_binding *binding =
1506 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1507 struct intel_buffer_object *intel_bo =
1508 intel_buffer_object(binding->BufferObject);
1509 struct brw_bo *bo = intel_bufferobj_buffer(
1510 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1511
1512 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1513 binding->Offset, ISL_FORMAT_RAW,
1514 bo->size - binding->Offset, 1, true);
1515 }
1516
1517 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1518 }
1519 }
1520
1521 static void
1522 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1523 {
1524 /* _NEW_PROGRAM */
1525 const struct gl_program *wm = brw->fragment_program;
1526
1527 if (wm) {
1528 /* BRW_NEW_FS_PROG_DATA */
1529 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1530 }
1531 }
1532
1533 const struct brw_tracked_state brw_wm_abo_surfaces = {
1534 .dirty = {
1535 .mesa = _NEW_PROGRAM,
1536 .brw = BRW_NEW_ATOMIC_BUFFER |
1537 BRW_NEW_BLORP |
1538 BRW_NEW_BATCH |
1539 BRW_NEW_FS_PROG_DATA,
1540 },
1541 .emit = brw_upload_wm_abo_surfaces,
1542 };
1543
1544 static void
1545 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1546 {
1547 /* _NEW_PROGRAM */
1548 const struct gl_program *cp = brw->compute_program;
1549
1550 if (cp) {
1551 /* BRW_NEW_CS_PROG_DATA */
1552 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1553 }
1554 }
1555
1556 const struct brw_tracked_state brw_cs_abo_surfaces = {
1557 .dirty = {
1558 .mesa = _NEW_PROGRAM,
1559 .brw = BRW_NEW_ATOMIC_BUFFER |
1560 BRW_NEW_BLORP |
1561 BRW_NEW_BATCH |
1562 BRW_NEW_CS_PROG_DATA,
1563 },
1564 .emit = brw_upload_cs_abo_surfaces,
1565 };
1566
1567 static void
1568 brw_upload_cs_image_surfaces(struct brw_context *brw)
1569 {
1570 /* _NEW_PROGRAM */
1571 const struct gl_program *cp = brw->compute_program;
1572
1573 if (cp) {
1574 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1575 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1576 brw->cs.base.prog_data);
1577 }
1578 }
1579
1580 const struct brw_tracked_state brw_cs_image_surfaces = {
1581 .dirty = {
1582 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1583 .brw = BRW_NEW_BATCH |
1584 BRW_NEW_BLORP |
1585 BRW_NEW_CS_PROG_DATA |
1586 BRW_NEW_IMAGE_UNITS
1587 },
1588 .emit = brw_upload_cs_image_surfaces,
1589 };
1590
1591 static uint32_t
1592 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1593 {
1594 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1595 enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
1596 if (access == GL_WRITE_ONLY) {
1597 return hw_format;
1598 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1599 /* Typed surface reads support a very limited subset of the shader
1600 * image formats. Translate it into the closest format the
1601 * hardware supports.
1602 */
1603 return isl_lower_storage_image_format(devinfo, hw_format);
1604 } else {
1605 /* The hardware doesn't actually support a typed format that we can use
1606 * so we have to fall back to untyped read/write messages.
1607 */
1608 return ISL_FORMAT_RAW;
1609 }
1610 }
1611
1612 static void
1613 update_default_image_param(struct brw_context *brw,
1614 struct gl_image_unit *u,
1615 unsigned surface_idx,
1616 struct brw_image_param *param)
1617 {
1618 memset(param, 0, sizeof(*param));
1619 param->surface_idx = surface_idx;
1620 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1621 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1622 * detailed explanation of these parameters.
1623 */
1624 param->swizzling[0] = 0xff;
1625 param->swizzling[1] = 0xff;
1626 }
1627
1628 static void
1629 update_buffer_image_param(struct brw_context *brw,
1630 struct gl_image_unit *u,
1631 unsigned surface_idx,
1632 struct brw_image_param *param)
1633 {
1634 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1635 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1636 update_default_image_param(brw, u, surface_idx, param);
1637
1638 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1639 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1640 }
1641
1642 static void
1643 update_texture_image_param(struct brw_context *brw,
1644 struct gl_image_unit *u,
1645 unsigned surface_idx,
1646 struct brw_image_param *param)
1647 {
1648 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1649
1650 update_default_image_param(brw, u, surface_idx, param);
1651
1652 param->size[0] = minify(mt->logical_width0, u->Level);
1653 param->size[1] = minify(mt->logical_height0, u->Level);
1654 param->size[2] = (!u->Layered ? 1 :
1655 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1656 u->TexObj->Target == GL_TEXTURE_3D ?
1657 minify(mt->logical_depth0, u->Level) :
1658 mt->logical_depth0);
1659
1660 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1661 &param->offset[0],
1662 &param->offset[1]);
1663
1664 param->stride[0] = mt->cpp;
1665 param->stride[1] = mt->pitch / mt->cpp;
1666 param->stride[2] =
1667 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1668 param->stride[3] =
1669 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1670
1671 if (mt->tiling == I915_TILING_X) {
1672 /* An X tile is a rectangular block of 512x8 bytes. */
1673 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1674 param->tiling[1] = _mesa_logbase2(8);
1675
1676 if (brw->has_swizzling) {
1677 /* Right shifts required to swizzle bits 9 and 10 of the memory
1678 * address with bit 6.
1679 */
1680 param->swizzling[0] = 3;
1681 param->swizzling[1] = 4;
1682 }
1683 } else if (mt->tiling == I915_TILING_Y) {
1684 /* The layout of a Y-tiled surface in memory isn't fundamentally different
1685 * from the layout of an X-tiled surface; we simply pretend that the
1686 * surface is broken up into a number of smaller 16Bx32 tiles, each one
1687 * arranged in X-major order just as in the X-tiling case.
1688 */
1689 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1690 param->tiling[1] = _mesa_logbase2(32);
1691
1692 if (brw->has_swizzling) {
1693 /* Right shift required to swizzle bit 9 of the memory address with
1694 * bit 6.
1695 */
1696 param->swizzling[0] = 3;
1697 }
1698 }
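   /* Worked example (illustrative): with 4 bytes per pixel, X-tiling gives
    * tiling[0] == log2(512 / 4) == 7 and tiling[1] == log2(8) == 3
    * (128x8-pixel tiles), while Y-tiling gives tiling[0] == log2(16 / 4) == 2
    * and tiling[1] == log2(32) == 5 (4x32-pixel tiles).
    */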
1699
1700 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1701 * address calculation algorithm (emit_address_calculation() in
1702 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1703 * modulus equal to the LOD.
1704 */
1705 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1706 0);
1707 }
1708
1709 static void
1710 update_image_surface(struct brw_context *brw,
1711 struct gl_image_unit *u,
1712 GLenum access,
1713 unsigned surface_idx,
1714 uint32_t *surf_offset,
1715 struct brw_image_param *param)
1716 {
1717 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1718 struct gl_texture_object *obj = u->TexObj;
1719 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1720
1721 if (obj->Target == GL_TEXTURE_BUFFER) {
1722 struct intel_buffer_object *intel_obj =
1723 intel_buffer_object(obj->BufferObject);
1724 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1725 _mesa_get_format_bytes(u->_ActualFormat));
1726
1727 brw_emit_buffer_surface_state(
1728 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1729 format, intel_obj->Base.Size, texel_size,
1730 access != GL_READ_ONLY);
1731
1732 update_buffer_image_param(brw, u, surface_idx, param);
1733
1734 } else {
1735 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1736 struct intel_mipmap_tree *mt = intel_obj->mt;
1737
1738 if (format == ISL_FORMAT_RAW) {
1739 brw_emit_buffer_surface_state(
1740 brw, surf_offset, mt->bo, mt->offset,
1741 format, mt->bo->size - mt->offset, 1 /* pitch */,
1742 access != GL_READ_ONLY);
1743
1744 } else {
1745 const unsigned num_layers = (!u->Layered ? 1 :
1746 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1747 mt->logical_depth0);
1748
1749 struct isl_view view = {
1750 .format = format,
1751 .base_level = obj->MinLevel + u->Level,
1752 .levels = 1,
1753 .base_array_layer = obj->MinLayer + u->_Layer,
1754 .array_len = num_layers,
1755 .swizzle = ISL_SWIZZLE_IDENTITY,
1756 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1757 };
1758
1759 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1760 assert(!intel_miptree_has_color_unresolved(mt,
1761 view.base_level, 1,
1762 view.base_array_layer,
1763 view.array_len));
1764 brw_emit_surface_state(brw, mt, INTEL_AUX_BUFFER_DISABLED,
1765 mt->target, view, tex_mocs[brw->gen],
1766 surf_offset, surf_index,
1767 I915_GEM_DOMAIN_SAMPLER,
1768 access == GL_READ_ONLY ? 0 :
1769 I915_GEM_DOMAIN_SAMPLER);
1770 }
1771
1772 update_texture_image_param(brw, u, surface_idx, param);
1773 }
1774
1775 } else {
1776 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1777 update_default_image_param(brw, u, surface_idx, param);
1778 }
1779 }
1780
1781 void
1782 brw_upload_image_surfaces(struct brw_context *brw,
1783 const struct gl_program *prog,
1784 struct brw_stage_state *stage_state,
1785 struct brw_stage_prog_data *prog_data)
1786 {
1787 assert(prog);
1788 struct gl_context *ctx = &brw->ctx;
1789
1790 if (prog->info.num_images) {
1791 for (unsigned i = 0; i < prog->info.num_images; i++) {
1792 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1793 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1794
1795 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1796 surf_idx,
1797 &stage_state->surf_offset[surf_idx],
1798 &prog_data->image_param[i]);
1799 }
1800
1801 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1802 /* This may have changed the image metadata that depends on the context
1803 * image unit state and is passed to the program as uniforms, so make
1804 * sure that push and pull constants are re-uploaded.
1805 */
1806 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1807 }
1808 }
1809
1810 static void
1811 brw_upload_wm_image_surfaces(struct brw_context *brw)
1812 {
1813 /* BRW_NEW_FRAGMENT_PROGRAM */
1814 const struct gl_program *wm = brw->fragment_program;
1815
1816 if (wm) {
1817 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1818 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1819 brw->wm.base.prog_data);
1820 }
1821 }
1822
1823 const struct brw_tracked_state brw_wm_image_surfaces = {
1824 .dirty = {
1825 .mesa = _NEW_TEXTURE,
1826 .brw = BRW_NEW_BATCH |
1827 BRW_NEW_BLORP |
1828 BRW_NEW_FRAGMENT_PROGRAM |
1829 BRW_NEW_FS_PROG_DATA |
1830 BRW_NEW_IMAGE_UNITS
1831 },
1832 .emit = brw_upload_wm_image_surfaces,
1833 };
1834
1835 void
1836 gen4_init_vtable_surface_functions(struct brw_context *brw)
1837 {
1838 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1839 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1840 }
1841
1842 void
1843 gen6_init_vtable_surface_functions(struct brw_context *brw)
1844 {
1845 gen4_init_vtable_surface_functions(brw);
1846 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1847 }
1848
1849 static void
1850 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1851 {
1852 struct gl_context *ctx = &brw->ctx;
1853 /* _NEW_PROGRAM */
1854 struct gl_program *prog =
1855 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1856 /* BRW_NEW_CS_PROG_DATA */
1857 const struct brw_cs_prog_data *cs_prog_data =
1858 brw_cs_prog_data(brw->cs.base.prog_data);
1859
1860 if (prog && cs_prog_data->uses_num_work_groups) {
1861 const unsigned surf_idx =
1862 cs_prog_data->binding_table.work_groups_start;
1863 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1864 struct brw_bo *bo;
1865 uint32_t bo_offset;
1866
1867 if (brw->compute.num_work_groups_bo == NULL) {
1868 bo = NULL;
1869 intel_upload_data(brw,
1870 (void *)brw->compute.num_work_groups,
1871 3 * sizeof(GLuint),
1872 sizeof(GLuint),
1873 &bo,
1874 &bo_offset);
1875 } else {
1876 bo = brw->compute.num_work_groups_bo;
1877 bo_offset = brw->compute.num_work_groups_offset;
1878 }
1879
1880 brw_emit_buffer_surface_state(brw, surf_offset,
1881 bo, bo_offset,
1882 ISL_FORMAT_RAW,
1883 3 * sizeof(GLuint), 1, true);
1884 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1885 }
1886 }
1887
1888 const struct brw_tracked_state brw_cs_work_groups_surface = {
1889 .dirty = {
1890 .brw = BRW_NEW_BLORP |
1891 BRW_NEW_CS_PROG_DATA |
1892 BRW_NEW_CS_WORK_GROUPS
1893 },
1894 .emit = brw_upload_cs_work_groups_surface,
1895 };