i965/drm: Rename drm_bacon_bo to brw_bo.
[mesa.git] / src/mesa/drivers/dri/i965/brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
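/* Memory Object Control State (MOCS) entries used for sampled surfaces
 * (tex_mocs) and render targets (rb_mocs), indexed by hardware generation
 * (brw->gen).  Sampled surfaces are marked write-back cacheable, while
 * render targets take their cacheability from the page-table entry (PTE),
 * e.g. so that scanout buffers stay uncached.
 */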
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96       * actual layout of the miptree structure in memory -- you're entering
97       * dangerous territory: this can only possibly work if you intend to
98       * access only a single level and slice of the texture, and the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 struct brw_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 aux_bo = mt->mcs_buf->bo;
147 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
148 } else {
149 aux_bo = mt->hiz_buf->aux_base.bo;
150 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
151 }
152
153 /* We only really need a clear color if we also have an auxiliary
154 * surface. Without one, it does nothing.
155 */
156 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
157 }
158
159 void *state = brw_state_batch(brw,
160 brw->isl_dev.ss.size,
161 brw->isl_dev.ss.align,
162 surf_offset);
163
164 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
165 .address = mt->bo->offset64 + offset,
166 .aux_surf = aux_surf, .aux_usage = aux_usage,
167 .aux_address = aux_offset,
168 .mocs = mocs, .clear_color = clear_color,
169 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
170
171 brw_emit_reloc(&brw->batch, *surf_offset + brw->isl_dev.ss.addr_offset,
172 mt->bo, offset, read_domains, write_domains);
173
174 if (aux_surf) {
175 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
176 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
177 * contain other control information. Since buffer addresses are always
178 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
179 * an ordinary reloc to do the necessary address translation.
180 */
181 assert((aux_offset & 0xfff) == 0);
182 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
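      /* isl_surf_fill_state() packed (aux_offset | control bits) into this
       * dword.  Subtracting the BO's presumed address leaves the offset
       * within the BO plus the control bits as the relocation delta; the
       * final BO address is added back at relocation time without touching
       * the low 12 bits.
       */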
183 brw_emit_reloc(&brw->batch,
184 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
185 aux_bo, *aux_addr - aux_bo->offset64,
186 read_domains, write_domains);
187 }
188 }
189
190 uint32_t
191 brw_update_renderbuffer_surface(struct brw_context *brw,
192 struct gl_renderbuffer *rb,
193 uint32_t flags, unsigned unit /* unused */,
194 uint32_t surf_index)
195 {
196 struct gl_context *ctx = &brw->ctx;
197 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
198 struct intel_mipmap_tree *mt = irb->mt;
199
200 if (brw->gen < 9) {
201 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
202 }
203
204 assert(brw_render_target_supported(brw, rb));
205
206 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
207 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
208 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
209 __func__, _mesa_get_format_name(rb_format));
210 }
211
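   /* For the UMS and CMS multisample layouts each sample is stored as a
    * separate array slice of the miptree, so irb->mt_layer (expressed in
    * sample units for those layouts) is divided by the sample count below
    * to obtain a logical layer index.
    */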
212 const unsigned layer_multiplier =
213 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
214 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
215 MAX2(irb->mt->num_samples, 1) : 1;
216
217 struct isl_view view = {
218 .format = brw->render_target_format[rb_format],
219 .base_level = irb->mt_level - irb->mt->first_level,
220 .levels = 1,
221 .base_array_layer = irb->mt_layer / layer_multiplier,
222 .array_len = MAX2(irb->layer_count, 1),
223 .swizzle = ISL_SWIZZLE_IDENTITY,
224 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
225 };
226
227 uint32_t offset;
228 brw_emit_surface_state(brw, mt, flags, mt->target, view,
229 rb_mocs[brw->gen],
230 &offset, surf_index,
231 I915_GEM_DOMAIN_RENDER,
232 I915_GEM_DOMAIN_RENDER);
233 return offset;
234 }
235
236 GLuint
237 translate_tex_target(GLenum target)
238 {
239 switch (target) {
240 case GL_TEXTURE_1D:
241 case GL_TEXTURE_1D_ARRAY_EXT:
242 return BRW_SURFACE_1D;
243
244 case GL_TEXTURE_RECTANGLE_NV:
245 return BRW_SURFACE_2D;
246
247 case GL_TEXTURE_2D:
248 case GL_TEXTURE_2D_ARRAY_EXT:
249 case GL_TEXTURE_EXTERNAL_OES:
250 case GL_TEXTURE_2D_MULTISAMPLE:
251 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
252 return BRW_SURFACE_2D;
253
254 case GL_TEXTURE_3D:
255 return BRW_SURFACE_3D;
256
257 case GL_TEXTURE_CUBE_MAP:
258 case GL_TEXTURE_CUBE_MAP_ARRAY:
259 return BRW_SURFACE_CUBE;
260
261 default:
262 unreachable("not reached");
263 }
264 }
265
266 uint32_t
267 brw_get_surface_tiling_bits(uint32_t tiling)
268 {
269 switch (tiling) {
270 case I915_TILING_X:
271 return BRW_SURFACE_TILED;
272 case I915_TILING_Y:
273 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
274 default:
275 return 0;
276 }
277 }
278
279
280 uint32_t
281 brw_get_surface_num_multisamples(unsigned num_samples)
282 {
283 if (num_samples > 1)
284 return BRW_SURFACE_MULTISAMPLECOUNT_4;
285 else
286 return BRW_SURFACE_MULTISAMPLECOUNT_1;
287 }
288
289 /**
290 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
291 * swizzling.
292 */
293 int
294 brw_get_texture_swizzle(const struct gl_context *ctx,
295 const struct gl_texture_object *t)
296 {
297 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
298
299 int swizzles[SWIZZLE_NIL + 1] = {
300 SWIZZLE_X,
301 SWIZZLE_Y,
302 SWIZZLE_Z,
303 SWIZZLE_W,
304 SWIZZLE_ZERO,
305 SWIZZLE_ONE,
306 SWIZZLE_NIL
307 };
308
309 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
310 img->_BaseFormat == GL_DEPTH_STENCIL) {
311 GLenum depth_mode = t->DepthMode;
312
313 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
314 * with depth component data specified with a sized internal format.
315 * Otherwise, it's left at the old default, GL_LUMINANCE.
316 */
317 if (_mesa_is_gles3(ctx) &&
318 img->InternalFormat != GL_DEPTH_COMPONENT &&
319 img->InternalFormat != GL_DEPTH_STENCIL) {
320 depth_mode = GL_RED;
321 }
322
323 switch (depth_mode) {
324 case GL_ALPHA:
325 swizzles[0] = SWIZZLE_ZERO;
326 swizzles[1] = SWIZZLE_ZERO;
327 swizzles[2] = SWIZZLE_ZERO;
328 swizzles[3] = SWIZZLE_X;
329 break;
330 case GL_LUMINANCE:
331 swizzles[0] = SWIZZLE_X;
332 swizzles[1] = SWIZZLE_X;
333 swizzles[2] = SWIZZLE_X;
334 swizzles[3] = SWIZZLE_ONE;
335 break;
336 case GL_INTENSITY:
337 swizzles[0] = SWIZZLE_X;
338 swizzles[1] = SWIZZLE_X;
339 swizzles[2] = SWIZZLE_X;
340 swizzles[3] = SWIZZLE_X;
341 break;
342 case GL_RED:
343 swizzles[0] = SWIZZLE_X;
344 swizzles[1] = SWIZZLE_ZERO;
345 swizzles[2] = SWIZZLE_ZERO;
346 swizzles[3] = SWIZZLE_ONE;
347 break;
348 }
349 }
350
351 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
352
353 /* If the texture's format is alpha-only, force R, G, and B to
354 * 0.0. Similarly, if the texture's format has no alpha channel,
355 * force the alpha value read to 1.0. This allows for the
356 * implementation to use an RGBA texture for any of these formats
357 * without leaking any unexpected values.
358 */
359 switch (img->_BaseFormat) {
360 case GL_ALPHA:
361 swizzles[0] = SWIZZLE_ZERO;
362 swizzles[1] = SWIZZLE_ZERO;
363 swizzles[2] = SWIZZLE_ZERO;
364 break;
365 case GL_LUMINANCE:
366 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
367 swizzles[0] = SWIZZLE_X;
368 swizzles[1] = SWIZZLE_X;
369 swizzles[2] = SWIZZLE_X;
370 swizzles[3] = SWIZZLE_ONE;
371 }
372 break;
373 case GL_LUMINANCE_ALPHA:
374 if (datatype == GL_SIGNED_NORMALIZED) {
375 swizzles[0] = SWIZZLE_X;
376 swizzles[1] = SWIZZLE_X;
377 swizzles[2] = SWIZZLE_X;
378 swizzles[3] = SWIZZLE_W;
379 }
380 break;
381 case GL_INTENSITY:
382 if (datatype == GL_SIGNED_NORMALIZED) {
383 swizzles[0] = SWIZZLE_X;
384 swizzles[1] = SWIZZLE_X;
385 swizzles[2] = SWIZZLE_X;
386 swizzles[3] = SWIZZLE_X;
387 }
388 break;
389 case GL_RED:
390 case GL_RG:
391 case GL_RGB:
392 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
393 swizzles[3] = SWIZZLE_ONE;
394 break;
395 }
396
397 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
398 swizzles[GET_SWZ(t->_Swizzle, 1)],
399 swizzles[GET_SWZ(t->_Swizzle, 2)],
400 swizzles[GET_SWZ(t->_Swizzle, 3)]);
401 }
402
403 /**
404  * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
405 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
406 *
407 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
408 * 0 1 2 3 4 5
409 * 4 5 6 7 0 1
410 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
411 *
412 * which is simply adding 4 then modding by 8 (or anding with 7).
413 *
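 *
 * For example, SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 = HSW_SCS_RED, and
 * SWIZZLE_ZERO (4) becomes (4 + 4) & 7 = 0 = HSW_SCS_ZERO.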
414 * We then may need to apply workarounds for textureGather hardware bugs.
415 */
416 static unsigned
417 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
418 {
419 unsigned scs = (swizzle + 4) & 7;
420
421 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
422 }
423
424 static unsigned
425 brw_find_matching_rb(const struct gl_framebuffer *fb,
426 const struct intel_mipmap_tree *mt)
427 {
428 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
429 const struct intel_renderbuffer *irb =
430 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
431
432 if (irb && irb->mt == mt)
433 return i;
434 }
435
436 return fb->_NumColorDrawBuffers;
437 }
438
439 static inline bool
440 brw_texture_view_sane(const struct brw_context *brw,
441 const struct intel_mipmap_tree *mt,
442 const struct isl_view *view)
443 {
444 /* There are special cases only for lossless compression. */
445 if (!intel_miptree_is_lossless_compressed(brw, mt))
446 return true;
447
448 if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
449 return true;
450
451 /* Logic elsewhere needs to take care to resolve the color buffer prior
452 * to sampling it as non-compressed.
453 */
454 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
455 view->base_array_layer,
456 view->array_len))
457 return false;
458
459 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
460 const unsigned rb_index = brw_find_matching_rb(fb, mt);
461
462 if (rb_index == fb->_NumColorDrawBuffers)
463 return true;
464
465 /* Underlying surface is compressed but it is sampled using a format that
466 * the sampling engine doesn't support as compressed. Compression must be
467 * disabled for both sampling engine and data port in case the same surface
468 * is used also as render target.
469 */
470 return brw->draw_aux_buffer_disabled[rb_index];
471 }
472
473 static bool
474 brw_disable_aux_surface(const struct brw_context *brw,
475 const struct intel_mipmap_tree *mt,
476 const struct isl_view *view)
477 {
478 /* Nothing to disable. */
479 if (!mt->mcs_buf)
480 return false;
481
482 const bool is_unresolved = intel_miptree_has_color_unresolved(
483 mt, view->base_level, view->levels,
484 view->base_array_layer, view->array_len);
485
486 /* There are special cases only for lossless compression. */
487 if (!intel_miptree_is_lossless_compressed(brw, mt))
488 return !is_unresolved;
489
490 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
491 const unsigned rb_index = brw_find_matching_rb(fb, mt);
492
493 /* If we are drawing into this with compression enabled, then we must also
494 * enable compression when texturing from it regardless of
495    * fast_clear_state.  If we don't, then after the first draw call with
496 * this setup, there will be data in the CCS which won't get picked up by
497 * subsequent texturing operations as required by ARB_texture_barrier.
498 * Since we don't want to re-emit the binding table or do a resolve
499 * operation every draw call, the easiest thing to do is just enable
500 * compression on the texturing side. This is completely safe to do
501 * since, if compressed texturing weren't allowed, we would have disabled
502 * compression of render targets in whatever_that_function_is_called().
503 */
504 if (rb_index < fb->_NumColorDrawBuffers) {
505 if (brw->draw_aux_buffer_disabled[rb_index]) {
506 assert(!is_unresolved);
507 }
508
509 return brw->draw_aux_buffer_disabled[rb_index];
510 }
511
512 return !is_unresolved;
513 }
514
515 void
516 brw_update_texture_surface(struct gl_context *ctx,
517 unsigned unit,
518 uint32_t *surf_offset,
519 bool for_gather,
520 uint32_t plane)
521 {
522 struct brw_context *brw = brw_context(ctx);
523 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
524
525 if (obj->Target == GL_TEXTURE_BUFFER) {
526 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
527
528 } else {
529 struct intel_texture_object *intel_obj = intel_texture_object(obj);
530 struct intel_mipmap_tree *mt = intel_obj->mt;
531
532 if (plane > 0) {
533 if (mt->plane[plane - 1] == NULL)
534 return;
535 mt = mt->plane[plane - 1];
536 }
537
538 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
539 /* If this is a view with restricted NumLayers, then our effective depth
540 * is not just the miptree depth.
541 */
542 const unsigned view_num_layers =
543 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
544 mt->logical_depth0;
545
546 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
547 * texturing functions that return a float, as our code generation always
548 * selects the .x channel (which would always be 0).
549 */
550 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
551 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
552 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
553 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
554 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
555 brw_get_texture_swizzle(&brw->ctx, obj));
556
557 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
558 unsigned format = translate_tex_format(brw, mesa_fmt,
559 sampler->sRGBDecode);
560
561 /* Implement gen6 and gen7 gather work-around */
562 bool need_green_to_blue = false;
563 if (for_gather) {
564 if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
565 format == ISL_FORMAT_R32G32_SINT ||
566 format == ISL_FORMAT_R32G32_UINT)) {
567 format = ISL_FORMAT_R32G32_FLOAT_LD;
568 need_green_to_blue = brw->is_haswell;
569 } else if (brw->gen == 6) {
570 /* Sandybridge's gather4 message is broken for integer formats.
571 * To work around this, we pretend the surface is UNORM for
572 * 8 or 16-bit formats, and emit shader instructions to recover
573 * the real INT/UINT value. For 32-bit formats, we pretend
574 * the surface is FLOAT, and simply reinterpret the resulting
575 * bits.
576 */
577 switch (format) {
578 case ISL_FORMAT_R8_SINT:
579 case ISL_FORMAT_R8_UINT:
580 format = ISL_FORMAT_R8_UNORM;
581 break;
582
583 case ISL_FORMAT_R16_SINT:
584 case ISL_FORMAT_R16_UINT:
585 format = ISL_FORMAT_R16_UNORM;
586 break;
587
588 case ISL_FORMAT_R32_SINT:
589 case ISL_FORMAT_R32_UINT:
590 format = ISL_FORMAT_R32_FLOAT;
591 break;
592
593 default:
594 break;
595 }
596 }
597 }
598
599 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
600 if (brw->gen <= 7) {
601 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
602 mt = mt->r8stencil_mt;
603 } else {
604 mt = mt->stencil_mt;
605 }
606 format = ISL_FORMAT_R8_UINT;
607 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
608 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
609 mt = mt->r8stencil_mt;
610 format = ISL_FORMAT_R8_UINT;
611 }
612
613 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
614
615 struct isl_view view = {
616 .format = format,
617 .base_level = obj->MinLevel + obj->BaseLevel,
618 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
619 .base_array_layer = obj->MinLayer,
620 .array_len = view_num_layers,
621 .swizzle = {
622 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
623 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
624 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
625 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
626 },
627 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
628 };
629
630 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
631 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
632 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
633
634 assert(brw_texture_view_sane(brw, mt, &view));
635
636 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
637 INTEL_AUX_BUFFER_DISABLED : 0;
638 brw_emit_surface_state(brw, mt, flags, mt->target, view,
639 tex_mocs[brw->gen],
640 surf_offset, surf_index,
641 I915_GEM_DOMAIN_SAMPLER, 0);
642 }
643 }
644
645 void
646 brw_emit_buffer_surface_state(struct brw_context *brw,
647 uint32_t *out_offset,
648 struct brw_bo *bo,
649 unsigned buffer_offset,
650 unsigned surface_format,
651 unsigned buffer_size,
652 unsigned pitch,
653 bool rw)
654 {
655 uint32_t *dw = brw_state_batch(brw,
656 brw->isl_dev.ss.size,
657 brw->isl_dev.ss.align,
658 out_offset);
659
660 isl_buffer_fill_state(&brw->isl_dev, dw,
661 .address = (bo ? bo->offset64 : 0) + buffer_offset,
662 .size = buffer_size,
663 .format = surface_format,
664 .stride = pitch,
665 .mocs = tex_mocs[brw->gen]);
666
667 if (bo) {
668 brw_emit_reloc(&brw->batch, *out_offset + brw->isl_dev.ss.addr_offset,
669 bo, buffer_offset,
670 I915_GEM_DOMAIN_SAMPLER,
671 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
672 }
673 }
674
675 void
676 brw_update_buffer_texture_surface(struct gl_context *ctx,
677 unsigned unit,
678 uint32_t *surf_offset)
679 {
680 struct brw_context *brw = brw_context(ctx);
681 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
682 struct intel_buffer_object *intel_obj =
683 intel_buffer_object(tObj->BufferObject);
684 uint32_t size = tObj->BufferSize;
685 struct brw_bo *bo = NULL;
686 mesa_format format = tObj->_BufferObjectFormat;
687 uint32_t brw_format = brw_isl_format_for_mesa_format(format);
688 int texel_size = _mesa_get_format_bytes(format);
689
690 if (intel_obj) {
691 size = MIN2(size, intel_obj->Base.Size);
692 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
693 }
694
695 /* The ARB_texture_buffer_specification says:
696 *
697 * "The number of texels in the buffer texture's texel array is given by
698 *
699 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
700 *
701 * where <buffer_size> is the size of the buffer object, in basic
702 * machine units and <components> and <base_type> are the element count
703 * and base data type for elements, as specified in Table X.1. The
704 * number of texels in the texel array is then clamped to the
705 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
706 *
707 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
708 * so that when ISL divides by stride to obtain the number of texels, that
709 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
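    *
    * For example, assuming a MAX_TEXTURE_BUFFER_SIZE of 2^27 texels and an
    * RGBA32F buffer texture (16 bytes per texel), the byte size below is
    * capped at 2^31, which ISL turns back into at most 2^27 texels.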
710 */
711 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
712
713 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
714 _mesa_problem(NULL, "bad format %s for texture buffer\n",
715 _mesa_get_format_name(format));
716 }
717
718 brw_emit_buffer_surface_state(brw, surf_offset, bo,
719 tObj->BufferOffset,
720 brw_format,
721 size,
722 texel_size,
723 false /* rw */);
724 }
725
726 /**
727 * Create the constant buffer surface. Vertex/fragment shader constants will be
728 * read from this buffer with Data Port Read instructions/messages.
729 */
730 void
731 brw_create_constant_surface(struct brw_context *brw,
732 struct brw_bo *bo,
733 uint32_t offset,
734 uint32_t size,
735 uint32_t *out_offset)
736 {
737 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
738 ISL_FORMAT_R32G32B32A32_FLOAT,
739 size, 1, false);
740 }
741
742 /**
743 * Create the buffer surface. Shader buffer variables will be
744  * read from / written to this buffer with Data Port Read/Write
745 * instructions/messages.
746 */
747 void
748 brw_create_buffer_surface(struct brw_context *brw,
749 struct brw_bo *bo,
750 uint32_t offset,
751 uint32_t size,
752 uint32_t *out_offset)
753 {
754 /* Use a raw surface so we can reuse existing untyped read/write/atomic
755 * messages. We need these specifically for the fragment shader since they
756 * include a pixel mask header that we need to ensure correct behavior
757 * with helper invocations, which cannot write to the buffer.
758 */
759 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
760 ISL_FORMAT_RAW,
761 size, 1, true);
762 }
763
764 /**
765 * Set up a binding table entry for use by stream output logic (transform
766 * feedback).
767 *
768 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
769 */
770 void
771 brw_update_sol_surface(struct brw_context *brw,
772 struct gl_buffer_object *buffer_obj,
773 uint32_t *out_offset, unsigned num_vector_components,
774 unsigned stride_dwords, unsigned offset_dwords)
775 {
776 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
777 uint32_t offset_bytes = 4 * offset_dwords;
778 struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
779 offset_bytes,
780 buffer_obj->Size - offset_bytes);
781 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
782 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
783 size_t size_dwords = buffer_obj->Size / 4;
784 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
785
786 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
787 * too big to map using a single binding table entry?
788 */
789 assert((size_dwords - offset_dwords) / stride_dwords
790 <= BRW_MAX_NUM_BUFFER_ENTRIES);
791
792 if (size_dwords > offset_dwords + num_vector_components) {
793 /* There is room for at least 1 transform feedback output in the buffer.
794 * Compute the number of additional transform feedback outputs the
795 * buffer has room for.
796 */
797 buffer_size_minus_1 =
798 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
799 } else {
800 /* There isn't even room for a single transform feedback output in the
801 * buffer. We can't configure the binding table entry to prevent output
802 * entirely; we'll have to rely on the geometry shader to detect
803 * overflow. But to minimize the damage in case of a bug, set up the
804 * binding table entry to just allow a single output.
805 */
806 buffer_size_minus_1 = 0;
807 }
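   /* SURFTYPE_BUFFER surfaces encode the entry count across the Width
    * (bits 6:0), Height (bits 19:7) and Depth (bits 26:20) fields of
    * SURFACE_STATE, so buffer_size_minus_1 is split into 7-, 13- and 7-bit
    * pieces below.
    */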
808 width = buffer_size_minus_1 & 0x7f;
809 height = (buffer_size_minus_1 & 0xfff80) >> 7;
810 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
811
812 switch (num_vector_components) {
813 case 1:
814 surface_format = ISL_FORMAT_R32_FLOAT;
815 break;
816 case 2:
817 surface_format = ISL_FORMAT_R32G32_FLOAT;
818 break;
819 case 3:
820 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
821 break;
822 case 4:
823 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
824 break;
825 default:
826 unreachable("Invalid vector size for transform feedback output");
827 }
828
829 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
830 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
831 surface_format << BRW_SURFACE_FORMAT_SHIFT |
832 BRW_SURFACE_RC_READ_WRITE;
833 surf[1] = bo->offset64 + offset_bytes; /* reloc */
834 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
835 height << BRW_SURFACE_HEIGHT_SHIFT);
836 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
837 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
838 surf[4] = 0;
839 surf[5] = 0;
840
841 /* Emit relocation to surface contents. */
842 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, offset_bytes,
843 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
844 }
845
846 /* Creates a new WM constant buffer reflecting the current fragment program's
847 * constants, if needed by the fragment program.
848 *
849 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
850 * state atom.
851 */
852 static void
853 brw_upload_wm_pull_constants(struct brw_context *brw)
854 {
855 struct brw_stage_state *stage_state = &brw->wm.base;
856 /* BRW_NEW_FRAGMENT_PROGRAM */
857 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
858 /* BRW_NEW_FS_PROG_DATA */
859 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
860
861 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
862 /* _NEW_PROGRAM_CONSTANTS */
863 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
864 stage_state, prog_data);
865 }
866
867 const struct brw_tracked_state brw_wm_pull_constants = {
868 .dirty = {
869 .mesa = _NEW_PROGRAM_CONSTANTS,
870 .brw = BRW_NEW_BATCH |
871 BRW_NEW_BLORP |
872 BRW_NEW_FRAGMENT_PROGRAM |
873 BRW_NEW_FS_PROG_DATA,
874 },
875 .emit = brw_upload_wm_pull_constants,
876 };
877
878 /**
879 * Creates a null renderbuffer surface.
880 *
881 * This is used when the shader doesn't write to any color output. An FB
882 * write to target 0 will still be emitted, because that's how the thread is
883 * terminated (and computed depth is returned), so we need to have the
884  * hardware discard the target 0 color output.
885 */
886 static void
887 brw_emit_null_surface_state(struct brw_context *brw,
888 unsigned width,
889 unsigned height,
890 unsigned samples,
891 uint32_t *out_offset)
892 {
893    /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
894 * Notes):
895 *
896 * A null surface will be used in instances where an actual surface is
897 * not bound. When a write message is generated to a null surface, no
898 * actual surface is written to. When a read message (including any
899 * sampling engine message) is generated to a null surface, the result
900 * is all zeros. Note that a null surface type is allowed to be used
901    *     with all messages, even if it is not specifically indicated as
902 * supported. All of the remaining fields in surface state are ignored
903 * for null surfaces, with the following exceptions:
904 *
905 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
906 * depth buffer’s corresponding state for all render target surfaces,
907 * including null.
908 *
909 * - Surface Format must be R8G8B8A8_UNORM.
910 */
911 unsigned surface_type = BRW_SURFACE_NULL;
912 struct brw_bo *bo = NULL;
913 unsigned pitch_minus_1 = 0;
914 uint32_t multisampling_state = 0;
915 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
916
917 if (samples > 1) {
918 /* On Gen6, null render targets seem to cause GPU hangs when
919 * multisampling. So work around this problem by rendering into dummy
920       * multisampling.  So work around this problem by rendering into a dummy
921 *
922 * To decrease the amount of memory needed by the workaround buffer, we
923 * set its pitch to 128 bytes (the width of a Y tile). This means that
924 * the amount of memory needed for the workaround buffer is
925 * (width_in_tiles + height_in_tiles - 1) tiles.
926 *
927 * Note that since the workaround buffer will be interpreted by the
928 * hardware as an interleaved multisampled buffer, we need to compute
929 * width_in_tiles and height_in_tiles by dividing the width and height
930 * by 16 rather than the normal Y-tile size of 32.
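       *
       * For example, a 1920x1080 multisampled null target needs
       * (120 + 68 - 1) * 4096 bytes (roughly 748 kB) of scratch space rather
       * than a full-sized dummy color buffer.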
931 */
932 unsigned width_in_tiles = ALIGN(width, 16) / 16;
933 unsigned height_in_tiles = ALIGN(height, 16) / 16;
934 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
935 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
936 size_needed);
937 bo = brw->wm.multisampled_null_render_target_bo;
938 surface_type = BRW_SURFACE_2D;
939 pitch_minus_1 = 127;
940 multisampling_state = brw_get_surface_num_multisamples(samples);
941 }
942
943 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
944 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
945 if (brw->gen < 6) {
946 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
947 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
948 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
949 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
950 }
951 surf[1] = bo ? bo->offset64 : 0;
952 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
953 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
954
955    /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
956 * Notes):
957 *
958 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
959 */
960 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
961 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
962 surf[4] = multisampling_state;
963 surf[5] = 0;
964
965 if (bo) {
966 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, 0,
967 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
968 }
969 }
970
971 /**
972 * Sets up a surface state structure to point at the given region.
973 * While it is only used for the front/back buffer currently, it should be
974  * usable for further buffers when doing ARB_draw_buffers support.
975 */
976 static uint32_t
977 gen4_update_renderbuffer_surface(struct brw_context *brw,
978 struct gl_renderbuffer *rb,
979 uint32_t flags, unsigned unit,
980 uint32_t surf_index)
981 {
982 struct gl_context *ctx = &brw->ctx;
983 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
984 struct intel_mipmap_tree *mt = irb->mt;
985 uint32_t *surf;
986 uint32_t tile_x, tile_y;
987 uint32_t format = 0;
988 uint32_t offset;
989 /* _NEW_BUFFERS */
990 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
991 /* BRW_NEW_FS_PROG_DATA */
992
993 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
994 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
995
996 if (rb->TexImage && !brw->has_surface_tile_offset) {
997 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
998
999 if (tile_x != 0 || tile_y != 0) {
1000 /* Original gen4 hardware couldn't draw to a non-tile-aligned
1001          * destination in a miptree unless you actually set up your renderbuffer
1002 * as a miptree and used the fragile lod/array_index/etc. controls to
1003 * select the image. So, instead, we just make a new single-level
1004 * miptree and render into that.
1005 */
1006 intel_renderbuffer_move_to_temp(brw, irb, false);
1007 mt = irb->mt;
1008 }
1009 }
1010
1011 surf = brw_state_batch(brw, 6 * 4, 32, &offset);
1012
1013 format = brw->render_target_format[rb_format];
1014 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1015 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1016 __func__, _mesa_get_format_name(rb_format));
1017 }
1018
1019 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1020 format << BRW_SURFACE_FORMAT_SHIFT);
1021
1022 /* reloc */
1023 assert(mt->offset % mt->cpp == 0);
1024 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1025 mt->bo->offset64 + mt->offset);
1026
1027 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1028 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1029
1030 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1031 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1032
1033 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1034
1035 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1036 /* Note that the low bits of these fields are missing, so
1037 * there's the possibility of getting in trouble.
1038 */
1039 assert(tile_x % 4 == 0);
1040 assert(tile_y % 2 == 0);
1041 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1042 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1043 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1044
1045 if (brw->gen < 6) {
1046 /* _NEW_COLOR */
1047 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1048 (ctx->Color.BlendEnabled & (1 << unit)))
1049 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1050
1051 if (!ctx->Color.ColorMask[unit][0])
1052 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1053 if (!ctx->Color.ColorMask[unit][1])
1054 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1055 if (!ctx->Color.ColorMask[unit][2])
1056 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1057
1058 /* As mentioned above, disable writes to the alpha component when the
1059 * renderbuffer is XRGB.
1060 */
1061 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1062 !ctx->Color.ColorMask[unit][3]) {
1063 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1064 }
1065 }
1066
1067 brw_emit_reloc(&brw->batch, offset + 4, mt->bo, surf[1] - mt->bo->offset64,
1068 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
1069
1070 return offset;
1071 }
1072
1073 /**
1074 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1075 */
1076 void
1077 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1078 const struct gl_framebuffer *fb,
1079 uint32_t render_target_start,
1080 uint32_t *surf_offset)
1081 {
1082 GLuint i;
1083 const unsigned int w = _mesa_geometric_width(fb);
1084 const unsigned int h = _mesa_geometric_height(fb);
1085 const unsigned int s = _mesa_geometric_samples(fb);
1086
1087 /* Update surfaces for drawing buffers */
1088 if (fb->_NumColorDrawBuffers >= 1) {
1089 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1090 const uint32_t surf_index = render_target_start + i;
1091 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1092 INTEL_RENDERBUFFER_LAYERED : 0) |
1093 (brw->draw_aux_buffer_disabled[i] ?
1094 INTEL_AUX_BUFFER_DISABLED : 0);
1095
1096 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1097 surf_offset[surf_index] =
1098 brw->vtbl.update_renderbuffer_surface(
1099 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1100 } else {
1101 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1102 &surf_offset[surf_index]);
1103 }
1104 }
1105 } else {
1106 const uint32_t surf_index = render_target_start;
1107 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1108 &surf_offset[surf_index]);
1109 }
1110 }
1111
1112 static void
1113 update_renderbuffer_surfaces(struct brw_context *brw)
1114 {
1115 const struct gl_context *ctx = &brw->ctx;
1116
1117 /* BRW_NEW_FS_PROG_DATA */
1118 const struct brw_wm_prog_data *wm_prog_data =
1119 brw_wm_prog_data(brw->wm.base.prog_data);
1120
1121 /* _NEW_BUFFERS | _NEW_COLOR */
1122 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1123 brw_update_renderbuffer_surfaces(
1124 brw, fb,
1125 wm_prog_data->binding_table.render_target_start,
1126 brw->wm.base.surf_offset);
1127 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1128 }
1129
1130 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1131 .dirty = {
1132 .mesa = _NEW_BUFFERS |
1133 _NEW_COLOR,
1134 .brw = BRW_NEW_BATCH |
1135 BRW_NEW_BLORP |
1136 BRW_NEW_FS_PROG_DATA,
1137 },
1138 .emit = update_renderbuffer_surfaces,
1139 };
1140
1141 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1142 .dirty = {
1143 .mesa = _NEW_BUFFERS,
1144 .brw = BRW_NEW_BATCH |
1145 BRW_NEW_BLORP,
1146 },
1147 .emit = update_renderbuffer_surfaces,
1148 };
1149
1150 static void
1151 update_renderbuffer_read_surfaces(struct brw_context *brw)
1152 {
1153 const struct gl_context *ctx = &brw->ctx;
1154
1155 /* BRW_NEW_FS_PROG_DATA */
1156 const struct brw_wm_prog_data *wm_prog_data =
1157 brw_wm_prog_data(brw->wm.base.prog_data);
1158
1159 /* BRW_NEW_FRAGMENT_PROGRAM */
1160 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1161 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1162 /* _NEW_BUFFERS */
1163 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1164
1165 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1166 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1167 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1168 const unsigned surf_index =
1169 wm_prog_data->binding_table.render_target_read_start + i;
1170 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1171
1172 if (irb) {
1173 const unsigned format = brw->render_target_format[
1174 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1175 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1176 format));
1177
1178 /* Override the target of the texture if the render buffer is a
1179 * single slice of a 3D texture (since the minimum array element
1180 * field of the surface state structure is ignored by the sampler
1181 * unit for 3D textures on some hardware), or if the render buffer
1182 * is a 1D array (since shaders always provide the array index
1183 * coordinate at the Z component to avoid state-dependent
1184 * recompiles when changing the texture target of the
1185 * framebuffer).
1186 */
1187 const GLenum target =
1188 (irb->mt->target == GL_TEXTURE_3D &&
1189 irb->layer_count == 1) ? GL_TEXTURE_2D :
1190 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1191 irb->mt->target;
1192
1193 /* intel_renderbuffer::mt_layer is expressed in sample units for
1194 * the UMS and CMS multisample layouts, but
1195 * intel_renderbuffer::layer_count is expressed in units of whole
1196 * logical layers regardless of the multisample layout.
1197 */
1198 const unsigned mt_layer_unit =
1199 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1200 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1201 MAX2(irb->mt->num_samples, 1) : 1;
1202
1203 const struct isl_view view = {
1204 .format = format,
1205 .base_level = irb->mt_level - irb->mt->first_level,
1206 .levels = 1,
1207 .base_array_layer = irb->mt_layer / mt_layer_unit,
1208 .array_len = irb->layer_count,
1209 .swizzle = ISL_SWIZZLE_IDENTITY,
1210 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1211 };
1212
1213 const int flags = brw->draw_aux_buffer_disabled[i] ?
1214 INTEL_AUX_BUFFER_DISABLED : 0;
1215 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1216 tex_mocs[brw->gen],
1217 surf_offset, surf_index,
1218 I915_GEM_DOMAIN_SAMPLER, 0);
1219
1220 } else {
1221 brw->vtbl.emit_null_surface_state(
1222 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1223 _mesa_geometric_samples(fb), surf_offset);
1224 }
1225 }
1226
1227 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1228 }
1229 }
1230
1231 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1232 .dirty = {
1233 .mesa = _NEW_BUFFERS,
1234 .brw = BRW_NEW_BATCH |
1235 BRW_NEW_FRAGMENT_PROGRAM |
1236 BRW_NEW_FS_PROG_DATA,
1237 },
1238 .emit = update_renderbuffer_read_surfaces,
1239 };
1240
1241 static void
1242 update_stage_texture_surfaces(struct brw_context *brw,
1243 const struct gl_program *prog,
1244 struct brw_stage_state *stage_state,
1245 bool for_gather, uint32_t plane)
1246 {
1247 if (!prog)
1248 return;
1249
1250 struct gl_context *ctx = &brw->ctx;
1251
1252 uint32_t *surf_offset = stage_state->surf_offset;
1253
1254 /* BRW_NEW_*_PROG_DATA */
1255 if (for_gather)
1256 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1257 else
1258 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1259
1260 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1261 for (unsigned s = 0; s < num_samplers; s++) {
1262 surf_offset[s] = 0;
1263
1264 if (prog->SamplersUsed & (1 << s)) {
1265 const unsigned unit = prog->SamplerUnits[s];
1266
1267 /* _NEW_TEXTURE */
1268 if (ctx->Texture.Unit[unit]._Current) {
1269 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1270 }
1271 }
1272 }
1273 }
1274
1275
1276 /**
1277 * Construct SURFACE_STATE objects for enabled textures.
1278 */
1279 static void
1280 brw_update_texture_surfaces(struct brw_context *brw)
1281 {
1282 /* BRW_NEW_VERTEX_PROGRAM */
1283 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1284
1285 /* BRW_NEW_TESS_PROGRAMS */
1286 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1287 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1288
1289 /* BRW_NEW_GEOMETRY_PROGRAM */
1290 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1291
1292 /* BRW_NEW_FRAGMENT_PROGRAM */
1293 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1294
1295 /* _NEW_TEXTURE */
1296 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1297 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1298 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1299 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1300 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1301
1302    /* Emit an alternate set of surface state for gather.  This
1303     * allows the surface format to be overridden for only the
1304     * gather4 messages. */
1305 if (brw->gen < 8) {
1306 if (vs && vs->nir->info->uses_texture_gather)
1307 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1308 if (tcs && tcs->nir->info->uses_texture_gather)
1309 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1310 if (tes && tes->nir->info->uses_texture_gather)
1311 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1312 if (gs && gs->nir->info->uses_texture_gather)
1313 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1314 if (fs && fs->nir->info->uses_texture_gather)
1315 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1316 }
1317
1318 if (fs) {
1319 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1320 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1321 }
1322
1323 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1324 }
1325
1326 const struct brw_tracked_state brw_texture_surfaces = {
1327 .dirty = {
1328 .mesa = _NEW_TEXTURE,
1329 .brw = BRW_NEW_BATCH |
1330 BRW_NEW_BLORP |
1331 BRW_NEW_FRAGMENT_PROGRAM |
1332 BRW_NEW_FS_PROG_DATA |
1333 BRW_NEW_GEOMETRY_PROGRAM |
1334 BRW_NEW_GS_PROG_DATA |
1335 BRW_NEW_TESS_PROGRAMS |
1336 BRW_NEW_TCS_PROG_DATA |
1337 BRW_NEW_TES_PROG_DATA |
1338 BRW_NEW_TEXTURE_BUFFER |
1339 BRW_NEW_VERTEX_PROGRAM |
1340 BRW_NEW_VS_PROG_DATA,
1341 },
1342 .emit = brw_update_texture_surfaces,
1343 };
1344
1345 static void
1346 brw_update_cs_texture_surfaces(struct brw_context *brw)
1347 {
1348 /* BRW_NEW_COMPUTE_PROGRAM */
1349 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1350
1351 /* _NEW_TEXTURE */
1352 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1353
1354    /* Emit an alternate set of surface state for gather.  This
1355     * allows the surface format to be overridden for only the
1356     * gather4 messages.
1357 */
1358 if (brw->gen < 8) {
1359 if (cs && cs->nir->info->uses_texture_gather)
1360 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1361 }
1362
1363 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1364 }
1365
1366 const struct brw_tracked_state brw_cs_texture_surfaces = {
1367 .dirty = {
1368 .mesa = _NEW_TEXTURE,
1369 .brw = BRW_NEW_BATCH |
1370 BRW_NEW_BLORP |
1371 BRW_NEW_COMPUTE_PROGRAM,
1372 },
1373 .emit = brw_update_cs_texture_surfaces,
1374 };
1375
1376
1377 void
1378 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1379 struct brw_stage_state *stage_state,
1380 struct brw_stage_prog_data *prog_data)
1381 {
1382 struct gl_context *ctx = &brw->ctx;
1383
1384 if (!prog)
1385 return;
1386
1387 uint32_t *ubo_surf_offsets =
1388 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1389
1390 for (int i = 0; i < prog->info.num_ubos; i++) {
1391 struct gl_uniform_buffer_binding *binding =
1392 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1393
1394 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1395 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1396 } else {
1397 struct intel_buffer_object *intel_bo =
1398 intel_buffer_object(binding->BufferObject);
1399 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1400 if (!binding->AutomaticSize)
1401 size = MIN2(size, binding->Size);
1402 struct brw_bo *bo =
1403 intel_bufferobj_buffer(brw, intel_bo,
1404 binding->Offset,
1405 size);
1406 brw_create_constant_surface(brw, bo, binding->Offset,
1407 size,
1408 &ubo_surf_offsets[i]);
1409 }
1410 }
1411
1412 uint32_t *ssbo_surf_offsets =
1413 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1414
1415 for (int i = 0; i < prog->info.num_ssbos; i++) {
1416 struct gl_shader_storage_buffer_binding *binding =
1417 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1418
1419 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1420 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1421 } else {
1422 struct intel_buffer_object *intel_bo =
1423 intel_buffer_object(binding->BufferObject);
1424 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1425 if (!binding->AutomaticSize)
1426 size = MIN2(size, binding->Size);
1427 struct brw_bo *bo =
1428 intel_bufferobj_buffer(brw, intel_bo,
1429 binding->Offset,
1430 size);
1431 brw_create_buffer_surface(brw, bo, binding->Offset,
1432 size,
1433 &ssbo_surf_offsets[i]);
1434 }
1435 }
1436
1437 if (prog->info.num_ubos || prog->info.num_ssbos)
1438 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1439 }
1440
1441 static void
1442 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1443 {
1444 struct gl_context *ctx = &brw->ctx;
1445 /* _NEW_PROGRAM */
1446 struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1447
1448 /* BRW_NEW_FS_PROG_DATA */
1449 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1450 }
1451
1452 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1453 .dirty = {
1454 .mesa = _NEW_PROGRAM,
1455 .brw = BRW_NEW_BATCH |
1456 BRW_NEW_BLORP |
1457 BRW_NEW_FS_PROG_DATA |
1458 BRW_NEW_UNIFORM_BUFFER,
1459 },
1460 .emit = brw_upload_wm_ubo_surfaces,
1461 };
1462
1463 static void
1464 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1465 {
1466 struct gl_context *ctx = &brw->ctx;
1467 /* _NEW_PROGRAM */
1468 struct gl_program *prog =
1469 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1470
1471 /* BRW_NEW_CS_PROG_DATA */
1472 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1473 }
1474
1475 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1476 .dirty = {
1477 .mesa = _NEW_PROGRAM,
1478 .brw = BRW_NEW_BATCH |
1479 BRW_NEW_BLORP |
1480 BRW_NEW_CS_PROG_DATA |
1481 BRW_NEW_UNIFORM_BUFFER,
1482 },
1483 .emit = brw_upload_cs_ubo_surfaces,
1484 };
1485
1486 void
1487 brw_upload_abo_surfaces(struct brw_context *brw,
1488 const struct gl_program *prog,
1489 struct brw_stage_state *stage_state,
1490 struct brw_stage_prog_data *prog_data)
1491 {
1492 struct gl_context *ctx = &brw->ctx;
1493 uint32_t *surf_offsets =
1494 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1495
1496 if (prog->info.num_abos) {
1497 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1498 struct gl_atomic_buffer_binding *binding =
1499 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1500 struct intel_buffer_object *intel_bo =
1501 intel_buffer_object(binding->BufferObject);
1502 struct brw_bo *bo = intel_bufferobj_buffer(
1503 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1504
1505 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1506 binding->Offset, ISL_FORMAT_RAW,
1507 bo->size - binding->Offset, 1, true);
1508 }
1509
1510 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1511 }
1512 }
1513
1514 static void
1515 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1516 {
1517 /* _NEW_PROGRAM */
1518 const struct gl_program *wm = brw->fragment_program;
1519
1520 if (wm) {
1521 /* BRW_NEW_FS_PROG_DATA */
1522 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1523 }
1524 }
1525
1526 const struct brw_tracked_state brw_wm_abo_surfaces = {
1527 .dirty = {
1528 .mesa = _NEW_PROGRAM,
1529 .brw = BRW_NEW_ATOMIC_BUFFER |
1530 BRW_NEW_BLORP |
1531 BRW_NEW_BATCH |
1532 BRW_NEW_FS_PROG_DATA,
1533 },
1534 .emit = brw_upload_wm_abo_surfaces,
1535 };
1536
1537 static void
1538 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1539 {
1540 /* _NEW_PROGRAM */
1541 const struct gl_program *cp = brw->compute_program;
1542
1543 if (cp) {
1544 /* BRW_NEW_CS_PROG_DATA */
1545 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1546 }
1547 }
1548
1549 const struct brw_tracked_state brw_cs_abo_surfaces = {
1550 .dirty = {
1551 .mesa = _NEW_PROGRAM,
1552 .brw = BRW_NEW_ATOMIC_BUFFER |
1553 BRW_NEW_BLORP |
1554 BRW_NEW_BATCH |
1555 BRW_NEW_CS_PROG_DATA,
1556 },
1557 .emit = brw_upload_cs_abo_surfaces,
1558 };
1559
1560 static void
1561 brw_upload_cs_image_surfaces(struct brw_context *brw)
1562 {
1563 /* _NEW_PROGRAM */
1564 const struct gl_program *cp = brw->compute_program;
1565
1566 if (cp) {
1567 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1568 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1569 brw->cs.base.prog_data);
1570 }
1571 }
1572
1573 const struct brw_tracked_state brw_cs_image_surfaces = {
1574 .dirty = {
1575 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1576 .brw = BRW_NEW_BATCH |
1577 BRW_NEW_BLORP |
1578 BRW_NEW_CS_PROG_DATA |
1579 BRW_NEW_IMAGE_UNITS
1580 },
1581 .emit = brw_upload_cs_image_surfaces,
1582 };
1583
1584 static uint32_t
1585 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1586 {
1587 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1588 uint32_t hw_format = brw_isl_format_for_mesa_format(format);
1589 if (access == GL_WRITE_ONLY) {
1590 return hw_format;
1591 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1592 /* Typed surface reads support a very limited subset of the shader
1593 * image formats. Translate it into the closest format the
1594 * hardware supports.
1595 */
1596 return isl_lower_storage_image_format(devinfo, hw_format);
1597 } else {
1598 /* The hardware doesn't actually support a typed format that we can use
1599 * so we have to fall back to untyped read/write messages.
1600 */
1601 return ISL_FORMAT_RAW;
1602 }
1603 }
1604
1605 static void
1606 update_default_image_param(struct brw_context *brw,
1607 struct gl_image_unit *u,
1608 unsigned surface_idx,
1609 struct brw_image_param *param)
1610 {
1611 memset(param, 0, sizeof(*param));
1612 param->surface_idx = surface_idx;
1613 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1614 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1615 * detailed explanation of these parameters.
1616 */
1617 param->swizzling[0] = 0xff;
1618 param->swizzling[1] = 0xff;
1619 }
1620
1621 static void
1622 update_buffer_image_param(struct brw_context *brw,
1623 struct gl_image_unit *u,
1624 unsigned surface_idx,
1625 struct brw_image_param *param)
1626 {
1627 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1628 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1629 update_default_image_param(brw, u, surface_idx, param);
1630
1631 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1632 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1633 }
1634
1635 static void
1636 update_texture_image_param(struct brw_context *brw,
1637 struct gl_image_unit *u,
1638 unsigned surface_idx,
1639 struct brw_image_param *param)
1640 {
1641 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1642
1643 update_default_image_param(brw, u, surface_idx, param);
1644
1645 param->size[0] = minify(mt->logical_width0, u->Level);
1646 param->size[1] = minify(mt->logical_height0, u->Level);
1647 param->size[2] = (!u->Layered ? 1 :
1648 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1649 u->TexObj->Target == GL_TEXTURE_3D ?
1650 minify(mt->logical_depth0, u->Level) :
1651 mt->logical_depth0);
1652
1653 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1654 &param->offset[0],
1655 &param->offset[1]);
1656
1657 param->stride[0] = mt->cpp;
1658 param->stride[1] = mt->pitch / mt->cpp;
1659 param->stride[2] =
1660 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1661 param->stride[3] =
1662 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1663
1664 if (mt->tiling == I915_TILING_X) {
1665 /* An X tile is a rectangular block of 512x8 bytes. */
1666 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1667 param->tiling[1] = _mesa_logbase2(8);
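      /* E.g. with a 4-byte-per-pixel format the shifts above are
       * log2(512 / 4) = 7 and log2(8) = 3, i.e. a 128x8-pixel tile footprint.
       */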
1668
1669 if (brw->has_swizzling) {
1670 /* Right shifts required to swizzle bits 9 and 10 of the memory
1671 * address with bit 6.
1672 */
1673 param->swizzling[0] = 3;
1674 param->swizzling[1] = 4;
1675 }
1676 } else if (mt->tiling == I915_TILING_Y) {
1677 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1678       * different from the layout of an X-tiled surface; we simply pretend that
1679       * the surface is broken up into a number of smaller 16Bx32 tiles, each
1680       * one arranged in X-major order, just as is the case for X-tiling.
1681 */
1682 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1683 param->tiling[1] = _mesa_logbase2(32);
1684
1685 if (brw->has_swizzling) {
1686 /* Right shift required to swizzle bit 9 of the memory address with
1687 * bit 6.
1688 */
1689 param->swizzling[0] = 3;
1690 }
1691 }
1692
1693 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1694 * address calculation algorithm (emit_address_calculation() in
1695 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1696 * modulus equal to the LOD.
1697 */
1698 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1699 0);
1700 }
1701
1702 static void
1703 update_image_surface(struct brw_context *brw,
1704 struct gl_image_unit *u,
1705 GLenum access,
1706 unsigned surface_idx,
1707 uint32_t *surf_offset,
1708 struct brw_image_param *param)
1709 {
1710 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1711 struct gl_texture_object *obj = u->TexObj;
1712 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1713
1714 if (obj->Target == GL_TEXTURE_BUFFER) {
1715 struct intel_buffer_object *intel_obj =
1716 intel_buffer_object(obj->BufferObject);
1717 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1718 _mesa_get_format_bytes(u->_ActualFormat));
1719
1720 brw_emit_buffer_surface_state(
1721 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1722 format, intel_obj->Base.Size, texel_size,
1723 access != GL_READ_ONLY);
1724
1725 update_buffer_image_param(brw, u, surface_idx, param);
1726
1727 } else {
1728 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1729 struct intel_mipmap_tree *mt = intel_obj->mt;
1730
1731 if (format == ISL_FORMAT_RAW) {
1732 brw_emit_buffer_surface_state(
1733 brw, surf_offset, mt->bo, mt->offset,
1734 format, mt->bo->size - mt->offset, 1 /* pitch */,
1735 access != GL_READ_ONLY);
1736
1737 } else {
1738 const unsigned num_layers = (!u->Layered ? 1 :
1739 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1740 mt->logical_depth0);
1741
1742 struct isl_view view = {
1743 .format = format,
1744 .base_level = obj->MinLevel + u->Level,
1745 .levels = 1,
1746 .base_array_layer = obj->MinLayer + u->_Layer,
1747 .array_len = num_layers,
1748 .swizzle = ISL_SWIZZLE_IDENTITY,
1749 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1750 };
1751
1752 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1753 const bool unresolved = intel_miptree_has_color_unresolved(
1754 mt, view.base_level, view.levels,
1755 view.base_array_layer, view.array_len);
1756 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1757 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1758 tex_mocs[brw->gen],
1759 surf_offset, surf_index,
1760 I915_GEM_DOMAIN_SAMPLER,
1761 access == GL_READ_ONLY ? 0 :
1762 I915_GEM_DOMAIN_SAMPLER);
1763 }
1764
1765 update_texture_image_param(brw, u, surface_idx, param);
1766 }
1767
1768 } else {
1769 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1770 update_default_image_param(brw, u, surface_idx, param);
1771 }
1772 }
1773
1774 void
1775 brw_upload_image_surfaces(struct brw_context *brw,
1776 const struct gl_program *prog,
1777 struct brw_stage_state *stage_state,
1778 struct brw_stage_prog_data *prog_data)
1779 {
1780 assert(prog);
1781 struct gl_context *ctx = &brw->ctx;
1782
1783 if (prog->info.num_images) {
1784 for (unsigned i = 0; i < prog->info.num_images; i++) {
1785 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1786 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1787
1788 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1789 surf_idx,
1790 &stage_state->surf_offset[surf_idx],
1791 &prog_data->image_param[i]);
1792 }
1793
1794 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1795       /* This may have changed the image metadata that depends on the
1796        * context image unit state and is passed to the program as uniforms,
1797        * so make sure that push and pull constants are re-uploaded.
1798 */
1799 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1800 }
1801 }
1802
1803 static void
1804 brw_upload_wm_image_surfaces(struct brw_context *brw)
1805 {
1806 /* BRW_NEW_FRAGMENT_PROGRAM */
1807 const struct gl_program *wm = brw->fragment_program;
1808
1809 if (wm) {
1810 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1811 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1812 brw->wm.base.prog_data);
1813 }
1814 }
1815
1816 const struct brw_tracked_state brw_wm_image_surfaces = {
1817 .dirty = {
1818 .mesa = _NEW_TEXTURE,
1819 .brw = BRW_NEW_BATCH |
1820 BRW_NEW_BLORP |
1821 BRW_NEW_FRAGMENT_PROGRAM |
1822 BRW_NEW_FS_PROG_DATA |
1823 BRW_NEW_IMAGE_UNITS
1824 },
1825 .emit = brw_upload_wm_image_surfaces,
1826 };
1827
1828 void
1829 gen4_init_vtable_surface_functions(struct brw_context *brw)
1830 {
1831 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1832 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1833 }
1834
1835 void
1836 gen6_init_vtable_surface_functions(struct brw_context *brw)
1837 {
1838 gen4_init_vtable_surface_functions(brw);
1839 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1840 }
1841
1842 static void
1843 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1844 {
1845 struct gl_context *ctx = &brw->ctx;
1846 /* _NEW_PROGRAM */
1847 struct gl_program *prog =
1848 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1849 /* BRW_NEW_CS_PROG_DATA */
1850 const struct brw_cs_prog_data *cs_prog_data =
1851 brw_cs_prog_data(brw->cs.base.prog_data);
1852
1853 if (prog && cs_prog_data->uses_num_work_groups) {
1854 const unsigned surf_idx =
1855 cs_prog_data->binding_table.work_groups_start;
1856 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1857 struct brw_bo *bo;
1858 uint32_t bo_offset;
1859
1860 if (brw->compute.num_work_groups_bo == NULL) {
1861 bo = NULL;
1862 intel_upload_data(brw,
1863 (void *)brw->compute.num_work_groups,
1864 3 * sizeof(GLuint),
1865 sizeof(GLuint),
1866 &bo,
1867 &bo_offset);
1868 } else {
1869 bo = brw->compute.num_work_groups_bo;
1870 bo_offset = brw->compute.num_work_groups_offset;
1871 }
1872
1873 brw_emit_buffer_surface_state(brw, surf_offset,
1874 bo, bo_offset,
1875 ISL_FORMAT_RAW,
1876 3 * sizeof(GLuint), 1, true);
1877 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1878 }
1879 }
1880
1881 const struct brw_tracked_state brw_cs_work_groups_surface = {
1882 .dirty = {
1883 .brw = BRW_NEW_BLORP |
1884 BRW_NEW_CS_PROG_DATA |
1885 BRW_NEW_CS_WORK_GROUPS
1886 },
1887 .emit = brw_upload_cs_work_groups_surface,
1888 };