i965: replace brw_fragment_program with brw_program
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 struct surface_state_info {
64 unsigned num_dwords;
65 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
66 unsigned reloc_dw;
67 unsigned aux_reloc_dw;
68 unsigned tex_mocs;
69 unsigned rb_mocs;
70 };
71
72 static const struct surface_state_info surface_state_infos[] = {
73 [4] = {6, 32, 1, 0},
74 [5] = {6, 32, 1, 0},
75 [6] = {6, 32, 1, 0},
76 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
77 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
78 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
79 };
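/* Each entry above is indexed by brw->gen.  num_dwords gives the size of
 * RENDER_SURFACE_STATE in DWORDs for that generation, reloc_dw is the DWORD
 * holding the surface base address (and therefore the one that gets a
 * relocation), aux_reloc_dw is the DWORD holding the auxiliary (MCS/CCS)
 * address, and tex_mocs/rb_mocs are the cacheability (MOCS) values used when
 * the surface is bound as a texture or as a render target, respectively.
 */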
80
81 static void
82 brw_emit_surface_state(struct brw_context *brw,
83 struct intel_mipmap_tree *mt, uint32_t flags,
84 GLenum target, struct isl_view view,
85 uint32_t mocs, uint32_t *surf_offset, int surf_index,
86 unsigned read_domains, unsigned write_domains)
87 {
88 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
89 uint32_t tile_x = mt->level[0].slice[0].x_offset;
90 uint32_t tile_y = mt->level[0].slice[0].y_offset;
91 uint32_t offset = mt->offset;
92
93 struct isl_surf surf;
94 intel_miptree_get_isl_surf(brw, mt, &surf);
95
96 surf.dim = get_isl_surf_dim(target);
97
98 const enum isl_dim_layout dim_layout =
99 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
100
101 if (surf.dim_layout != dim_layout) {
102 /* The layout of the specified texture target is not compatible with the
103 * actual layout of the miptree structure in memory -- you're entering
104 * dangerous territory: this can only possibly work if you only intended
105 * to access a single level and slice of the texture, and the hardware
106 * supports the tile offset feature in order to allow non-tile-aligned
107 * base offsets, since we'll have to point the hardware to the first
108 * texel of the level instead of relying on the usual base level/layer
109 * controls.
110 */
111 assert(brw->has_surface_tile_offset);
112 assert(view.levels == 1 && view.array_len == 1);
113 assert(tile_x == 0 && tile_y == 0);
114
115 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
116 view.base_array_layer,
117 &tile_x, &tile_y);
118
119 /* Minify the logical dimensions of the texture. */
120 const unsigned l = view.base_level - mt->first_level;
121 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
122 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
123 minify(surf.logical_level0_px.height, l);
124 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
125 minify(surf.logical_level0_px.depth, l);
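      /* A quick illustration (hypothetical numbers): sampling level 2 of a
       * 64x64 2D miptree through this path gives l = 2, so the overridden
       * surface is described as a single-level, single-layer 16x16 surface
       * whose base address already points at that level's first texel.
       */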
126
127 /* Only the base level and layer can be addressed with the overridden
128 * layout.
129 */
130 surf.logical_level0_px.array_len = 1;
131 surf.levels = 1;
132 surf.dim_layout = dim_layout;
133
134 /* The requested slice of the texture is now at the base level and
135 * layer.
136 */
137 view.base_level = 0;
138 view.base_array_layer = 0;
139 }
140
141 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
142
143 struct isl_surf *aux_surf = NULL, aux_surf_s;
144 uint64_t aux_offset = 0;
145 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
146 if (mt->mcs_mt && !(flags & INTEL_AUX_BUFFER_DISABLED)) {
147 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
148 aux_surf = &aux_surf_s;
149 assert(mt->mcs_mt->offset == 0);
150 aux_offset = mt->mcs_mt->bo->offset64;
151
152 /* We only really need a clear color if we also have an auxiliary
153 * surface. Without one, it does nothing.
154 */
155 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
156 }
157
158 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
159 ss_info.num_dwords * 4, ss_info.ss_align,
160 surf_index, surf_offset);
161
162 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
163 .address = mt->bo->offset64 + offset,
164 .aux_surf = aux_surf, .aux_usage = aux_usage,
165 .aux_address = aux_offset,
166 .mocs = mocs, .clear_color = clear_color,
167 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
168
169 drm_intel_bo_emit_reloc(brw->batch.bo,
170 *surf_offset + 4 * ss_info.reloc_dw,
171 mt->bo, offset,
172 read_domains, write_domains);
173
174 if (aux_surf) {
175 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
176 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
177 * contain other control information. Since buffer addresses are always
178 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
179 * an ordinary reloc to do the necessary address translation.
180 */
181 assert((aux_offset & 0xfff) == 0);
182 drm_intel_bo_emit_reloc(brw->batch.bo,
183 *surf_offset + 4 * ss_info.aux_reloc_dw,
184 mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
185 read_domains, write_domains);
186 }
187 }
188
189 uint32_t
190 brw_update_renderbuffer_surface(struct brw_context *brw,
191 struct gl_renderbuffer *rb,
192 uint32_t flags, unsigned unit /* unused */,
193 uint32_t surf_index)
194 {
195 struct gl_context *ctx = &brw->ctx;
196 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
197 struct intel_mipmap_tree *mt = irb->mt;
198
199 if (brw->gen < 9) {
200 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
201 }
202
203 assert(brw_render_target_supported(brw, rb));
204 intel_miptree_used_for_rendering(mt);
205
206 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
207 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
208 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
209 __func__, _mesa_get_format_name(rb_format));
210 }
211
212 const unsigned layer_multiplier =
213 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
214 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
215 MAX2(irb->mt->num_samples, 1) : 1;
216
217 struct isl_view view = {
218 .format = brw->render_target_format[rb_format],
219 .base_level = irb->mt_level - irb->mt->first_level,
220 .levels = 1,
221 .base_array_layer = irb->mt_layer / layer_multiplier,
222 .array_len = MAX2(irb->layer_count, 1),
223 .swizzle = ISL_SWIZZLE_IDENTITY,
224 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
225 };
226
227 uint32_t offset;
228 brw_emit_surface_state(brw, mt, flags, mt->target, view,
229 surface_state_infos[brw->gen].rb_mocs,
230 &offset, surf_index,
231 I915_GEM_DOMAIN_RENDER,
232 I915_GEM_DOMAIN_RENDER);
233 return offset;
234 }
235
236 GLuint
237 translate_tex_target(GLenum target)
238 {
239 switch (target) {
240 case GL_TEXTURE_1D:
241 case GL_TEXTURE_1D_ARRAY_EXT:
242 return BRW_SURFACE_1D;
243
244 case GL_TEXTURE_RECTANGLE_NV:
245 return BRW_SURFACE_2D;
246
247 case GL_TEXTURE_2D:
248 case GL_TEXTURE_2D_ARRAY_EXT:
249 case GL_TEXTURE_EXTERNAL_OES:
250 case GL_TEXTURE_2D_MULTISAMPLE:
251 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
252 return BRW_SURFACE_2D;
253
254 case GL_TEXTURE_3D:
255 return BRW_SURFACE_3D;
256
257 case GL_TEXTURE_CUBE_MAP:
258 case GL_TEXTURE_CUBE_MAP_ARRAY:
259 return BRW_SURFACE_CUBE;
260
261 default:
262 unreachable("not reached");
263 }
264 }
265
266 uint32_t
267 brw_get_surface_tiling_bits(uint32_t tiling)
268 {
269 switch (tiling) {
270 case I915_TILING_X:
271 return BRW_SURFACE_TILED;
272 case I915_TILING_Y:
273 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
274 default:
275 return 0;
276 }
277 }
278
279
280 uint32_t
281 brw_get_surface_num_multisamples(unsigned num_samples)
282 {
283 if (num_samples > 1)
284 return BRW_SURFACE_MULTISAMPLECOUNT_4;
285 else
286 return BRW_SURFACE_MULTISAMPLECOUNT_1;
287 }
288
289 /**
290 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
291 * swizzling.
292 */
293 int
294 brw_get_texture_swizzle(const struct gl_context *ctx,
295 const struct gl_texture_object *t)
296 {
297 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
298
299 int swizzles[SWIZZLE_NIL + 1] = {
300 SWIZZLE_X,
301 SWIZZLE_Y,
302 SWIZZLE_Z,
303 SWIZZLE_W,
304 SWIZZLE_ZERO,
305 SWIZZLE_ONE,
306 SWIZZLE_NIL
307 };
308
309 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
310 img->_BaseFormat == GL_DEPTH_STENCIL) {
311 GLenum depth_mode = t->DepthMode;
312
313 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
314 * with depth component data specified with a sized internal format.
315 * Otherwise, it's left at the old default, GL_LUMINANCE.
316 */
317 if (_mesa_is_gles3(ctx) &&
318 img->InternalFormat != GL_DEPTH_COMPONENT &&
319 img->InternalFormat != GL_DEPTH_STENCIL) {
320 depth_mode = GL_RED;
321 }
322
323 switch (depth_mode) {
324 case GL_ALPHA:
325 swizzles[0] = SWIZZLE_ZERO;
326 swizzles[1] = SWIZZLE_ZERO;
327 swizzles[2] = SWIZZLE_ZERO;
328 swizzles[3] = SWIZZLE_X;
329 break;
330 case GL_LUMINANCE:
331 swizzles[0] = SWIZZLE_X;
332 swizzles[1] = SWIZZLE_X;
333 swizzles[2] = SWIZZLE_X;
334 swizzles[3] = SWIZZLE_ONE;
335 break;
336 case GL_INTENSITY:
337 swizzles[0] = SWIZZLE_X;
338 swizzles[1] = SWIZZLE_X;
339 swizzles[2] = SWIZZLE_X;
340 swizzles[3] = SWIZZLE_X;
341 break;
342 case GL_RED:
343 swizzles[0] = SWIZZLE_X;
344 swizzles[1] = SWIZZLE_ZERO;
345 swizzles[2] = SWIZZLE_ZERO;
346 swizzles[3] = SWIZZLE_ONE;
347 break;
348 }
349 }
350
351 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
352
353 /* If the texture's format is alpha-only, force R, G, and B to
354 * 0.0. Similarly, if the texture's format has no alpha channel,
355 * force the alpha value read to 1.0. This allows for the
356 * implementation to use an RGBA texture for any of these formats
357 * without leaking any unexpected values.
358 */
359 switch (img->_BaseFormat) {
360 case GL_ALPHA:
361 swizzles[0] = SWIZZLE_ZERO;
362 swizzles[1] = SWIZZLE_ZERO;
363 swizzles[2] = SWIZZLE_ZERO;
364 break;
365 case GL_LUMINANCE:
366 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
367 swizzles[0] = SWIZZLE_X;
368 swizzles[1] = SWIZZLE_X;
369 swizzles[2] = SWIZZLE_X;
370 swizzles[3] = SWIZZLE_ONE;
371 }
372 break;
373 case GL_LUMINANCE_ALPHA:
374 if (datatype == GL_SIGNED_NORMALIZED) {
375 swizzles[0] = SWIZZLE_X;
376 swizzles[1] = SWIZZLE_X;
377 swizzles[2] = SWIZZLE_X;
378 swizzles[3] = SWIZZLE_W;
379 }
380 break;
381 case GL_INTENSITY:
382 if (datatype == GL_SIGNED_NORMALIZED) {
383 swizzles[0] = SWIZZLE_X;
384 swizzles[1] = SWIZZLE_X;
385 swizzles[2] = SWIZZLE_X;
386 swizzles[3] = SWIZZLE_X;
387 }
388 break;
389 case GL_RED:
390 case GL_RG:
391 case GL_RGB:
392 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
393 swizzles[3] = SWIZZLE_ONE;
394 break;
395 }
396
397 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
398 swizzles[GET_SWZ(t->_Swizzle, 1)],
399 swizzles[GET_SWZ(t->_Swizzle, 2)],
400 swizzles[GET_SWZ(t->_Swizzle, 3)]);
401 }
402
403 /**
404 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
405 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
406 *
407 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
408 * 0 1 2 3 4 5
409 * 4 5 6 7 0 1
410 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
411 *
412 * which is simply adding 4 then modding by 8 (or anding with 7).
413 *
414 * We then may need to apply workarounds for textureGather hardware bugs.
415 */
416 static unsigned
417 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
418 {
419 unsigned scs = (swizzle + 4) & 7;
420
421 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
422 }
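/* A quick illustration of the mapping (values follow the table in the comment
 * above, purely for clarity):
 *
 *    swizzle_to_scs(SWIZZLE_X, false)    -> (0 + 4) & 7 = 4 = HSW_SCS_RED
 *    swizzle_to_scs(SWIZZLE_ZERO, false) -> (4 + 4) & 7 = 0 = HSW_SCS_ZERO
 *    swizzle_to_scs(SWIZZLE_Y, true)     -> 5 = HSW_SCS_GREEN, remapped to
 *                                           HSW_SCS_BLUE by the gather
 *                                           workaround.
 */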
423
424 static unsigned
425 brw_find_matching_rb(const struct gl_framebuffer *fb,
426 const struct intel_mipmap_tree *mt)
427 {
428 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
429 const struct intel_renderbuffer *irb =
430 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
431
432 if (irb && irb->mt == mt)
433 return i;
434 }
435
436 return fb->_NumColorDrawBuffers;
437 }
438
439 static inline bool
440 brw_texture_view_sane(const struct brw_context *brw,
441 const struct intel_mipmap_tree *mt, unsigned format)
442 {
443 /* There are special cases only for lossless compression. */
444 if (!intel_miptree_is_lossless_compressed(brw, mt))
445 return true;
446
447 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
448 format))
449 return true;
450
451 /* Logic elsewhere needs to take care to resolve the color buffer prior
452 * to sampling it as non-compressed.
453 */
454 if (mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)
455 return false;
456
457 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
458 const unsigned rb_index = brw_find_matching_rb(fb, mt);
459
460 if (rb_index == fb->_NumColorDrawBuffers)
461 return true;
462
463 /* Underlying surface is compressed but it is sampled using a format that
464 * the sampling engine doesn't support as compressed. Compression must be
465 * disabled for both sampling engine and data port in case the same surface
466 * is used also as render target.
467 */
468 return brw->draw_aux_buffer_disabled[rb_index];
469 }
470
471 static bool
472 brw_disable_aux_surface(const struct brw_context *brw,
473 const struct intel_mipmap_tree *mt)
474 {
475 /* Nothing to disable. */
476 if (!mt->mcs_mt)
477 return false;
478
479 /* There are special cases only for lossless compression. */
480 if (!intel_miptree_is_lossless_compressed(brw, mt))
481 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
482
483 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
484 const unsigned rb_index = brw_find_matching_rb(fb, mt);
485
486 /* If we are drawing into this with compression enabled, then we must also
487 * enable compression when texturing from it regardless of
488 * fast_clear_state. If we don't, then after the first draw call with
489 * this setup there will be data in the CCS which won't get picked up by
490 * subsequent texturing operations as required by ARB_texture_barrier.
491 * Since we don't want to re-emit the binding table or do a resolve
492 * operation every draw call, the easiest thing to do is just enable
493 * compression on the texturing side. This is completely safe to do
494 * since, if compressed texturing weren't allowed, we would have disabled
495 * compression of render targets in whatever_that_function_is_called().
496 */
497 if (rb_index < fb->_NumColorDrawBuffers) {
498 if (brw->draw_aux_buffer_disabled[rb_index]) {
499 assert(mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED);
500 }
501
502 return brw->draw_aux_buffer_disabled[rb_index];
503 }
504
505 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
506 }
507
508 void
509 brw_update_texture_surface(struct gl_context *ctx,
510 unsigned unit,
511 uint32_t *surf_offset,
512 bool for_gather,
513 uint32_t plane)
514 {
515 struct brw_context *brw = brw_context(ctx);
516 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
517
518 if (obj->Target == GL_TEXTURE_BUFFER) {
519 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
520
521 } else {
522 struct intel_texture_object *intel_obj = intel_texture_object(obj);
523 struct intel_mipmap_tree *mt = intel_obj->mt;
524
525 if (plane > 0) {
526 if (mt->plane[plane - 1] == NULL)
527 return;
528 mt = mt->plane[plane - 1];
529 }
530
531 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
532 /* If this is a view with restricted NumLayers, then our effective depth
533 * is not just the miptree depth.
534 */
535 const unsigned view_num_layers =
536 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
537 mt->logical_depth0;
538
539 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
540 * texturing functions that return a float, as our code generation always
541 * selects the .x channel (which would always be 0).
542 */
543 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
544 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
545 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
546 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
547 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
548 brw_get_texture_swizzle(&brw->ctx, obj));
549
550 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
551 unsigned format = translate_tex_format(brw, mesa_fmt,
552 sampler->sRGBDecode);
553
554 /* Implement gen6 and gen7 gather work-around */
555 bool need_green_to_blue = false;
556 if (for_gather) {
557 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
558 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
559 need_green_to_blue = brw->is_haswell;
560 } else if (brw->gen == 6) {
561 /* Sandybridge's gather4 message is broken for integer formats.
562 * To work around this, we pretend the surface is UNORM for
563 * 8 or 16-bit formats, and emit shader instructions to recover
564 * the real INT/UINT value. For 32-bit formats, we pretend
565 * the surface is FLOAT, and simply reinterpret the resulting
566 * bits.
567 */
568 switch (format) {
569 case BRW_SURFACEFORMAT_R8_SINT:
570 case BRW_SURFACEFORMAT_R8_UINT:
571 format = BRW_SURFACEFORMAT_R8_UNORM;
572 break;
573
574 case BRW_SURFACEFORMAT_R16_SINT:
575 case BRW_SURFACEFORMAT_R16_UINT:
576 format = BRW_SURFACEFORMAT_R16_UNORM;
577 break;
578
579 case BRW_SURFACEFORMAT_R32_SINT:
580 case BRW_SURFACEFORMAT_R32_UINT:
581 format = BRW_SURFACEFORMAT_R32_FLOAT;
582 break;
583
584 default:
585 break;
586 }
587 }
588 }
589
590 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
591 if (brw->gen <= 7) {
592 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
593 mt = mt->r8stencil_mt;
594 } else {
595 mt = mt->stencil_mt;
596 }
597 format = BRW_SURFACEFORMAT_R8_UINT;
598 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
599 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
600 mt = mt->r8stencil_mt;
601 format = BRW_SURFACEFORMAT_R8_UINT;
602 }
603
604 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
605
606 struct isl_view view = {
607 .format = format,
608 .base_level = obj->MinLevel + obj->BaseLevel,
609 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
610 .base_array_layer = obj->MinLayer,
611 .array_len = view_num_layers,
612 .swizzle = {
613 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
614 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
615 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
616 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
617 },
618 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
619 };
620
621 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
622 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
623 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
624
625 assert(brw_texture_view_sane(brw, mt, format));
626
627 const int flags =
628 brw_disable_aux_surface(brw, mt) ? INTEL_AUX_BUFFER_DISABLED : 0;
629 brw_emit_surface_state(brw, mt, flags, mt->target, view,
630 surface_state_infos[brw->gen].tex_mocs,
631 surf_offset, surf_index,
632 I915_GEM_DOMAIN_SAMPLER, 0);
633 }
634 }
635
636 void
637 brw_emit_buffer_surface_state(struct brw_context *brw,
638 uint32_t *out_offset,
639 drm_intel_bo *bo,
640 unsigned buffer_offset,
641 unsigned surface_format,
642 unsigned buffer_size,
643 unsigned pitch,
644 bool rw)
645 {
646 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
647
648 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
649 ss_info.num_dwords * 4, ss_info.ss_align,
650 out_offset);
651
652 isl_buffer_fill_state(&brw->isl_dev, dw,
653 .address = (bo ? bo->offset64 : 0) + buffer_offset,
654 .size = buffer_size,
655 .format = surface_format,
656 .stride = pitch,
657 .mocs = ss_info.tex_mocs);
658
659 if (bo) {
660 drm_intel_bo_emit_reloc(brw->batch.bo,
661 *out_offset + 4 * ss_info.reloc_dw,
662 bo, buffer_offset,
663 I915_GEM_DOMAIN_SAMPLER,
664 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
665 }
666 }
667
668 void
669 brw_update_buffer_texture_surface(struct gl_context *ctx,
670 unsigned unit,
671 uint32_t *surf_offset)
672 {
673 struct brw_context *brw = brw_context(ctx);
674 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
675 struct intel_buffer_object *intel_obj =
676 intel_buffer_object(tObj->BufferObject);
677 uint32_t size = tObj->BufferSize;
678 drm_intel_bo *bo = NULL;
679 mesa_format format = tObj->_BufferObjectFormat;
680 uint32_t brw_format = brw_format_for_mesa_format(format);
681 int texel_size = _mesa_get_format_bytes(format);
682
683 if (intel_obj) {
684 size = MIN2(size, intel_obj->Base.Size);
685 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
686 }
687
688 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
689 _mesa_problem(NULL, "bad format %s for texture buffer\n",
690 _mesa_get_format_name(format));
691 }
692
693 brw_emit_buffer_surface_state(brw, surf_offset, bo,
694 tObj->BufferOffset,
695 brw_format,
696 size,
697 texel_size,
698 false /* rw */);
699 }
700
701 /**
702 * Create the constant buffer surface. Vertex/fragment shader constants will be
703 * read from this buffer with Data Port Read instructions/messages.
704 */
705 void
706 brw_create_constant_surface(struct brw_context *brw,
707 drm_intel_bo *bo,
708 uint32_t offset,
709 uint32_t size,
710 uint32_t *out_offset)
711 {
712 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
713 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
714 size, 1, false);
715 }
716
717 /**
718 * Create the buffer surface. Shader buffer variables will be
719 * read from / written to this buffer with Data Port Read/Write
720 * instructions/messages.
721 */
722 void
723 brw_create_buffer_surface(struct brw_context *brw,
724 drm_intel_bo *bo,
725 uint32_t offset,
726 uint32_t size,
727 uint32_t *out_offset)
728 {
729 /* Use a raw surface so we can reuse existing untyped read/write/atomic
730 * messages. We need these specifically for the fragment shader since they
731 * include a pixel mask header that we need to ensure correct behavior
732 * with helper invocations, which cannot write to the buffer.
733 */
734 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
735 BRW_SURFACEFORMAT_RAW,
736 size, 1, true);
737 }
738
739 /**
740 * Set up a binding table entry for use by stream output logic (transform
741 * feedback).
742 *
743 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
744 */
745 void
746 brw_update_sol_surface(struct brw_context *brw,
747 struct gl_buffer_object *buffer_obj,
748 uint32_t *out_offset, unsigned num_vector_components,
749 unsigned stride_dwords, unsigned offset_dwords)
750 {
751 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
752 uint32_t offset_bytes = 4 * offset_dwords;
753 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
754 offset_bytes,
755 buffer_obj->Size - offset_bytes);
756 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
757 out_offset);
758 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
759 size_t size_dwords = buffer_obj->Size / 4;
760 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
761
762 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
763 * too big to map using a single binding table entry?
764 */
765 assert((size_dwords - offset_dwords) / stride_dwords
766 <= BRW_MAX_NUM_BUFFER_ENTRIES);
767
768 if (size_dwords > offset_dwords + num_vector_components) {
769 /* There is room for at least 1 transform feedback output in the buffer.
770 * Compute the number of additional transform feedback outputs the
771 * buffer has room for.
772 */
773 buffer_size_minus_1 =
774 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
775 } else {
776 /* There isn't even room for a single transform feedback output in the
777 * buffer. We can't configure the binding table entry to prevent output
778 * entirely; we'll have to rely on the geometry shader to detect
779 * overflow. But to minimize the damage in case of a bug, set up the
780 * binding table entry to just allow a single output.
781 */
782 buffer_size_minus_1 = 0;
783 }
784 width = buffer_size_minus_1 & 0x7f;
785 height = (buffer_size_minus_1 & 0xfff80) >> 7;
786 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
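   /* A worked example (hypothetical numbers, purely illustrative): a
    * 4096-byte buffer receiving vec4 outputs at a stride of 4 dwords with no
    * offset gives size_dwords = 1024 and
    * buffer_size_minus_1 = (1024 - 0 - 4) / 4 = 255, which is then split into
    * width = 255 & 0x7f = 127, height = 255 >> 7 = 1, depth = 0, with
    * pitch_minus_1 = 4 * 4 - 1 = 15.
    */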
787
788 switch (num_vector_components) {
789 case 1:
790 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
791 break;
792 case 2:
793 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
794 break;
795 case 3:
796 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
797 break;
798 case 4:
799 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
800 break;
801 default:
802 unreachable("Invalid vector size for transform feedback output");
803 }
804
805 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
806 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
807 surface_format << BRW_SURFACE_FORMAT_SHIFT |
808 BRW_SURFACE_RC_READ_WRITE;
809 surf[1] = bo->offset64 + offset_bytes; /* reloc */
810 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
811 height << BRW_SURFACE_HEIGHT_SHIFT);
812 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
813 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
814 surf[4] = 0;
815 surf[5] = 0;
816
817 /* Emit relocation to surface contents. */
818 drm_intel_bo_emit_reloc(brw->batch.bo,
819 *out_offset + 4,
820 bo, offset_bytes,
821 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
822 }
823
824 /* Creates a new WM constant buffer reflecting the current fragment program's
825 * constants, if needed by the fragment program.
826 *
827 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
828 * state atom.
829 */
830 static void
831 brw_upload_wm_pull_constants(struct brw_context *brw)
832 {
833 struct brw_stage_state *stage_state = &brw->wm.base;
834 /* BRW_NEW_FRAGMENT_PROGRAM */
835 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
836 /* BRW_NEW_FS_PROG_DATA */
837 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
838
839 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
840 /* _NEW_PROGRAM_CONSTANTS */
841 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
842 stage_state, prog_data);
843 }
844
845 const struct brw_tracked_state brw_wm_pull_constants = {
846 .dirty = {
847 .mesa = _NEW_PROGRAM_CONSTANTS,
848 .brw = BRW_NEW_BATCH |
849 BRW_NEW_BLORP |
850 BRW_NEW_FRAGMENT_PROGRAM |
851 BRW_NEW_FS_PROG_DATA,
852 },
853 .emit = brw_upload_wm_pull_constants,
854 };
855
856 /**
857 * Creates a null renderbuffer surface.
858 *
859 * This is used when the shader doesn't write to any color output. An FB
860 * write to target 0 will still be emitted, because that's how the thread is
861 * terminated (and computed depth is returned), so we need to have the
862 * hardware discard the target 0 color output.
863 */
864 static void
865 brw_emit_null_surface_state(struct brw_context *brw,
866 unsigned width,
867 unsigned height,
868 unsigned samples,
869 uint32_t *out_offset)
870 {
871 /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
872 * Notes):
873 *
874 * A null surface will be used in instances where an actual surface is
875 * not bound. When a write message is generated to a null surface, no
876 * actual surface is written to. When a read message (including any
877 * sampling engine message) is generated to a null surface, the result
878 * is all zeros. Note that a null surface type is allowed to be used
879 * with all messages, even if it is not specifically indicated as
880 * supported. All of the remaining fields in surface state are ignored
881 * for null surfaces, with the following exceptions:
882 *
883 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
884 * depth buffer’s corresponding state for all render target surfaces,
885 * including null.
886 *
887 * - Surface Format must be R8G8B8A8_UNORM.
888 */
889 unsigned surface_type = BRW_SURFACE_NULL;
890 drm_intel_bo *bo = NULL;
891 unsigned pitch_minus_1 = 0;
892 uint32_t multisampling_state = 0;
893 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
894 out_offset);
895
896 if (samples > 1) {
897 /* On Gen6, null render targets seem to cause GPU hangs when
898 * multisampling. So work around this problem by rendering into a dummy
899 * color buffer.
900 *
901 * To decrease the amount of memory needed by the workaround buffer, we
902 * set its pitch to 128 bytes (the width of a Y tile). This means that
903 * the amount of memory needed for the workaround buffer is
904 * (width_in_tiles + height_in_tiles - 1) tiles.
905 *
906 * Note that since the workaround buffer will be interpreted by the
907 * hardware as an interleaved multisampled buffer, we need to compute
908 * width_in_tiles and height_in_tiles by dividing the width and height
909 * by 16 rather than the normal Y-tile size of 32.
910 */
911 unsigned width_in_tiles = ALIGN(width, 16) / 16;
912 unsigned height_in_tiles = ALIGN(height, 16) / 16;
913 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
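      /* For example (illustrative numbers only): a 1920x1080 multisampled
       * framebuffer gives width_in_tiles = 120 and height_in_tiles = 68, so
       * the dummy buffer needs only (120 + 68 - 1) * 4096 bytes (~748 KB)
       * instead of a full-sized color buffer.
       */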
914 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
915 size_needed);
916 bo = brw->wm.multisampled_null_render_target_bo;
917 surface_type = BRW_SURFACE_2D;
918 pitch_minus_1 = 127;
919 multisampling_state = brw_get_surface_num_multisamples(samples);
920 }
921
922 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
923 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
924 if (brw->gen < 6) {
925 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
926 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
927 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
928 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
929 }
930 surf[1] = bo ? bo->offset64 : 0;
931 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
932 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
933
934 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
935 * Notes):
936 *
937 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
938 */
939 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
940 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
941 surf[4] = multisampling_state;
942 surf[5] = 0;
943
944 if (bo) {
945 drm_intel_bo_emit_reloc(brw->batch.bo,
946 *out_offset + 4,
947 bo, 0,
948 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
949 }
950 }
951
952 /**
953 * Sets up a surface state structure to point at the given region.
954 * While it is only used for the front/back buffer currently, it should be
955 * usable for further buffers when doing ARB_draw_buffers support.
956 */
957 static uint32_t
958 gen4_update_renderbuffer_surface(struct brw_context *brw,
959 struct gl_renderbuffer *rb,
960 uint32_t flags, unsigned unit,
961 uint32_t surf_index)
962 {
963 struct gl_context *ctx = &brw->ctx;
964 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
965 struct intel_mipmap_tree *mt = irb->mt;
966 uint32_t *surf;
967 uint32_t tile_x, tile_y;
968 uint32_t format = 0;
969 uint32_t offset;
970 /* _NEW_BUFFERS */
971 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
972 /* BRW_NEW_FS_PROG_DATA */
973
974 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
975 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
976
977 if (rb->TexImage && !brw->has_surface_tile_offset) {
978 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
979
980 if (tile_x != 0 || tile_y != 0) {
981 /* Original gen4 hardware couldn't draw to a non-tile-aligned
982 * destination in a miptree unless you actually setup your renderbuffer
983 * as a miptree and used the fragile lod/array_index/etc. controls to
984 * select the image. So, instead, we just make a new single-level
985 * miptree and render into that.
986 */
987 intel_renderbuffer_move_to_temp(brw, irb, false);
988 mt = irb->mt;
989 }
990 }
991
992 intel_miptree_used_for_rendering(irb->mt);
993
994 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
995
996 format = brw->render_target_format[rb_format];
997 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
998 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
999 __func__, _mesa_get_format_name(rb_format));
1000 }
1001
1002 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1003 format << BRW_SURFACE_FORMAT_SHIFT);
1004
1005 /* reloc */
1006 assert(mt->offset % mt->cpp == 0);
1007 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1008 mt->bo->offset64 + mt->offset);
1009
1010 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1011 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1012
1013 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1014 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1015
1016 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1017
1018 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1019 /* Note that the low bits of these fields are missing, so
1020 * there's the possibility of getting in trouble.
1021 */
1022 assert(tile_x % 4 == 0);
1023 assert(tile_y % 2 == 0);
1024 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1025 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1026 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1027
1028 if (brw->gen < 6) {
1029 /* _NEW_COLOR */
1030 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1031 (ctx->Color.BlendEnabled & (1 << unit)))
1032 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1033
1034 if (!ctx->Color.ColorMask[unit][0])
1035 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1036 if (!ctx->Color.ColorMask[unit][1])
1037 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1038 if (!ctx->Color.ColorMask[unit][2])
1039 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1040
1041 /* As mentioned above, disable writes to the alpha component when the
1042 * renderbuffer is XRGB.
1043 */
1044 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1045 !ctx->Color.ColorMask[unit][3]) {
1046 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1047 }
1048 }
1049
1050 drm_intel_bo_emit_reloc(brw->batch.bo,
1051 offset + 4,
1052 mt->bo,
1053 surf[1] - mt->bo->offset64,
1054 I915_GEM_DOMAIN_RENDER,
1055 I915_GEM_DOMAIN_RENDER);
1056
1057 return offset;
1058 }
1059
1060 /**
1061 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1062 */
1063 void
1064 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1065 const struct gl_framebuffer *fb,
1066 uint32_t render_target_start,
1067 uint32_t *surf_offset)
1068 {
1069 GLuint i;
1070 const unsigned int w = _mesa_geometric_width(fb);
1071 const unsigned int h = _mesa_geometric_height(fb);
1072 const unsigned int s = _mesa_geometric_samples(fb);
1073
1074 /* Update surfaces for drawing buffers */
1075 if (fb->_NumColorDrawBuffers >= 1) {
1076 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1077 const uint32_t surf_index = render_target_start + i;
1078 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1079 INTEL_RENDERBUFFER_LAYERED : 0) |
1080 (brw->draw_aux_buffer_disabled[i] ?
1081 INTEL_AUX_BUFFER_DISABLED : 0);
1082
1083 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1084 surf_offset[surf_index] =
1085 brw->vtbl.update_renderbuffer_surface(
1086 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1087 } else {
1088 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1089 &surf_offset[surf_index]);
1090 }
1091 }
1092 } else {
1093 const uint32_t surf_index = render_target_start;
1094 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1095 &surf_offset[surf_index]);
1096 }
1097 }
1098
1099 static void
1100 update_renderbuffer_surfaces(struct brw_context *brw)
1101 {
1102 const struct gl_context *ctx = &brw->ctx;
1103
1104 /* BRW_NEW_FS_PROG_DATA */
1105 const struct brw_wm_prog_data *wm_prog_data =
1106 brw_wm_prog_data(brw->wm.base.prog_data);
1107
1108 /* _NEW_BUFFERS | _NEW_COLOR */
1109 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1110 brw_update_renderbuffer_surfaces(
1111 brw, fb,
1112 wm_prog_data->binding_table.render_target_start,
1113 brw->wm.base.surf_offset);
1114 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1115 }
1116
1117 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1118 .dirty = {
1119 .mesa = _NEW_BUFFERS |
1120 _NEW_COLOR,
1121 .brw = BRW_NEW_BATCH |
1122 BRW_NEW_BLORP |
1123 BRW_NEW_FS_PROG_DATA,
1124 },
1125 .emit = update_renderbuffer_surfaces,
1126 };
1127
1128 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1129 .dirty = {
1130 .mesa = _NEW_BUFFERS,
1131 .brw = BRW_NEW_BATCH |
1132 BRW_NEW_BLORP,
1133 },
1134 .emit = update_renderbuffer_surfaces,
1135 };
1136
1137 static void
1138 update_renderbuffer_read_surfaces(struct brw_context *brw)
1139 {
1140 const struct gl_context *ctx = &brw->ctx;
1141
1142 /* BRW_NEW_FS_PROG_DATA */
1143 const struct brw_wm_prog_data *wm_prog_data =
1144 brw_wm_prog_data(brw->wm.base.prog_data);
1145
1146 /* BRW_NEW_FRAGMENT_PROGRAM */
1147 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1148 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1149 /* _NEW_BUFFERS */
1150 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1151
1152 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1153 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1154 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1155 const unsigned surf_index =
1156 wm_prog_data->binding_table.render_target_read_start + i;
1157 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1158
1159 if (irb) {
1160 const unsigned format = brw->render_target_format[
1161 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1162 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1163 format));
1164
1165 /* Override the target of the texture if the render buffer is a
1166 * single slice of a 3D texture (since the minimum array element
1167 * field of the surface state structure is ignored by the sampler
1168 * unit for 3D textures on some hardware), or if the render buffer
1169 * is a 1D array (since shaders always provide the array index
1170 * coordinate at the Z component to avoid state-dependent
1171 * recompiles when changing the texture target of the
1172 * framebuffer).
1173 */
1174 const GLenum target =
1175 (irb->mt->target == GL_TEXTURE_3D &&
1176 irb->layer_count == 1) ? GL_TEXTURE_2D :
1177 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1178 irb->mt->target;
1179
1180 /* intel_renderbuffer::mt_layer is expressed in sample units for
1181 * the UMS and CMS multisample layouts, but
1182 * intel_renderbuffer::layer_count is expressed in units of whole
1183 * logical layers regardless of the multisample layout.
1184 */
1185 const unsigned mt_layer_unit =
1186 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1187 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1188 MAX2(irb->mt->num_samples, 1) : 1;
1189
1190 const struct isl_view view = {
1191 .format = format,
1192 .base_level = irb->mt_level - irb->mt->first_level,
1193 .levels = 1,
1194 .base_array_layer = irb->mt_layer / mt_layer_unit,
1195 .array_len = irb->layer_count,
1196 .swizzle = ISL_SWIZZLE_IDENTITY,
1197 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1198 };
1199
1200 const int flags = brw->draw_aux_buffer_disabled[i] ?
1201 INTEL_AUX_BUFFER_DISABLED : 0;
1202 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1203 surface_state_infos[brw->gen].tex_mocs,
1204 surf_offset, surf_index,
1205 I915_GEM_DOMAIN_SAMPLER, 0);
1206
1207 } else {
1208 brw->vtbl.emit_null_surface_state(
1209 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1210 _mesa_geometric_samples(fb), surf_offset);
1211 }
1212 }
1213
1214 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1215 }
1216 }
1217
1218 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1219 .dirty = {
1220 .mesa = _NEW_BUFFERS,
1221 .brw = BRW_NEW_BATCH |
1222 BRW_NEW_FRAGMENT_PROGRAM |
1223 BRW_NEW_FS_PROG_DATA,
1224 },
1225 .emit = update_renderbuffer_read_surfaces,
1226 };
1227
1228 static void
1229 update_stage_texture_surfaces(struct brw_context *brw,
1230 const struct gl_program *prog,
1231 struct brw_stage_state *stage_state,
1232 bool for_gather, uint32_t plane)
1233 {
1234 if (!prog)
1235 return;
1236
1237 struct gl_context *ctx = &brw->ctx;
1238
1239 uint32_t *surf_offset = stage_state->surf_offset;
1240
1241 /* BRW_NEW_*_PROG_DATA */
1242 if (for_gather)
1243 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1244 else
1245 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1246
1247 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1248 for (unsigned s = 0; s < num_samplers; s++) {
1249 surf_offset[s] = 0;
1250
1251 if (prog->SamplersUsed & (1 << s)) {
1252 const unsigned unit = prog->SamplerUnits[s];
1253
1254 /* _NEW_TEXTURE */
1255 if (ctx->Texture.Unit[unit]._Current) {
1256 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1257 }
1258 }
1259 }
1260 }
1261
1262
1263 /**
1264 * Construct SURFACE_STATE objects for enabled textures.
1265 */
1266 static void
1267 brw_update_texture_surfaces(struct brw_context *brw)
1268 {
1269 /* BRW_NEW_VERTEX_PROGRAM */
1270 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1271
1272 /* BRW_NEW_TESS_PROGRAMS */
1273 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1274 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1275
1276 /* BRW_NEW_GEOMETRY_PROGRAM */
1277 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1278
1279 /* BRW_NEW_FRAGMENT_PROGRAM */
1280 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1281
1282 /* _NEW_TEXTURE */
1283 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1284 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1285 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1286 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1287 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1288
1289 /* Emit an alternate set of surface state for gather. This
1290 * allows the surface format to be overridden for only the
1291 * gather4 messages. */
1292 if (brw->gen < 8) {
1293 if (vs && vs->nir->info->uses_texture_gather)
1294 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1295 if (tcs && tcs->nir->info->uses_texture_gather)
1296 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1297 if (tes && tes->nir->info->uses_texture_gather)
1298 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1299 if (gs && gs->nir->info->uses_texture_gather)
1300 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1301 if (fs && fs->nir->info->uses_texture_gather)
1302 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1303 }
1304
1305 if (fs) {
1306 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1307 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1308 }
1309
1310 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1311 }
1312
1313 const struct brw_tracked_state brw_texture_surfaces = {
1314 .dirty = {
1315 .mesa = _NEW_TEXTURE,
1316 .brw = BRW_NEW_BATCH |
1317 BRW_NEW_BLORP |
1318 BRW_NEW_FRAGMENT_PROGRAM |
1319 BRW_NEW_FS_PROG_DATA |
1320 BRW_NEW_GEOMETRY_PROGRAM |
1321 BRW_NEW_GS_PROG_DATA |
1322 BRW_NEW_TESS_PROGRAMS |
1323 BRW_NEW_TCS_PROG_DATA |
1324 BRW_NEW_TES_PROG_DATA |
1325 BRW_NEW_TEXTURE_BUFFER |
1326 BRW_NEW_VERTEX_PROGRAM |
1327 BRW_NEW_VS_PROG_DATA,
1328 },
1329 .emit = brw_update_texture_surfaces,
1330 };
1331
1332 static void
1333 brw_update_cs_texture_surfaces(struct brw_context *brw)
1334 {
1335 /* BRW_NEW_COMPUTE_PROGRAM */
1336 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1337
1338 /* _NEW_TEXTURE */
1339 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1340
1341 /* Emit an alternate set of surface state for gather. This
1342 * allows the surface format to be overridden for only the
1343 * gather4 messages.
1344 */
1345 if (brw->gen < 8) {
1346 if (cs && cs->nir->info->uses_texture_gather)
1347 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1348 }
1349
1350 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1351 }
1352
1353 const struct brw_tracked_state brw_cs_texture_surfaces = {
1354 .dirty = {
1355 .mesa = _NEW_TEXTURE,
1356 .brw = BRW_NEW_BATCH |
1357 BRW_NEW_BLORP |
1358 BRW_NEW_COMPUTE_PROGRAM,
1359 },
1360 .emit = brw_update_cs_texture_surfaces,
1361 };
1362
1363
1364 void
1365 brw_upload_ubo_surfaces(struct brw_context *brw,
1366 struct gl_linked_shader *shader,
1367 struct brw_stage_state *stage_state,
1368 struct brw_stage_prog_data *prog_data)
1369 {
1370 struct gl_context *ctx = &brw->ctx;
1371
1372 if (!shader)
1373 return;
1374
1375 uint32_t *ubo_surf_offsets =
1376 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1377
1378 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1379 struct gl_uniform_buffer_binding *binding =
1380 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1381
1382 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1383 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1384 } else {
1385 struct intel_buffer_object *intel_bo =
1386 intel_buffer_object(binding->BufferObject);
1387 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1388 if (!binding->AutomaticSize)
1389 size = MIN2(size, binding->Size);
1390 drm_intel_bo *bo =
1391 intel_bufferobj_buffer(brw, intel_bo,
1392 binding->Offset,
1393 size);
1394 brw_create_constant_surface(brw, bo, binding->Offset,
1395 size,
1396 &ubo_surf_offsets[i]);
1397 }
1398 }
1399
1400 uint32_t *ssbo_surf_offsets =
1401 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1402
1403 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1404 struct gl_shader_storage_buffer_binding *binding =
1405 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1406
1407 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1408 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1409 } else {
1410 struct intel_buffer_object *intel_bo =
1411 intel_buffer_object(binding->BufferObject);
1412 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1413 if (!binding->AutomaticSize)
1414 size = MIN2(size, binding->Size);
1415 drm_intel_bo *bo =
1416 intel_bufferobj_buffer(brw, intel_bo,
1417 binding->Offset,
1418 size);
1419 brw_create_buffer_surface(brw, bo, binding->Offset,
1420 size,
1421 &ssbo_surf_offsets[i]);
1422 }
1423 }
1424
1425 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1426 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1427 }
1428
1429 static void
1430 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1431 {
1432 struct gl_context *ctx = &brw->ctx;
1433 /* _NEW_PROGRAM */
1434 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1435
1436 if (!prog)
1437 return;
1438
1439 /* BRW_NEW_FS_PROG_DATA */
1440 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1441 &brw->wm.base, brw->wm.base.prog_data);
1442 }
1443
1444 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1445 .dirty = {
1446 .mesa = _NEW_PROGRAM,
1447 .brw = BRW_NEW_BATCH |
1448 BRW_NEW_BLORP |
1449 BRW_NEW_FS_PROG_DATA |
1450 BRW_NEW_UNIFORM_BUFFER,
1451 },
1452 .emit = brw_upload_wm_ubo_surfaces,
1453 };
1454
1455 static void
1456 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1457 {
1458 struct gl_context *ctx = &brw->ctx;
1459 /* _NEW_PROGRAM */
1460 struct gl_shader_program *prog =
1461 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1462
1463 if (!prog)
1464 return;
1465
1466 /* BRW_NEW_CS_PROG_DATA */
1467 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1468 &brw->cs.base, brw->cs.base.prog_data);
1469 }
1470
1471 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1472 .dirty = {
1473 .mesa = _NEW_PROGRAM,
1474 .brw = BRW_NEW_BATCH |
1475 BRW_NEW_BLORP |
1476 BRW_NEW_CS_PROG_DATA |
1477 BRW_NEW_UNIFORM_BUFFER,
1478 },
1479 .emit = brw_upload_cs_ubo_surfaces,
1480 };
1481
1482 void
1483 brw_upload_abo_surfaces(struct brw_context *brw,
1484 struct gl_linked_shader *shader,
1485 struct brw_stage_state *stage_state,
1486 struct brw_stage_prog_data *prog_data)
1487 {
1488 struct gl_context *ctx = &brw->ctx;
1489 uint32_t *surf_offsets =
1490 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1491
1492 if (shader && shader->NumAtomicBuffers) {
1493 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1494 struct gl_atomic_buffer_binding *binding =
1495 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1496 struct intel_buffer_object *intel_bo =
1497 intel_buffer_object(binding->BufferObject);
1498 drm_intel_bo *bo = intel_bufferobj_buffer(
1499 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1500
1501 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1502 binding->Offset, BRW_SURFACEFORMAT_RAW,
1503 bo->size - binding->Offset, 1, true);
1504 }
1505
1506 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1507 }
1508 }
1509
1510 static void
1511 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1512 {
1513 struct gl_context *ctx = &brw->ctx;
1514 /* _NEW_PROGRAM */
1515 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1516
1517 if (prog) {
1518 /* BRW_NEW_FS_PROG_DATA */
1519 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1520 &brw->wm.base, brw->wm.base.prog_data);
1521 }
1522 }
1523
1524 const struct brw_tracked_state brw_wm_abo_surfaces = {
1525 .dirty = {
1526 .mesa = _NEW_PROGRAM,
1527 .brw = BRW_NEW_ATOMIC_BUFFER |
1528 BRW_NEW_BLORP |
1529 BRW_NEW_BATCH |
1530 BRW_NEW_FS_PROG_DATA,
1531 },
1532 .emit = brw_upload_wm_abo_surfaces,
1533 };
1534
1535 static void
1536 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1537 {
1538 struct gl_context *ctx = &brw->ctx;
1539 /* _NEW_PROGRAM */
1540 struct gl_shader_program *prog =
1541 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1542
1543 if (prog) {
1544 /* BRW_NEW_CS_PROG_DATA */
1545 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1546 &brw->cs.base, brw->cs.base.prog_data);
1547 }
1548 }
1549
1550 const struct brw_tracked_state brw_cs_abo_surfaces = {
1551 .dirty = {
1552 .mesa = _NEW_PROGRAM,
1553 .brw = BRW_NEW_ATOMIC_BUFFER |
1554 BRW_NEW_BLORP |
1555 BRW_NEW_BATCH |
1556 BRW_NEW_CS_PROG_DATA,
1557 },
1558 .emit = brw_upload_cs_abo_surfaces,
1559 };
1560
1561 static void
1562 brw_upload_cs_image_surfaces(struct brw_context *brw)
1563 {
1564 struct gl_context *ctx = &brw->ctx;
1565 /* _NEW_PROGRAM */
1566 struct gl_shader_program *prog =
1567 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1568
1569 if (prog) {
1570 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1571 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1572 &brw->cs.base, brw->cs.base.prog_data);
1573 }
1574 }
1575
1576 const struct brw_tracked_state brw_cs_image_surfaces = {
1577 .dirty = {
1578 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1579 .brw = BRW_NEW_BATCH |
1580 BRW_NEW_BLORP |
1581 BRW_NEW_CS_PROG_DATA |
1582 BRW_NEW_IMAGE_UNITS
1583 },
1584 .emit = brw_upload_cs_image_surfaces,
1585 };
1586
1587 static uint32_t
1588 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1589 {
1590 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1591 uint32_t hw_format = brw_format_for_mesa_format(format);
1592 if (access == GL_WRITE_ONLY) {
1593 return hw_format;
1594 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1595 /* Typed surface reads support a very limited subset of the shader
1596 * image formats. Translate it into the closest format the
1597 * hardware supports.
1598 */
1599 return isl_lower_storage_image_format(devinfo, hw_format);
1600 } else {
1601 /* The hardware doesn't actually support a typed format that we can use
1602 * so we have to fall back to untyped read/write messages.
1603 */
1604 return BRW_SURFACEFORMAT_RAW;
1605 }
1606 }
1607
1608 static void
1609 update_default_image_param(struct brw_context *brw,
1610 struct gl_image_unit *u,
1611 unsigned surface_idx,
1612 struct brw_image_param *param)
1613 {
1614 memset(param, 0, sizeof(*param));
1615 param->surface_idx = surface_idx;
1616 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1617 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1618 * detailed explanation of these parameters.
1619 */
1620 param->swizzling[0] = 0xff;
1621 param->swizzling[1] = 0xff;
1622 }
1623
1624 static void
1625 update_buffer_image_param(struct brw_context *brw,
1626 struct gl_image_unit *u,
1627 unsigned surface_idx,
1628 struct brw_image_param *param)
1629 {
1630 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1631 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1632 update_default_image_param(brw, u, surface_idx, param);
1633
1634 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1635 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1636 }
1637
1638 static void
1639 update_texture_image_param(struct brw_context *brw,
1640 struct gl_image_unit *u,
1641 unsigned surface_idx,
1642 struct brw_image_param *param)
1643 {
1644 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1645
1646 update_default_image_param(brw, u, surface_idx, param);
1647
1648 param->size[0] = minify(mt->logical_width0, u->Level);
1649 param->size[1] = minify(mt->logical_height0, u->Level);
1650 param->size[2] = (!u->Layered ? 1 :
1651 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1652 u->TexObj->Target == GL_TEXTURE_3D ?
1653 minify(mt->logical_depth0, u->Level) :
1654 mt->logical_depth0);
1655
1656 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1657 &param->offset[0],
1658 &param->offset[1]);
1659
1660 param->stride[0] = mt->cpp;
1661 param->stride[1] = mt->pitch / mt->cpp;
1662 param->stride[2] =
1663 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1664 param->stride[3] =
1665 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1666
1667 if (mt->tiling == I915_TILING_X) {
1668 /* An X tile is a rectangular block of 512x8 bytes. */
1669 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1670 param->tiling[1] = _mesa_logbase2(8);
1671
1672 if (brw->has_swizzling) {
1673 /* Right shifts required to swizzle bits 9 and 10 of the memory
1674 * address with bit 6.
1675 */
1676 param->swizzling[0] = 3;
1677 param->swizzling[1] = 4;
1678 }
1679 } else if (mt->tiling == I915_TILING_Y) {
1680 /* The layout of a Y-tiled surface in memory isn't fundamentally
1681 * different from the layout of an X-tiled surface; we simply pretend that
1682 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1683 * one arranged in X-major order just as is the case for X-tiling.
1684 */
1685 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1686 param->tiling[1] = _mesa_logbase2(32);
1687
1688 if (brw->has_swizzling) {
1689 /* Right shift required to swizzle bit 9 of the memory address with
1690 * bit 6.
1691 */
1692 param->swizzling[0] = 3;
1693 }
1694 }
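   /* A worked example (hypothetical 4-byte-per-texel format): an X tile is
    * then 128x8 texels, so tiling[0] = log2(512 / 4) = 7 and
    * tiling[1] = log2(8) = 3; with Y tiling the pretend 16Bx32 block is
    * 4x32 texels, giving tiling[0] = log2(16 / 4) = 2 and
    * tiling[1] = log2(32) = 5.
    */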
1695
1696 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1697 * address calculation algorithm (emit_address_calculation() in
1698 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1699 * modulus equal to the LOD.
1700 */
1701 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1702 0);
1703 }
1704
1705 static void
1706 update_image_surface(struct brw_context *brw,
1707 struct gl_image_unit *u,
1708 GLenum access,
1709 unsigned surface_idx,
1710 uint32_t *surf_offset,
1711 struct brw_image_param *param)
1712 {
1713 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1714 struct gl_texture_object *obj = u->TexObj;
1715 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1716
1717 if (obj->Target == GL_TEXTURE_BUFFER) {
1718 struct intel_buffer_object *intel_obj =
1719 intel_buffer_object(obj->BufferObject);
1720 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1721 _mesa_get_format_bytes(u->_ActualFormat));
1722
1723 brw_emit_buffer_surface_state(
1724 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1725 format, intel_obj->Base.Size, texel_size,
1726 access != GL_READ_ONLY);
1727
1728 update_buffer_image_param(brw, u, surface_idx, param);
1729
1730 } else {
1731 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1732 struct intel_mipmap_tree *mt = intel_obj->mt;
1733
1734 if (format == BRW_SURFACEFORMAT_RAW) {
1735 brw_emit_buffer_surface_state(
1736 brw, surf_offset, mt->bo, mt->offset,
1737 format, mt->bo->size - mt->offset, 1 /* pitch */,
1738 access != GL_READ_ONLY);
1739
1740 } else {
1741 const unsigned num_layers = (!u->Layered ? 1 :
1742 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1743 mt->logical_depth0);
1744
1745 struct isl_view view = {
1746 .format = format,
1747 .base_level = obj->MinLevel + u->Level,
1748 .levels = 1,
1749 .base_array_layer = obj->MinLayer + u->_Layer,
1750 .array_len = num_layers,
1751 .swizzle = ISL_SWIZZLE_IDENTITY,
1752 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1753 };
1754
1755 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1756 const int flags =
1757 mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED ?
1758 INTEL_AUX_BUFFER_DISABLED : 0;
1759 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1760 surface_state_infos[brw->gen].tex_mocs,
1761 surf_offset, surf_index,
1762 I915_GEM_DOMAIN_SAMPLER,
1763 access == GL_READ_ONLY ? 0 :
1764 I915_GEM_DOMAIN_SAMPLER);
1765 }
1766
1767 update_texture_image_param(brw, u, surface_idx, param);
1768 }
1769
1770 } else {
1771 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1772 update_default_image_param(brw, u, surface_idx, param);
1773 }
1774 }
1775
1776 void
1777 brw_upload_image_surfaces(struct brw_context *brw,
1778 struct gl_linked_shader *shader,
1779 struct brw_stage_state *stage_state,
1780 struct brw_stage_prog_data *prog_data)
1781 {
1782 struct gl_context *ctx = &brw->ctx;
1783
1784 if (shader && shader->NumImages) {
1785 for (unsigned i = 0; i < shader->NumImages; i++) {
1786 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1787 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1788
1789 update_image_surface(brw, u, shader->ImageAccess[i],
1790 surf_idx,
1791 &stage_state->surf_offset[surf_idx],
1792 &prog_data->image_param[i]);
1793 }
1794
1795 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1796 /* This may have changed the image metadata dependent on the context
1797 * image unit state and passed to the program as uniforms; make sure
1798 * that push and pull constants are re-uploaded.
1799 */
1800 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1801 }
1802 }
1803
1804 static void
1805 brw_upload_wm_image_surfaces(struct brw_context *brw)
1806 {
1807 struct gl_context *ctx = &brw->ctx;
1808 /* BRW_NEW_FRAGMENT_PROGRAM */
1809 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1810
1811 if (prog) {
1812 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1813 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1814 &brw->wm.base, brw->wm.base.prog_data);
1815 }
1816 }
1817
1818 const struct brw_tracked_state brw_wm_image_surfaces = {
1819 .dirty = {
1820 .mesa = _NEW_TEXTURE,
1821 .brw = BRW_NEW_BATCH |
1822 BRW_NEW_BLORP |
1823 BRW_NEW_FRAGMENT_PROGRAM |
1824 BRW_NEW_FS_PROG_DATA |
1825 BRW_NEW_IMAGE_UNITS
1826 },
1827 .emit = brw_upload_wm_image_surfaces,
1828 };
1829
1830 void
1831 gen4_init_vtable_surface_functions(struct brw_context *brw)
1832 {
1833 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1834 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1835 }
1836
1837 void
1838 gen6_init_vtable_surface_functions(struct brw_context *brw)
1839 {
1840 gen4_init_vtable_surface_functions(brw);
1841 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1842 }
1843
1844 static void
1845 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1846 {
1847 struct gl_context *ctx = &brw->ctx;
1848 /* _NEW_PROGRAM */
1849 struct gl_shader_program *prog =
1850 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1851 /* BRW_NEW_CS_PROG_DATA */
1852 const struct brw_cs_prog_data *cs_prog_data =
1853 brw_cs_prog_data(brw->cs.base.prog_data);
1854
1855 if (prog && cs_prog_data->uses_num_work_groups) {
1856 const unsigned surf_idx =
1857 cs_prog_data->binding_table.work_groups_start;
1858 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1859 drm_intel_bo *bo;
1860 uint32_t bo_offset;
1861
1862 if (brw->compute.num_work_groups_bo == NULL) {
1863 bo = NULL;
1864 intel_upload_data(brw,
1865 (void *)brw->compute.num_work_groups,
1866 3 * sizeof(GLuint),
1867 sizeof(GLuint),
1868 &bo,
1869 &bo_offset);
1870 } else {
1871 bo = brw->compute.num_work_groups_bo;
1872 bo_offset = brw->compute.num_work_groups_offset;
1873 }
1874
1875 brw_emit_buffer_surface_state(brw, surf_offset,
1876 bo, bo_offset,
1877 BRW_SURFACEFORMAT_RAW,
1878 3 * sizeof(GLuint), 1, true);
1879 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1880 }
1881 }
1882
1883 const struct brw_tracked_state brw_cs_work_groups_surface = {
1884 .dirty = {
1885 .brw = BRW_NEW_BLORP |
1886 BRW_NEW_CS_PROG_DATA |
1887 BRW_NEW_CS_WORK_GROUPS
1888 },
1889 .emit = brw_upload_cs_work_groups_surface,
1890 };