i965: Switch over to fully external-or-not MOCS scheme
src/mesa/drivers/dri/i965/brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
};

uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}
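
/* Illustrative sketch (an editor's example, not driver code): with the
 * external-or-not scheme above, every buffer gets exactly one of two cache
 * settings per generation.  For example, on Skylake (gen 9):
 *
 *    brw_get_bo_mocs(devinfo, internal_bo) == SKL_MOCS_WB    // write-back
 *    brw_get_bo_mocs(devinfo, scanout_bo)  == SKL_MOCS_PTE   // follow PTE
 *    brw_get_bo_mocs(devinfo, NULL)        == SKL_MOCS_WB    // no BO: WB
 *
 * where internal_bo/scanout_bo are hypothetical buffer objects with
 * bo->external unset/set respectively.
 */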

static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- You're entering
    * dangerous territory, this can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}
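
/* Worked example (illustrative, with made-up numbers): asking for level 2,
 * layer 3 of a 64x64 2D-array miptree whose dim_layout doesn't match the
 * requested target leaves get_isl_surf() with:
 *
 *    *offset                 += tile offset of (level 2, layer 3)
 *    surf->logical_level0_px  = 16x16 (minify(64, 2)), array_len 1
 *    surf->levels             = 1
 *    view->base_level         = 0
 *    view->base_array_layer   = 0
 *
 * i.e. the hardware is pointed directly at that one level/slice and told
 * it is a single-level, single-layer surface.
 */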

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   switch (aux_usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      aux_surf = &mt->mcs_buf->surf;
      aux_bo = mt->mcs_buf->bo;
      aux_offset = mt->mcs_buf->offset;
      break;

   case ISL_AUX_USAGE_HIZ:
      aux_surf = &mt->hiz_buf->surf;
      aux_bo = mt->hiz_buf->bo;
      aux_offset = 0;
      break;

   case ISL_AUX_USAGE_NONE:
      break;
   }

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = mt->fast_clear_color;
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   /* Use the local copy that get_isl_surf() may have overridden, not the
    * miptree's own surface.
    */
   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
      *aux_addr = brw_state_reloc(&brw->batch,
                                  *surf_offset +
                                  brw->isl_dev.ss.aux_addr_offset,
                                  aux_bo, *aux_addr,
                                  reloc_flags);
   }
}
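
/* Illustrative sketch of the aux-address fix-up above (addresses made up):
 * if the MCS buffer lands at GPU address 0x12345000 (4k-aligned, low 12
 * bits zero) and the pre-reloc DWORD already holds control bits, say 0x001,
 * the reloc effectively computes
 *
 *    *aux_addr = 0x12345000 | 0x001;
 *
 * The OR is only safe because the 4k alignment guarantees the address and
 * the control bits occupy disjoint bit ranges.
 */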

static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   enum isl_aux_usage aux_usage =
      brw->draw_aux_buffer_disabled[unit] ? ISL_AUX_USAGE_NONE :
      intel_miptree_render_aux_usage(brw, mt, isl_format,
                                     ctx->Color.BlendEnabled & (1 << unit));

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
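
/* Illustrative example: a GL_DEPTH_COMPONENT texture with DepthMode
 * GL_LUMINANCE and an identity t->_Swizzle takes the GL_LUMINANCE case
 * above, so
 *
 *    swizzles[] == { SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE, ... }
 *
 * and a shader reading .rgb sees the depth value replicated while .a reads
 * 1.0 -- the legacy GL_LUMINANCE behavior.
 */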

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2         3             4           5
 *         4          5          6         7             0           1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,    SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
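
/* Quick check of the (swizzle + 4) & 7 mapping (illustrative):
 *
 *    swizzle_to_scs(SWIZZLE_X,    false) == (0 + 4) & 7 == 4 == HSW_SCS_RED
 *    swizzle_to_scs(SWIZZLE_ZERO, false) == (4 + 4) & 7 == 0 == HSW_SCS_ZERO
 *    swizzle_to_scs(SWIZZLE_Y,    true)  == HSW_SCS_BLUE  // gather W/A
 */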

static bool
brw_aux_surface_disabled(const struct brw_context *brw,
                         const struct intel_mipmap_tree *mt)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt == mt)
         return brw->draw_aux_buffer_disabled[i];
   }

   return false;
}

static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           bool for_txf,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      if (brw_aux_surface_disabled(brw, mt))
         aux_usage = ISL_AUX_USAGE_NONE;

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_object specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}
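
/* Worked example of the clamp above (illustrative numbers): a GL_RGBA32F
 * buffer texture has a 16-byte texel.  If MaxTextureBufferSize were 2^27
 * texels, a 4 GiB buffer object would be clamped to
 *
 *    size = MIN2(size, (1u << 27) * 16);   // 2 GiB worth of texels
 *
 * so the texel count ISL derives (size / stride) never exceeds the
 * advertised MAX_TEXTURE_BUFFER_SIZE.
 */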

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            struct brw_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_R32G32B32A32_FLOAT,
                                 size, 1, 0);
}

/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          struct brw_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_RAW,
                                 size, 1, RELOC_WRITE);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}
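
/* Illustrative encoding example: buffer_size_minus_1 == 1000 is split
 * across the SURFACE_STATE width/height/depth fields as
 *
 *    width  = 1000 & 0x7f              == 104   // bits  6:0
 *    height = (1000 & 0xfff80) >> 7    ==   7   // bits 19:7
 *    depth  = (1000 & 0x7f00000) >> 20 ==   0   // bits 26:20
 *
 * and 104 + (7 << 7) == 1000, so the hardware reassembles the original
 * entry count from the three fields.
 */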

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}
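
/* Worked example of the workaround-buffer sizing (illustrative): for a
 * 1920x1080 multisampled framebuffer,
 *
 *    width_in_tiles  = ALIGN(1920, 16) / 16 == 120
 *    height_in_tiles = ALIGN(1080, 16) / 16 ==  68
 *    size_needed     = (120 + 68 - 1) * 4096 == 765952 bytes (~748 KiB)
 *
 * versus the roughly 8 MiB a full-pitch 1920x1080x32bpp dummy buffer would
 * take, which is the point of the 128-byte-pitch trick described above.
 */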

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}
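
/* Illustrative example of the tile-offset encoding above: an offset of
 * (tile_x, tile_y) == (8, 4) would be programmed as
 *
 *    surf[5] = (8 / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
 *              (4 / 2) << BRW_SURFACE_Y_OFFSET_SHIFT;
 *
 * The divisions are why the asserts require tile_x % 4 == 0 and
 * tile_y % 2 == 0: the hardware fields simply cannot express the low bits.
 */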

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.MESA_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_buffer_disabled[i])
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};


void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         emit_null_surface_state(brw, NULL, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size, false);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         emit_null_surface_state(brw, NULL, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size, true);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   stage_state->push_constants_dirty = true;

   if (prog->info.num_ubos || prog->info.num_ssbos)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

void
brw_upload_abo_surfaces(struct brw_context *brw,
                        const struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (prog->info.num_abos) {
      for (unsigned i = 0; i < prog->info.num_abos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo, binding->Offset,
                                   intel_bo->Base.Size - binding->Offset,
                                   true);

         brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                       binding->Offset, ISL_FORMAT_RAW,
                                       bo->size - binding->Offset, 1,
                                       RELOC_WRITE);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}
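
/* Illustrative decision table for get_image_format() (the formats are
 * editor-chosen examples; the branch logic is as implemented above):
 *
 *    GL_WRITE_ONLY + RGBA8   -> hw_format unchanged (typed writes are fine)
 *    read/write    + RGBA8   -> lowered typed format, e.g. R32_UINT, when a
 *                               matching typed-read format exists
 *    read/write    + other   -> ISL_FORMAT_RAW, falling back to untyped
 *                               read/write messages
 */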

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms, so make
       * sure that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};