i965: Replace draw_aux_buffer_disabled with draw_aux_usage
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
/* Memory Object Control State (MOCS) values for write-back cacheable
 * access, indexed by hardware generation (valid entries: gen7..gen10).
 */
uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
};
64
/* MOCS values that defer cacheability to the page table entry (PTE),
 * indexed by hardware generation.  Used for externally-visible (shared)
 * buffers -- see brw_get_bo_mocs().
 */
uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
};
71
72 uint32_t
73 brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
74 {
75 return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
76 }
77
/* Fill *surf with an isl_surf describing mt as seen through the given GL
 * target.
 *
 * If the dim layout implied by the target matches the miptree's actual
 * layout, *surf is simply a copy of mt->surf.  Otherwise the surface is
 * overridden to address only the single level/slice selected by *view:
 * *offset/*tile_x/*tile_y are adjusted to point at the first texel of that
 * slice, the surface's logical dimensions are minified to that level, and
 * the view's base level/layer are reset to 0.
 */
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   /* Common case: layouts agree, the plain copy of mt->surf is usable. */
   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- You're entering
    * dangerous territory, this can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   /* 1D surfaces have no height; 1D/2D surfaces have no depth. */
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}
131
132 static void
133 brw_emit_surface_state(struct brw_context *brw,
134 struct intel_mipmap_tree *mt,
135 GLenum target, struct isl_view view,
136 enum isl_aux_usage aux_usage,
137 uint32_t *surf_offset, int surf_index,
138 unsigned reloc_flags)
139 {
140 const struct gen_device_info *devinfo = &brw->screen->devinfo;
141 uint32_t tile_x = mt->level[0].level_x;
142 uint32_t tile_y = mt->level[0].level_y;
143 uint32_t offset = mt->offset;
144
145 struct isl_surf surf;
146
147 get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);
148
149 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
150
151 struct brw_bo *aux_bo;
152 struct isl_surf *aux_surf = NULL;
153 uint64_t aux_offset = 0;
154 switch (aux_usage) {
155 case ISL_AUX_USAGE_MCS:
156 case ISL_AUX_USAGE_CCS_D:
157 case ISL_AUX_USAGE_CCS_E:
158 aux_surf = &mt->mcs_buf->surf;
159 aux_bo = mt->mcs_buf->bo;
160 aux_offset = mt->mcs_buf->offset;
161 break;
162
163 case ISL_AUX_USAGE_HIZ:
164 aux_surf = &mt->hiz_buf->surf;
165 aux_bo = mt->hiz_buf->bo;
166 aux_offset = 0;
167 break;
168
169 case ISL_AUX_USAGE_NONE:
170 break;
171 }
172
173 if (aux_usage != ISL_AUX_USAGE_NONE) {
174 /* We only really need a clear color if we also have an auxiliary
175 * surface. Without one, it does nothing.
176 */
177 clear_color = mt->fast_clear_color;
178 }
179
180 void *state = brw_state_batch(brw,
181 brw->isl_dev.ss.size,
182 brw->isl_dev.ss.align,
183 surf_offset);
184
185 isl_surf_fill_state(&brw->isl_dev, state, .surf = &mt->surf, .view = &view,
186 .address = brw_state_reloc(&brw->batch,
187 *surf_offset + brw->isl_dev.ss.addr_offset,
188 mt->bo, offset, reloc_flags),
189 .aux_surf = aux_surf, .aux_usage = aux_usage,
190 .aux_address = aux_offset,
191 .mocs = brw_get_bo_mocs(devinfo, mt->bo),
192 .clear_color = clear_color,
193 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
194 if (aux_surf) {
195 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
196 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
197 * contain other control information. Since buffer addresses are always
198 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
199 * an ordinary reloc to do the necessary address translation.
200 *
201 * FIXME: move to the point of assignment.
202 */
203 assert((aux_offset & 0xfff) == 0);
204 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
205 *aux_addr = brw_state_reloc(&brw->batch,
206 *surf_offset +
207 brw->isl_dev.ss.aux_addr_offset,
208 aux_bo, *aux_addr,
209 reloc_flags);
210 }
211 }
212
213 static uint32_t
214 gen6_update_renderbuffer_surface(struct brw_context *brw,
215 struct gl_renderbuffer *rb,
216 unsigned unit,
217 uint32_t surf_index)
218 {
219 struct gl_context *ctx = &brw->ctx;
220 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
221 struct intel_mipmap_tree *mt = irb->mt;
222
223 assert(brw_render_target_supported(brw, rb));
224
225 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
226 if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
227 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
228 __func__, _mesa_get_format_name(rb_format));
229 }
230 enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];
231
232 struct isl_view view = {
233 .format = isl_format,
234 .base_level = irb->mt_level - irb->mt->first_level,
235 .levels = 1,
236 .base_array_layer = irb->mt_layer,
237 .array_len = MAX2(irb->layer_count, 1),
238 .swizzle = ISL_SWIZZLE_IDENTITY,
239 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
240 };
241
242 uint32_t offset;
243 brw_emit_surface_state(brw, mt, mt->target, view,
244 brw->draw_aux_usage[unit],
245 &offset, surf_index,
246 RELOC_WRITE);
247 return offset;
248 }
249
250 GLuint
251 translate_tex_target(GLenum target)
252 {
253 switch (target) {
254 case GL_TEXTURE_1D:
255 case GL_TEXTURE_1D_ARRAY_EXT:
256 return BRW_SURFACE_1D;
257
258 case GL_TEXTURE_RECTANGLE_NV:
259 return BRW_SURFACE_2D;
260
261 case GL_TEXTURE_2D:
262 case GL_TEXTURE_2D_ARRAY_EXT:
263 case GL_TEXTURE_EXTERNAL_OES:
264 case GL_TEXTURE_2D_MULTISAMPLE:
265 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
266 return BRW_SURFACE_2D;
267
268 case GL_TEXTURE_3D:
269 return BRW_SURFACE_3D;
270
271 case GL_TEXTURE_CUBE_MAP:
272 case GL_TEXTURE_CUBE_MAP_ARRAY:
273 return BRW_SURFACE_CUBE;
274
275 default:
276 unreachable("not reached");
277 }
278 }
279
280 uint32_t
281 brw_get_surface_tiling_bits(enum isl_tiling tiling)
282 {
283 switch (tiling) {
284 case ISL_TILING_X:
285 return BRW_SURFACE_TILED;
286 case ISL_TILING_Y0:
287 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
288 default:
289 return 0;
290 }
291 }
292
293
294 uint32_t
295 brw_get_surface_num_multisamples(unsigned num_samples)
296 {
297 if (num_samples > 1)
298 return BRW_SURFACE_MULTISAMPLECOUNT_4;
299 else
300 return BRW_SURFACE_MULTISAMPLECOUNT_1;
301 }
302
/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 *
 * Returns a MAKE_SWIZZLE4-packed value: the base-format/depth-mode channel
 * mapping composed with the texture object's user-specified swizzle.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   /* Identity mapping; entries are overwritten below as needed. */
   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      /* Broadcast/zero the depth value per the selected depth mode. */
      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      /* NOTE(review): the DXT1 formats appear here presumably because the
       * compressed data can carry an (unwanted) alpha bit -- confirm.
       */
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   /* Compose the format-derived swizzle with the user's texture swizzle. */
   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
418
419 /**
420 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
421 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
422 *
423 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
424 * 0 1 2 3 4 5
425 * 4 5 6 7 0 1
426 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
427 *
428 * which is simply adding 4 then modding by 8 (or anding with 7).
429 *
430 * We then may need to apply workarounds for textureGather hardware bugs.
431 */
432 static unsigned
433 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
434 {
435 unsigned scs = (swizzle + 4) & 7;
436
437 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
438 }
439
/* Emit the sampling SURFACE_STATE for texture unit |unit|, writing its
 * batch offset to *surf_offset.
 *
 * for_gather: apply the gen6/gen7 textureGather format workarounds.
 * for_txf:    force sRGB decode off (texelFetch-style access).
 * plane:      sub-plane index for planar miptrees; 0 selects the main
 *             surface.
 */
static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      /* Buffer textures take an entirely different (buffer-typed) path. */
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      /* Select the requested sub-plane's miptree; bail if it isn't
       * allocated (plane 0 is the main miptree itself).
       */
      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            /* Haswell additionally needs the green->blue channel redirect
             * applied in swizzle_to_scs() below.
             */
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value. For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      /* Stencil sampling: gen <= 7 samples from the R8_UINT shadow copy
       * (r8stencil_mt); gen8+ samples the stencil miptree directly.
       */
      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      /* Binding-table index, recovered from the slot's position within the
       * WM stage's surf_offset array.
       */
      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}
574
/* Emit a buffer-typed SURFACE_STATE, writing its batch offset to
 * *out_offset.
 *
 * bo may be NULL, in which case buffer_offset is used directly as the
 * (unrelocated) address.  pitch is the per-element stride in bytes;
 * reloc_flags (e.g. RELOC_WRITE) are applied to the buffer relocation.
 */
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}
602
/* Emit the SURFACE_STATE for a GL_TEXTURE_BUFFER texture bound to texture
 * unit |unit|, writing its batch offset to *surf_offset.
 */
void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      /* The texture's size can't exceed the underlying buffer object. */
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);

   /* Report (but don't abort on) an unsupported buffer-texture format. */
   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}
654
/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   /* Hand-packed 6-dword legacy SURFACE_STATE (not ISL-generated). */
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer. We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow. But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   /* Split the 27-bit entry count across the surface's width (bits 0..6),
    * height (bits 7..19) and depth (bits 20..26) fields.
    */
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}
734
/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   /* Refresh subroutine uniform indices before uploading constants. */
   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}
757
/* State atom: re-upload the WM pull-constant buffer whenever program
 * constants, the fragment program, its compiled prog data, or the batch
 * changes.
 */
const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};
767
/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output..
 *
 * fb may be NULL, in which case a 1x1 single-sampled null surface is
 * emitted.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   /* Everything except multisampled gen6 can use the ISL-generated null
    * surface; the workaround below handles that one case.
    */
   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}
837
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 *
 * Returns the batch offset of the hand-packed (gen4-5 layout) surface
 * state.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
                  BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   /* Gen4-5 bake blend enable and per-channel write masks directly into
    * the surface state.
    */
   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB (no alpha bits) or alpha writes are masked off.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}
940
941 static void
942 update_renderbuffer_surfaces(struct brw_context *brw)
943 {
944 const struct gen_device_info *devinfo = &brw->screen->devinfo;
945 const struct gl_context *ctx = &brw->ctx;
946
947 /* _NEW_BUFFERS | _NEW_COLOR */
948 const struct gl_framebuffer *fb = ctx->DrawBuffer;
949
950 /* Render targets always start at binding table index 0. */
951 const unsigned rt_start = 0;
952
953 uint32_t *surf_offsets = brw->wm.base.surf_offset;
954
955 /* Update surfaces for drawing buffers */
956 if (fb->_NumColorDrawBuffers >= 1) {
957 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
958 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
959
960 if (intel_renderbuffer(rb)) {
961 surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
962 gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
963 gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
964 } else {
965 emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
966 }
967 }
968 } else {
969 emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
970 }
971
972 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
973 }
974
/* Gen4-5 render target surfaces.  _NEW_COLOR is in the dirty set because
 * blend enable and the color write masks are baked into the surface state
 * on these generations (see the devinfo->gen < 6 block above).
 */
const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};
983
/* Gen6+ render target surfaces.  Blend/write-mask state is no longer part
 * of the surface state, so no _NEW_COLOR here; aux buffer state changes
 * (BRW_NEW_AUX_STATE) must retrigger emission instead.
 */
const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};
992
/**
 * Emit texture SURFACE_STATEs pointing at the bound color draw buffers so
 * the fragment shader can read back its render targets.  Only used when the
 * shader has render target reads and the driver is not exposing native
 * MESA_shader_framebuffer_fetch.
 */
static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.MESA_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            /* The view must use a format the hardware can sample from. */
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            /* If aux has been disabled for drawing to this buffer
             * (draw_aux_usage[i] == ISL_AUX_USAGE_NONE), don't sample
             * through aux either.
             */
            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}
1062
/* Surfaces for reading back the bound render targets from the fragment
 * shader (see update_renderbuffer_read_surfaces).
 */
const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};
1072
1073 static bool
1074 is_depth_texture(struct intel_texture_object *iobj)
1075 {
1076 GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
1077 return base_format == GL_DEPTH_COMPONENT ||
1078 (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
1079 }
1080
/**
 * Fill in the texture binding-table entries for one shader stage.
 *
 * \param prog         the stage's gl_program; NULL means the stage is
 *                     unused and nothing is emitted
 * \param stage_state  per-stage state holding surf_offset and prog_data
 * \param for_gather   fill the alternate gather4 binding-table section
 * \param plane        which plane_start[] section to fill (only read when
 *                     for_gather is false)
 */
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA -- the binding-table layout comes from prog_data. */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      /* Reset the entry; it's only written below if sampler s is used. */
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}
1139
1140
1141 /**
1142 * Construct SURFACE_STATE objects for enabled textures.
1143 */
1144 static void
1145 brw_update_texture_surfaces(struct brw_context *brw)
1146 {
1147 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1148
1149 /* BRW_NEW_VERTEX_PROGRAM */
1150 struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
1151
1152 /* BRW_NEW_TESS_PROGRAMS */
1153 struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
1154 struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
1155
1156 /* BRW_NEW_GEOMETRY_PROGRAM */
1157 struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
1158
1159 /* BRW_NEW_FRAGMENT_PROGRAM */
1160 struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
1161
1162 /* _NEW_TEXTURE */
1163 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1164 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1165 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1166 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1167 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1168
1169 /* emit alternate set of surface state for gather. this
1170 * allows the surface format to be overriden for only the
1171 * gather4 messages. */
1172 if (devinfo->gen < 8) {
1173 if (vs && vs->info.uses_texture_gather)
1174 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1175 if (tcs && tcs->info.uses_texture_gather)
1176 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1177 if (tes && tes->info.uses_texture_gather)
1178 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1179 if (gs && gs->info.uses_texture_gather)
1180 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1181 if (fs && fs->info.uses_texture_gather)
1182 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1183 }
1184
1185 if (fs) {
1186 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1187 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1188 }
1189
1190 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1191 }
1192
/* Texture surfaces for all graphics stages.  Depends on every stage's
 * program and prog data because the binding-table layout is read from
 * each stage's prog_data.
 */
const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};
1211
1212 static void
1213 brw_update_cs_texture_surfaces(struct brw_context *brw)
1214 {
1215 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1216
1217 /* BRW_NEW_COMPUTE_PROGRAM */
1218 struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
1219
1220 /* _NEW_TEXTURE */
1221 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1222
1223 /* emit alternate set of surface state for gather. this
1224 * allows the surface format to be overriden for only the
1225 * gather4 messages.
1226 */
1227 if (devinfo->gen < 8) {
1228 if (cs && cs->info.uses_texture_gather)
1229 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1230 }
1231
1232 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1233 }
1234
/* Texture surfaces for the compute stage. */
const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};
1244
1245 static void
1246 upload_buffer_surface(struct brw_context *brw,
1247 struct gl_buffer_binding *binding,
1248 uint32_t *out_offset,
1249 enum isl_format format,
1250 unsigned reloc_flags)
1251 {
1252 struct gl_context *ctx = &brw->ctx;
1253
1254 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1255 emit_null_surface_state(brw, NULL, out_offset);
1256 } else {
1257 ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
1258 if (!binding->AutomaticSize)
1259 size = MIN2(size, binding->Size);
1260
1261 struct intel_buffer_object *iobj =
1262 intel_buffer_object(binding->BufferObject);
1263 struct brw_bo *bo =
1264 intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
1265 (reloc_flags & RELOC_WRITE) != 0);
1266
1267 brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
1268 format, size, 1, reloc_flags);
1269 }
1270 }
1271
/**
 * Emit binding-table surface entries for a stage's UBOs, atomic counter
 * buffers (ABOs), and SSBOs.
 *
 * ABOs and SSBOs share one binding-table section: ABO entries start at
 * binding_table.ssbo_start, immediately followed by the SSBO entries.
 */
void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   /* Nothing to do if the stage is absent or binds no buffers at all. */
   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
   }

   uint32_t *abo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;

   for (int i = 0; i < prog->info.num_abos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   /* Force push constants to be re-uploaded -- presumably because UBO data
    * can back push constants; confirm against the constant upload path.
    */
   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}
1316
1317 static void
1318 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1319 {
1320 struct gl_context *ctx = &brw->ctx;
1321 /* _NEW_PROGRAM */
1322 struct gl_program *prog = ctx->FragmentProgram._Current;
1323
1324 /* BRW_NEW_FS_PROG_DATA */
1325 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1326 }
1327
/* UBO/SSBO/ABO surfaces for the fragment program. */
const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};
1337
1338 static void
1339 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1340 {
1341 struct gl_context *ctx = &brw->ctx;
1342 /* _NEW_PROGRAM */
1343 struct gl_program *prog =
1344 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1345
1346 /* BRW_NEW_CS_PROG_DATA */
1347 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1348 }
1349
/* UBO/SSBO/ABO surfaces for the compute program. */
const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};
1359
1360 static void
1361 brw_upload_cs_image_surfaces(struct brw_context *brw)
1362 {
1363 /* _NEW_PROGRAM */
1364 const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];
1365
1366 if (cp) {
1367 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1368 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1369 brw->cs.base.prog_data);
1370 }
1371 }
1372
/* Image unit surfaces for the compute program. */
const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};
1383
1384 static uint32_t
1385 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1386 {
1387 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1388 enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
1389 if (access == GL_WRITE_ONLY) {
1390 return hw_format;
1391 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1392 /* Typed surface reads support a very limited subset of the shader
1393 * image formats. Translate it into the closest format the
1394 * hardware supports.
1395 */
1396 return isl_lower_storage_image_format(devinfo, hw_format);
1397 } else {
1398 /* The hardware doesn't actually support a typed format that we can use
1399 * so we have to fall back to untyped read/write messages.
1400 */
1401 return ISL_FORMAT_RAW;
1402 }
1403 }
1404
1405 static void
1406 update_default_image_param(struct brw_context *brw,
1407 struct gl_image_unit *u,
1408 unsigned surface_idx,
1409 struct brw_image_param *param)
1410 {
1411 memset(param, 0, sizeof(*param));
1412 param->surface_idx = surface_idx;
1413 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1414 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1415 * detailed explanation of these parameters.
1416 */
1417 param->swizzling[0] = 0xff;
1418 param->swizzling[1] = 0xff;
1419 }
1420
1421 static void
1422 update_buffer_image_param(struct brw_context *brw,
1423 struct gl_image_unit *u,
1424 unsigned surface_idx,
1425 struct brw_image_param *param)
1426 {
1427 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1428 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1429 update_default_image_param(brw, u, surface_idx, param);
1430
1431 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1432 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1433 }
1434
1435 static unsigned
1436 get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
1437 unsigned level)
1438 {
1439 if (target == GL_TEXTURE_CUBE_MAP)
1440 return 6;
1441
1442 return target == GL_TEXTURE_3D ?
1443 minify(mt->surf.logical_level0_px.depth, level) :
1444 mt->surf.logical_level0_px.array_len;
1445 }
1446
/**
 * Emit the SURFACE_STATE and fill the brw_image_param metadata for one
 * image unit.
 *
 * Invalid image units get a null surface plus default (zeroed) metadata.
 * Buffer textures and RAW-format images are emitted as buffer surfaces;
 * everything else gets a typed surface with no aux usage (the color data
 * must already be resolved -- see the assert below).
 */
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         /* RAW surfaces use a texel size of 1 (byte-addressed). */
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            /* Untyped fallback: expose the miptree's BO from its offset
             * onward as a raw buffer surface.
             */
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            /* NOTE(review): surf_index is computed relative to the WM
             * binding table even though this helper is also reached via the
             * CS path -- confirm that's intended.
             */
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}
1515
1516 void
1517 brw_upload_image_surfaces(struct brw_context *brw,
1518 const struct gl_program *prog,
1519 struct brw_stage_state *stage_state,
1520 struct brw_stage_prog_data *prog_data)
1521 {
1522 assert(prog);
1523 struct gl_context *ctx = &brw->ctx;
1524
1525 if (prog->info.num_images) {
1526 for (unsigned i = 0; i < prog->info.num_images; i++) {
1527 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1528 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1529
1530 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1531 surf_idx,
1532 &stage_state->surf_offset[surf_idx],
1533 &stage_state->image_param[i]);
1534 }
1535
1536 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1537 /* This may have changed the image metadata dependent on the context
1538 * image unit state and passed to the program as uniforms, make sure
1539 * that push and pull constants are reuploaded.
1540 */
1541 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1542 }
1543 }
1544
1545 static void
1546 brw_upload_wm_image_surfaces(struct brw_context *brw)
1547 {
1548 /* BRW_NEW_FRAGMENT_PROGRAM */
1549 const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];
1550
1551 if (wm) {
1552 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1553 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1554 brw->wm.base.prog_data);
1555 }
1556 }
1557
/* Image unit surfaces for the fragment program. */
const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};
1569
/**
 * Upload the buffer surface through which a compute shader reads
 * gl_NumWorkGroups.
 *
 * When brw->compute.num_work_groups_bo is NULL, the three GLuint counts in
 * brw->compute.num_work_groups are uploaded to a temporary buffer first;
 * otherwise the existing BO is referenced directly (NOTE(review):
 * presumably the indirect-dispatch parameter buffer -- confirm against the
 * dispatch path).
 */
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      /* The shader reads the three counts through a RAW buffer surface. */
      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}
1609
/* gl_NumWorkGroups surface for compute shaders. */
const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};