i965: Store image_param in brw_context instead of prog_data
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"
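
/* Memory object control state (MOCS) values for texture and render target
 * surfaces, indexed by devinfo->gen.
 */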
uint32_t tex_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
};

uint32_t rb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
};

static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only work if you intend to access a
    * single level and slice of the texture, and the hardware supports the
    * tile offset feature in order to allow non-tile-aligned base offsets,
    * since we'll have to point the hardware to the first texel of the level
    * instead of relying on the usual base level/layer controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);
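
   /* For example (a hypothetical case): a view of level 2 of a 64x64 2D
    * miptree becomes a single-level 16x16 surface whose base address points
    * at the first texel of that level.
    */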

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t mocs, uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   switch (aux_usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      aux_surf = &mt->mcs_buf->surf;
      aux_bo = mt->mcs_buf->bo;
      aux_offset = mt->mcs_buf->offset;
      break;

   case ISL_AUX_USAGE_HIZ:
      aux_surf = &mt->hiz_buf->surf;
      aux_bo = mt->hiz_buf->bo;
      aux_offset = 0;
      break;

   case ISL_AUX_USAGE_NONE:
      break;
   }

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = mt->fast_clear_color;
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   /* Use the adjusted copy computed by get_isl_surf(), not mt->surf, so any
    * layout override it applied is preserved.
    */
   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset +
                                                  brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = mocs, .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
      *aux_addr = brw_state_reloc(&brw->batch,
                                  *surf_offset +
                                  brw->isl_dev.ss.aux_addr_offset,
                                  aux_bo, *aux_addr,
                                  reloc_flags);
   }
}

static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   enum isl_aux_usage aux_usage =
      brw->draw_aux_buffer_disabled[unit] ? ISL_AUX_USAGE_NONE :
      intel_miptree_render_aux_usage(brw, mt, ctx->Color.sRGBEnabled,
                                     ctx->Color.BlendEnabled & (1 << unit));

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   struct isl_view view = {
      .format = brw->mesa_to_isl_render_format[rb_format],
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                          rb_mocs[devinfo->gen],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}
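
/* Pre-Gen7 SURFACE_STATE can only encode 1x and 4x sample counts, and Gen6
 * (the only pre-Gen7 part with MSAA support) supports only 4x, so anything
 * multisampled is programmed as MULTISAMPLECOUNT_4.
 */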
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED).  The mappings are
 *
 *   SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *           0          1          2          3             4            5
 *           4          5          6          7             0            1
 *     SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
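 *
 * For example, SWIZZLE_X (0) maps to (0 + 4) & 7 = 4 = HSW_SCS_RED, and
 * SWIZZLE_ZERO (4) wraps around to (4 + 4) & 7 = 0 = HSW_SCS_ZERO.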
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

static bool
brw_aux_surface_disabled(const struct brw_context *brw,
                         const struct intel_mipmap_tree *mt)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt == mt)
         return brw->draw_aux_buffer_disabled[i];
   }

   return false;
}

static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           bool for_txf,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      if (brw_aux_surface_disabled(brw, mt))
         aux_usage = ISL_AUX_USAGE_NONE;

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             tex_mocs[devinfo->gen],
                             surf_offset, surf_index,
                             0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset +
                                                    brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = tex_mocs[devinfo->gen]);
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_object specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>))),
    *
    *    where <buffer_size> is the size of the buffer object, in basic
    *    machine units and <components> and <base_type> are the element count
    *    and base data type for elements, as specified in Table X.1.  The
    *    number of texels in the texel array is then clamped to the
    *    implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
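
   /* For instance, with a hypothetical MAX_TEXTURE_BUFFER_SIZE of 2^27
    * texels and an RGBA32F buffer texture (texel_size == 16), the byte size
    * is clamped to 2^31, so the texel count ISL computes (size / stride)
    * never exceeds the advertised limit.
    */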

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            struct brw_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_R32G32B32A32_FLOAT,
                                 size, 1, 0);
}

/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          struct brw_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_RAW,
                                 size, 1, RELOC_WRITE);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
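
   /* The 27-bit entry count is split across the 7-bit width, 13-bit height
    * and 7-bit depth fields; e.g. a hypothetical buffer_size_minus_1 of
    * 0x123456 yields width = 0x56, height = 0x468 and depth = 0x1.
    */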

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);
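
   /* For a hypothetical 1920x1080 framebuffer this comes to
    * (120 + 68 - 1) * 4096 bytes, roughly 748 KiB, instead of a full-sized
    * dummy render target.
    */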

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *    If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.MESA_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_buffer_disabled[i])
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   tex_mocs[devinfo->gen],
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};


void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         emit_null_surface_state(brw, NULL, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size, false);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         emit_null_surface_state(brw, NULL, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size, true);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   stage_state->push_constants_dirty = true;

   if (prog->info.num_ubos || prog->info.num_ssbos)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

void
brw_upload_abo_surfaces(struct brw_context *brw,
                        const struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (prog->info.num_abos) {
      for (unsigned i = 0; i < prog->info.num_abos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo, binding->Offset,
                                   intel_bo->Base.Size - binding->Offset,
                                   true);

         brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                       binding->Offset, ISL_FORMAT_RAW,
                                       bo->size - binding->Offset, 1,
                                       RELOC_WRITE);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}
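
/* To summarize the cases above: write-only images keep their typed format;
 * readable images either get lowered to a typed format the hardware can read
 * back (typically one with fewer, wider channels) or fall back to RAW with
 * untyped messages.
 */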

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t) u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);
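
   /* E.g. for a hypothetical 4096-byte RGBA32F buffer image,
    * _mesa_get_format_bytes() returns 16, so the assignments below yield
    * stride[0] = 16 bytes and size[0] = 256 texels.
    */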
   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE, tex_mocs[devinfo->gen],
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms; make sure
       * that push and pull constants are re-uploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *) brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};