i965/gen7: Use R8_UINT stencil copy when sampling the stencil texture
mesa.git: src/mesa/drivers/dri/i965/brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "main/teximage.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_instruction.h"
41 #include "main/framebuffer.h"
42 #include "main/shaderapi.h"
43
44 #include "isl/isl.h"
45
46 #include "intel_mipmap_tree.h"
47 #include "intel_batchbuffer.h"
48 #include "intel_tex.h"
49 #include "intel_fbo.h"
50 #include "intel_buffer_objects.h"
51
52 #include "brw_context.h"
53 #include "brw_state.h"
54 #include "brw_defines.h"
55 #include "brw_wm.h"
56
57 struct surface_state_info {
58 unsigned num_dwords;
59 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
60 unsigned reloc_dw;
61 unsigned aux_reloc_dw;
62 unsigned tex_mocs;
63 unsigned rb_mocs;
64 };
65
66 static const struct surface_state_info surface_state_infos[] = {
67 [4] = {6, 32, 1, 0},
68 [5] = {6, 32, 1, 0},
69 [6] = {6, 32, 1, 0},
70 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
71 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
72 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
73 };
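/* Note: each initializer row above follows the field order of struct
 * surface_state_info, i.e. { num_dwords, ss_align, reloc_dw, aux_reloc_dw,
 * tex_mocs, rb_mocs }.  For example, the gen7 entry
 * { 8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3 } describes an 8-dword
 * RENDER_SURFACE_STATE aligned to 32 bytes, with the surface address
 * relocation in DWORD 1, the auxiliary surface relocation in DWORD 6, and
 * GEN7_MOCS_L3 used for both texture and render-target surfaces.
 */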
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
83 uint32_t tile_x = 0, tile_y = 0;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(brw->intelScreen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96 * actual layout of the miptree structure in memory -- you're entering
97 * dangerous territory. This can only possibly work if you intend to
98 * access only a single level and slice of the texture, and the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106
107 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
108 view.base_array_layer,
109 &tile_x, &tile_y);
110
111 /* Minify the logical dimensions of the texture. */
112 const unsigned l = view.base_level - mt->first_level;
113 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
114 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
115 minify(surf.logical_level0_px.height, l);
116 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
117 minify(surf.logical_level0_px.depth, l);
118
119 /* Only the base level and layer can be addressed with the overridden
120 * layout.
121 */
122 surf.logical_level0_px.array_len = 1;
123 surf.levels = 1;
124 surf.dim_layout = dim_layout;
125
126 /* The requested slice of the texture is now at the base level and
127 * layer.
128 */
129 view.base_level = 0;
130 view.base_array_layer = 0;
131 }
132
133 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
134
135 struct isl_surf *aux_surf = NULL, aux_surf_s;
136 uint64_t aux_offset = 0;
137 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
138 if (mt->mcs_mt &&
139 ((view.usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) ||
140 mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)) {
141 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
142 aux_surf = &aux_surf_s;
143 assert(mt->mcs_mt->offset == 0);
144 aux_offset = mt->mcs_mt->bo->offset64;
145
146 /* We only really need a clear color if we also have an auxiliary
147 * surface. Without one, it does nothing.
148 */
149 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
150 }
151
152 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
153 ss_info.num_dwords * 4, ss_info.ss_align,
154 surf_index, surf_offset);
155
156 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
157 .address = mt->bo->offset64 + offset,
158 .aux_surf = aux_surf, .aux_usage = aux_usage,
159 .aux_address = aux_offset,
160 .mocs = mocs, .clear_color = clear_color,
161 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
162
163 drm_intel_bo_emit_reloc(brw->batch.bo,
164 *surf_offset + 4 * ss_info.reloc_dw,
165 mt->bo, offset,
166 read_domains, write_domains);
167
168 if (aux_surf) {
169 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
170 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
171 * contain other control information. Since buffer addresses are always
172 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
173 * an ordinary reloc to do the necessary address translation.
174 */
175 assert((aux_offset & 0xfff) == 0);
176 drm_intel_bo_emit_reloc(brw->batch.bo,
177 *surf_offset + 4 * ss_info.aux_reloc_dw,
178 mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
179 read_domains, write_domains);
180 }
181 }
182
183 uint32_t
184 brw_update_renderbuffer_surface(struct brw_context *brw,
185 struct gl_renderbuffer *rb,
186 bool layered, unsigned unit /* unused */,
187 uint32_t surf_index)
188 {
189 struct gl_context *ctx = &brw->ctx;
190 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
191 struct intel_mipmap_tree *mt = irb->mt;
192
193 assert(brw_render_target_supported(brw, rb));
194 intel_miptree_used_for_rendering(mt);
195
196 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
197 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
198 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
199 __func__, _mesa_get_format_name(rb_format));
200 }
201
202 const unsigned layer_multiplier =
203 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
204 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
205 MAX2(irb->mt->num_samples, 1) : 1;
206
207 struct isl_view view = {
208 .format = brw->render_target_format[rb_format],
209 .base_level = irb->mt_level - irb->mt->first_level,
210 .levels = 1,
211 .base_array_layer = irb->mt_layer / layer_multiplier,
212 .array_len = MAX2(irb->layer_count, 1),
213 .channel_select = {
214 ISL_CHANNEL_SELECT_RED,
215 ISL_CHANNEL_SELECT_GREEN,
216 ISL_CHANNEL_SELECT_BLUE,
217 ISL_CHANNEL_SELECT_ALPHA,
218 },
219 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
220 };
221
222 uint32_t offset;
223 brw_emit_surface_state(brw, mt, mt->target, view,
224 surface_state_infos[brw->gen].rb_mocs,
225 &offset, surf_index,
226 I915_GEM_DOMAIN_RENDER,
227 I915_GEM_DOMAIN_RENDER);
228 return offset;
229 }
230
231 GLuint
232 translate_tex_target(GLenum target)
233 {
234 switch (target) {
235 case GL_TEXTURE_1D:
236 case GL_TEXTURE_1D_ARRAY_EXT:
237 return BRW_SURFACE_1D;
238
239 case GL_TEXTURE_RECTANGLE_NV:
240 return BRW_SURFACE_2D;
241
242 case GL_TEXTURE_2D:
243 case GL_TEXTURE_2D_ARRAY_EXT:
244 case GL_TEXTURE_EXTERNAL_OES:
245 case GL_TEXTURE_2D_MULTISAMPLE:
246 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
247 return BRW_SURFACE_2D;
248
249 case GL_TEXTURE_3D:
250 return BRW_SURFACE_3D;
251
252 case GL_TEXTURE_CUBE_MAP:
253 case GL_TEXTURE_CUBE_MAP_ARRAY:
254 return BRW_SURFACE_CUBE;
255
256 default:
257 unreachable("not reached");
258 }
259 }
260
261 uint32_t
262 brw_get_surface_tiling_bits(uint32_t tiling)
263 {
264 switch (tiling) {
265 case I915_TILING_X:
266 return BRW_SURFACE_TILED;
267 case I915_TILING_Y:
268 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
269 default:
270 return 0;
271 }
272 }
273
274
275 uint32_t
276 brw_get_surface_num_multisamples(unsigned num_samples)
277 {
278 if (num_samples > 1)
279 return BRW_SURFACE_MULTISAMPLECOUNT_4;
280 else
281 return BRW_SURFACE_MULTISAMPLECOUNT_1;
282 }
283
284 /**
285 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
286 * swizzling.
287 */
288 int
289 brw_get_texture_swizzle(const struct gl_context *ctx,
290 const struct gl_texture_object *t)
291 {
292 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
293
294 int swizzles[SWIZZLE_NIL + 1] = {
295 SWIZZLE_X,
296 SWIZZLE_Y,
297 SWIZZLE_Z,
298 SWIZZLE_W,
299 SWIZZLE_ZERO,
300 SWIZZLE_ONE,
301 SWIZZLE_NIL
302 };
303
304 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
305 img->_BaseFormat == GL_DEPTH_STENCIL) {
306 GLenum depth_mode = t->DepthMode;
307
308 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
309 * with depth component data specified with a sized internal format.
310 * Otherwise, it's left at the old default, GL_LUMINANCE.
311 */
312 if (_mesa_is_gles3(ctx) &&
313 img->InternalFormat != GL_DEPTH_COMPONENT &&
314 img->InternalFormat != GL_DEPTH_STENCIL) {
315 depth_mode = GL_RED;
316 }
317
318 switch (depth_mode) {
319 case GL_ALPHA:
320 swizzles[0] = SWIZZLE_ZERO;
321 swizzles[1] = SWIZZLE_ZERO;
322 swizzles[2] = SWIZZLE_ZERO;
323 swizzles[3] = SWIZZLE_X;
324 break;
325 case GL_LUMINANCE:
326 swizzles[0] = SWIZZLE_X;
327 swizzles[1] = SWIZZLE_X;
328 swizzles[2] = SWIZZLE_X;
329 swizzles[3] = SWIZZLE_ONE;
330 break;
331 case GL_INTENSITY:
332 swizzles[0] = SWIZZLE_X;
333 swizzles[1] = SWIZZLE_X;
334 swizzles[2] = SWIZZLE_X;
335 swizzles[3] = SWIZZLE_X;
336 break;
337 case GL_RED:
338 swizzles[0] = SWIZZLE_X;
339 swizzles[1] = SWIZZLE_ZERO;
340 swizzles[2] = SWIZZLE_ZERO;
341 swizzles[3] = SWIZZLE_ONE;
342 break;
343 }
344 }
345
346 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
347
348 /* If the texture's format is alpha-only, force R, G, and B to
349 * 0.0. Similarly, if the texture's format has no alpha channel,
350 * force the alpha value read to 1.0. This allows for the
351 * implementation to use an RGBA texture for any of these formats
352 * without leaking any unexpected values.
353 */
354 switch (img->_BaseFormat) {
355 case GL_ALPHA:
356 swizzles[0] = SWIZZLE_ZERO;
357 swizzles[1] = SWIZZLE_ZERO;
358 swizzles[2] = SWIZZLE_ZERO;
359 break;
360 case GL_LUMINANCE:
361 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
362 swizzles[0] = SWIZZLE_X;
363 swizzles[1] = SWIZZLE_X;
364 swizzles[2] = SWIZZLE_X;
365 swizzles[3] = SWIZZLE_ONE;
366 }
367 break;
368 case GL_LUMINANCE_ALPHA:
369 if (datatype == GL_SIGNED_NORMALIZED) {
370 swizzles[0] = SWIZZLE_X;
371 swizzles[1] = SWIZZLE_X;
372 swizzles[2] = SWIZZLE_X;
373 swizzles[3] = SWIZZLE_W;
374 }
375 break;
376 case GL_INTENSITY:
377 if (datatype == GL_SIGNED_NORMALIZED) {
378 swizzles[0] = SWIZZLE_X;
379 swizzles[1] = SWIZZLE_X;
380 swizzles[2] = SWIZZLE_X;
381 swizzles[3] = SWIZZLE_X;
382 }
383 break;
384 case GL_RED:
385 case GL_RG:
386 case GL_RGB:
387 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
388 swizzles[3] = SWIZZLE_ONE;
389 break;
390 }
391
392 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
393 swizzles[GET_SWZ(t->_Swizzle, 1)],
394 swizzles[GET_SWZ(t->_Swizzle, 2)],
395 swizzles[GET_SWZ(t->_Swizzle, 3)]);
396 }
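/* Worked example (illustrative): sampling a GL_DEPTH_COMPONENT texture with
 * DepthMode GL_RED yields the base swizzle (X, ZERO, ZERO, ONE) above.  If
 * the application additionally sets a texture swizzle of
 * (GREEN, GREEN, GREEN, GREEN), the composition in the return statement
 * looks up swizzles[SWIZZLE_Y] for every channel, so the final result is
 * (ZERO, ZERO, ZERO, ZERO).
 */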
397
398 /**
399 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
400 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
401 *
402 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
403 * 0 1 2 3 4 5
404 * 4 5 6 7 0 1
405 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
406 *
407 * which is simply adding 4 then modding by 8 (or anding with 7).
408 *
409 * We then may need to apply workarounds for textureGather hardware bugs.
410 */
411 static unsigned
412 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
413 {
414 unsigned scs = (swizzle + 4) & 7;
415
416 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
417 }
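/* Worked example (illustrative): SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 =
 * HSW_SCS_RED, and SWIZZLE_ZERO (4) becomes (4 + 4) & 7 = 0 = HSW_SCS_ZERO.
 * With need_green_to_blue set (the Haswell textureGather workaround),
 * SWIZZLE_Y would normally yield HSW_SCS_GREEN but is redirected to
 * HSW_SCS_BLUE instead.
 */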
418
419 void
420 brw_update_texture_surface(struct gl_context *ctx,
421 unsigned unit,
422 uint32_t *surf_offset,
423 bool for_gather,
424 uint32_t plane)
425 {
426 struct brw_context *brw = brw_context(ctx);
427 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
428
429 if (obj->Target == GL_TEXTURE_BUFFER) {
430 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
431
432 } else {
433 struct intel_texture_object *intel_obj = intel_texture_object(obj);
434 struct intel_mipmap_tree *mt = intel_obj->mt;
435
436 if (plane > 0) {
437 if (mt->plane[plane - 1] == NULL)
438 return;
439 mt = mt->plane[plane - 1];
440 }
441
442 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
443 /* If this is a view with restricted NumLayers, then our effective depth
444 * is not just the miptree depth.
445 */
446 const unsigned view_num_layers =
447 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
448 mt->logical_depth0;
449
450 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
451 * texturing functions that return a float, as our code generation always
452 * selects the .x channel (which would always be 0).
453 */
454 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
455 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
456 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
457 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
458 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
459 brw_get_texture_swizzle(&brw->ctx, obj));
460
461 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
462 unsigned format = translate_tex_format(brw, mesa_fmt,
463 sampler->sRGBDecode);
464
465 /* Implement gen6 and gen7 gather work-around */
466 bool need_green_to_blue = false;
467 if (for_gather) {
468 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
469 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
470 need_green_to_blue = brw->is_haswell;
471 } else if (brw->gen == 6) {
472 /* Sandybridge's gather4 message is broken for integer formats.
473 * To work around this, we pretend the surface is UNORM for
474 * 8 or 16-bit formats, and emit shader instructions to recover
475 * the real INT/UINT value. For 32-bit formats, we pretend
476 * the surface is FLOAT, and simply reinterpret the resulting
477 * bits.
478 */
479 switch (format) {
480 case BRW_SURFACEFORMAT_R8_SINT:
481 case BRW_SURFACEFORMAT_R8_UINT:
482 format = BRW_SURFACEFORMAT_R8_UNORM;
483 break;
484
485 case BRW_SURFACEFORMAT_R16_SINT:
486 case BRW_SURFACEFORMAT_R16_UINT:
487 format = BRW_SURFACEFORMAT_R16_UNORM;
488 break;
489
490 case BRW_SURFACEFORMAT_R32_SINT:
491 case BRW_SURFACEFORMAT_R32_UINT:
492 format = BRW_SURFACEFORMAT_R32_FLOAT;
493 break;
494
495 default:
496 break;
497 }
498 }
499 }
500
501 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
502 if (brw->gen <= 7) {
503 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
504 mt = mt->r8stencil_mt;
505 } else {
506 mt = mt->stencil_mt;
507 }
508 format = BRW_SURFACEFORMAT_R8_UINT;
509 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
510 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
511 mt = mt->r8stencil_mt;
512 format = BRW_SURFACEFORMAT_R8_UINT;
513 }
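/* Note (assumption, tying this to the change named in the page title): on
 * gen7 and earlier the stencil miptree apparently cannot be sampled
 * directly, so stencil texturing is redirected above to the R8_UINT
 * r8stencil_mt copy, which is expected to be up to date (hence the
 * r8stencil_needs_update assertions); gen8+ samples the real stencil
 * miptree instead.
 */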
514
515 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
516
517 struct isl_view view = {
518 .format = format,
519 .base_level = obj->MinLevel + obj->BaseLevel,
520 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
521 .base_array_layer = obj->MinLayer,
522 .array_len = view_num_layers,
523 .channel_select = {
524 swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
525 swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
526 swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
527 swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
528 },
529 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
530 };
531
532 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
533 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
534 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
535
536 brw_emit_surface_state(brw, mt, mt->target, view,
537 surface_state_infos[brw->gen].tex_mocs,
538 surf_offset, surf_index,
539 I915_GEM_DOMAIN_SAMPLER, 0);
540 }
541 }
542
543 void
544 brw_emit_buffer_surface_state(struct brw_context *brw,
545 uint32_t *out_offset,
546 drm_intel_bo *bo,
547 unsigned buffer_offset,
548 unsigned surface_format,
549 unsigned buffer_size,
550 unsigned pitch,
551 bool rw)
552 {
553 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
554
555 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
556 ss_info.num_dwords * 4, ss_info.ss_align,
557 out_offset);
558
559 isl_buffer_fill_state(&brw->isl_dev, dw,
560 .address = (bo ? bo->offset64 : 0) + buffer_offset,
561 .size = buffer_size,
562 .format = surface_format,
563 .stride = pitch,
564 .mocs = ss_info.tex_mocs);
565
566 if (bo) {
567 drm_intel_bo_emit_reloc(brw->batch.bo,
568 *out_offset + 4 * ss_info.reloc_dw,
569 bo, buffer_offset,
570 I915_GEM_DOMAIN_SAMPLER,
571 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
572 }
573 }
574
575 void
576 brw_update_buffer_texture_surface(struct gl_context *ctx,
577 unsigned unit,
578 uint32_t *surf_offset)
579 {
580 struct brw_context *brw = brw_context(ctx);
581 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
582 struct intel_buffer_object *intel_obj =
583 intel_buffer_object(tObj->BufferObject);
584 uint32_t size = tObj->BufferSize;
585 drm_intel_bo *bo = NULL;
586 mesa_format format = tObj->_BufferObjectFormat;
587 uint32_t brw_format = brw_format_for_mesa_format(format);
588 int texel_size = _mesa_get_format_bytes(format);
589
590 if (intel_obj) {
591 size = MIN2(size, intel_obj->Base.Size);
592 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
593 }
594
595 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
596 _mesa_problem(NULL, "bad format %s for texture buffer\n",
597 _mesa_get_format_name(format));
598 }
599
600 brw_emit_buffer_surface_state(brw, surf_offset, bo,
601 tObj->BufferOffset,
602 brw_format,
603 size,
604 texel_size,
605 false /* rw */);
606 }
607
608 /**
609 * Create the constant buffer surface. Vertex/fragment shader constants will be
610 * read from this buffer with Data Port Read instructions/messages.
611 */
612 void
613 brw_create_constant_surface(struct brw_context *brw,
614 drm_intel_bo *bo,
615 uint32_t offset,
616 uint32_t size,
617 uint32_t *out_offset)
618 {
619 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
620 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
621 size, 1, false);
622 }
623
624 /**
625 * Create the buffer surface. Shader buffer variables will be
626 * read from / written to this buffer with Data Port Read/Write
627 * instructions/messages.
628 */
629 void
630 brw_create_buffer_surface(struct brw_context *brw,
631 drm_intel_bo *bo,
632 uint32_t offset,
633 uint32_t size,
634 uint32_t *out_offset)
635 {
636 /* Use a raw surface so we can reuse existing untyped read/write/atomic
637 * messages. We need these specifically for the fragment shader since they
638 * include a pixel mask header that we need in order to ensure correct behavior
639 * with helper invocations, which cannot write to the buffer.
640 */
641 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
642 BRW_SURFACEFORMAT_RAW,
643 size, 1, true);
644 }
645
646 /**
647 * Set up a binding table entry for use by stream output logic (transform
648 * feedback).
649 *
650 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
651 */
652 void
653 brw_update_sol_surface(struct brw_context *brw,
654 struct gl_buffer_object *buffer_obj,
655 uint32_t *out_offset, unsigned num_vector_components,
656 unsigned stride_dwords, unsigned offset_dwords)
657 {
658 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
659 uint32_t offset_bytes = 4 * offset_dwords;
660 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
661 offset_bytes,
662 buffer_obj->Size - offset_bytes);
663 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
664 out_offset);
665 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
666 size_t size_dwords = buffer_obj->Size / 4;
667 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
668
669 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
670 * too big to map using a single binding table entry?
671 */
672 assert((size_dwords - offset_dwords) / stride_dwords
673 <= BRW_MAX_NUM_BUFFER_ENTRIES);
674
675 if (size_dwords > offset_dwords + num_vector_components) {
676 /* There is room for at least 1 transform feedback output in the buffer.
677 * Compute the number of additional transform feedback outputs the
678 * buffer has room for.
679 */
680 buffer_size_minus_1 =
681 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
682 } else {
683 /* There isn't even room for a single transform feedback output in the
684 * buffer. We can't configure the binding table entry to prevent output
685 * entirely; we'll have to rely on the geometry shader to detect
686 * overflow. But to minimize the damage in case of a bug, set up the
687 * binding table entry to just allow a single output.
688 */
689 buffer_size_minus_1 = 0;
690 }
691 width = buffer_size_minus_1 & 0x7f;
692 height = (buffer_size_minus_1 & 0xfff80) >> 7;
693 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
694
695 switch (num_vector_components) {
696 case 1:
697 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
698 break;
699 case 2:
700 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
701 break;
702 case 3:
703 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
704 break;
705 case 4:
706 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
707 break;
708 default:
709 unreachable("Invalid vector size for transform feedback output");
710 }
711
712 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
713 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
714 surface_format << BRW_SURFACE_FORMAT_SHIFT |
715 BRW_SURFACE_RC_READ_WRITE;
716 surf[1] = bo->offset64 + offset_bytes; /* reloc */
717 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
718 height << BRW_SURFACE_HEIGHT_SHIFT);
719 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
720 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
721 surf[4] = 0;
722 surf[5] = 0;
723
724 /* Emit relocation to surface contents. */
725 drm_intel_bo_emit_reloc(brw->batch.bo,
726 *out_offset + 4,
727 bo, offset_bytes,
728 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
729 }
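/* Worked example (illustrative): the available entry count is packed into
 * the width/height/depth fields as bits [6:0], [19:7] and [26:20] of
 * buffer_size_minus_1.  For buffer_size_minus_1 = 1000 the code above
 * yields width = 104, height = 7, depth = 0, and the original value can be
 * recovered as depth * 2^20 + height * 2^7 + width = 1000.
 */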
730
731 /* Creates a new WM constant buffer reflecting the current fragment program's
732 * constants, if needed by the fragment program.
733 *
734 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
735 * state atom.
736 */
737 static void
738 brw_upload_wm_pull_constants(struct brw_context *brw)
739 {
740 struct brw_stage_state *stage_state = &brw->wm.base;
741 /* BRW_NEW_FRAGMENT_PROGRAM */
742 struct brw_fragment_program *fp =
743 (struct brw_fragment_program *) brw->fragment_program;
744 /* BRW_NEW_FS_PROG_DATA */
745 struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;
746
747 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
748 /* _NEW_PROGRAM_CONSTANTS */
749 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
750 stage_state, prog_data);
751 }
752
753 const struct brw_tracked_state brw_wm_pull_constants = {
754 .dirty = {
755 .mesa = _NEW_PROGRAM_CONSTANTS,
756 .brw = BRW_NEW_BATCH |
757 BRW_NEW_BLORP |
758 BRW_NEW_FRAGMENT_PROGRAM |
759 BRW_NEW_FS_PROG_DATA,
760 },
761 .emit = brw_upload_wm_pull_constants,
762 };
763
764 /**
765 * Creates a null renderbuffer surface.
766 *
767 * This is used when the shader doesn't write to any color output. An FB
768 * write to target 0 will still be emitted, because that's how the thread is
769 * terminated (and computed depth is returned), so we need to have the
770 * hardware discard the target 0 color output.
771 */
772 static void
773 brw_emit_null_surface_state(struct brw_context *brw,
774 unsigned width,
775 unsigned height,
776 unsigned samples,
777 uint32_t *out_offset)
778 {
779 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
780 * Notes):
781 *
782 * A null surface will be used in instances where an actual surface is
783 * not bound. When a write message is generated to a null surface, no
784 * actual surface is written to. When a read message (including any
785 * sampling engine message) is generated to a null surface, the result
786 * is all zeros. Note that a null surface type is allowed to be used
787 * with all messages, even if it is not specifically indicated as
788 * supported. All of the remaining fields in surface state are ignored
789 * for null surfaces, with the following exceptions:
790 *
791 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
792 * depth buffer’s corresponding state for all render target surfaces,
793 * including null.
794 *
795 * - Surface Format must be R8G8B8A8_UNORM.
796 */
797 unsigned surface_type = BRW_SURFACE_NULL;
798 drm_intel_bo *bo = NULL;
799 unsigned pitch_minus_1 = 0;
800 uint32_t multisampling_state = 0;
801 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
802 out_offset);
803
804 if (samples > 1) {
805 /* On Gen6, null render targets seem to cause GPU hangs when
806 * multisampling. So work around this problem by rendering into a dummy
807 * color buffer.
808 *
809 * To decrease the amount of memory needed by the workaround buffer, we
810 * set its pitch to 128 bytes (the width of a Y tile). This means that
811 * the amount of memory needed for the workaround buffer is
812 * (width_in_tiles + height_in_tiles - 1) tiles.
813 *
814 * Note that since the workaround buffer will be interpreted by the
815 * hardware as an interleaved multisampled buffer, we need to compute
816 * width_in_tiles and height_in_tiles by dividing the width and height
817 * by 16 rather than the normal Y-tile size of 32.
818 */
819 unsigned width_in_tiles = ALIGN(width, 16) / 16;
820 unsigned height_in_tiles = ALIGN(height, 16) / 16;
821 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
822 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
823 size_needed);
824 bo = brw->wm.multisampled_null_render_target_bo;
825 surface_type = BRW_SURFACE_2D;
826 pitch_minus_1 = 127;
827 multisampling_state = brw_get_surface_num_multisamples(samples);
828 }
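/* Worked example (illustrative): for a 1024x768 multisampled framebuffer,
 * width_in_tiles = ALIGN(1024, 16) / 16 = 64 and
 * height_in_tiles = ALIGN(768, 16) / 16 = 48, so the workaround buffer
 * only needs (64 + 48 - 1) * 4096 = 454656 bytes rather than a full-size
 * render target.
 */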
829
830 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
831 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
832 if (brw->gen < 6) {
833 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
834 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
835 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
836 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
837 }
838 surf[1] = bo ? bo->offset64 : 0;
839 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
840 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
841
842 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
843 * Notes):
844 *
845 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
846 */
847 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
848 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
849 surf[4] = multisampling_state;
850 surf[5] = 0;
851
852 if (bo) {
853 drm_intel_bo_emit_reloc(brw->batch.bo,
854 *out_offset + 4,
855 bo, 0,
856 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
857 }
858 }
859
860 /**
861 * Sets up a surface state structure to point at the given region.
862 * While it is only used for the front/back buffer currently, it should be
863 * usable for further buffers when doing ARB_draw_buffer support.
864 */
865 static uint32_t
866 gen4_update_renderbuffer_surface(struct brw_context *brw,
867 struct gl_renderbuffer *rb,
868 bool layered, unsigned unit,
869 uint32_t surf_index)
870 {
871 struct gl_context *ctx = &brw->ctx;
872 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
873 struct intel_mipmap_tree *mt = irb->mt;
874 uint32_t *surf;
875 uint32_t tile_x, tile_y;
876 uint32_t format = 0;
877 uint32_t offset;
878 /* _NEW_BUFFERS */
879 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
880 /* BRW_NEW_FS_PROG_DATA */
881
882 assert(!layered);
883
884 if (rb->TexImage && !brw->has_surface_tile_offset) {
885 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
886
887 if (tile_x != 0 || tile_y != 0) {
888 /* Original gen4 hardware couldn't draw to a non-tile-aligned
889 * destination in a miptree unless you actually set up your renderbuffer
890 * as a miptree and used the fragile lod/array_index/etc. controls to
891 * select the image. So, instead, we just make a new single-level
892 * miptree and render into that.
893 */
894 intel_renderbuffer_move_to_temp(brw, irb, false);
895 mt = irb->mt;
896 }
897 }
898
899 intel_miptree_used_for_rendering(irb->mt);
900
901 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
902
903 format = brw->render_target_format[rb_format];
904 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
905 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
906 __func__, _mesa_get_format_name(rb_format));
907 }
908
909 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
910 format << BRW_SURFACE_FORMAT_SHIFT);
911
912 /* reloc */
913 assert(mt->offset % mt->cpp == 0);
914 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
915 mt->bo->offset64 + mt->offset);
916
917 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
918 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
919
920 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
921 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
922
923 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
924
925 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
926 /* Note that the low bits of these fields are missing, so
927 * there's the possibility of getting in trouble.
928 */
929 assert(tile_x % 4 == 0);
930 assert(tile_y % 2 == 0);
931 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
932 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
933 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
934
935 if (brw->gen < 6) {
936 /* _NEW_COLOR */
937 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
938 (ctx->Color.BlendEnabled & (1 << unit)))
939 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
940
941 if (!ctx->Color.ColorMask[unit][0])
942 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
943 if (!ctx->Color.ColorMask[unit][1])
944 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
945 if (!ctx->Color.ColorMask[unit][2])
946 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
947
948 /* As mentioned above, disable writes to the alpha component when the
949 * renderbuffer is XRGB.
950 */
951 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
952 !ctx->Color.ColorMask[unit][3]) {
953 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
954 }
955 }
956
957 drm_intel_bo_emit_reloc(brw->batch.bo,
958 offset + 4,
959 mt->bo,
960 surf[1] - mt->bo->offset64,
961 I915_GEM_DOMAIN_RENDER,
962 I915_GEM_DOMAIN_RENDER);
963
964 return offset;
965 }
966
967 /**
968 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
969 */
970 void
971 brw_update_renderbuffer_surfaces(struct brw_context *brw,
972 const struct gl_framebuffer *fb,
973 uint32_t render_target_start,
974 uint32_t *surf_offset)
975 {
976 GLuint i;
977 const unsigned int w = _mesa_geometric_width(fb);
978 const unsigned int h = _mesa_geometric_height(fb);
979 const unsigned int s = _mesa_geometric_samples(fb);
980
981 /* Update surfaces for drawing buffers */
982 if (fb->_NumColorDrawBuffers >= 1) {
983 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
984 const uint32_t surf_index = render_target_start + i;
985
986 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
987 surf_offset[surf_index] =
988 brw->vtbl.update_renderbuffer_surface(
989 brw, fb->_ColorDrawBuffers[i],
990 _mesa_geometric_layers(fb) > 0, i, surf_index);
991 } else {
992 brw->vtbl.emit_null_surface_state(brw, w, h, s,
993 &surf_offset[surf_index]);
994 }
995 }
996 } else {
997 const uint32_t surf_index = render_target_start;
998 brw->vtbl.emit_null_surface_state(brw, w, h, s,
999 &surf_offset[surf_index]);
1000 }
1001 }
1002
1003 static void
1004 update_renderbuffer_surfaces(struct brw_context *brw)
1005 {
1006 const struct gl_context *ctx = &brw->ctx;
1007
1008 /* _NEW_BUFFERS | _NEW_COLOR */
1009 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1010 brw_update_renderbuffer_surfaces(
1011 brw, fb,
1012 brw->wm.prog_data->binding_table.render_target_start,
1013 brw->wm.base.surf_offset);
1014 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1015 }
1016
1017 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1018 .dirty = {
1019 .mesa = _NEW_BUFFERS |
1020 _NEW_COLOR,
1021 .brw = BRW_NEW_BATCH |
1022 BRW_NEW_BLORP |
1023 BRW_NEW_FS_PROG_DATA,
1024 },
1025 .emit = update_renderbuffer_surfaces,
1026 };
1027
1028 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1029 .dirty = {
1030 .mesa = _NEW_BUFFERS,
1031 .brw = BRW_NEW_BATCH |
1032 BRW_NEW_BLORP,
1033 },
1034 .emit = update_renderbuffer_surfaces,
1035 };
1036
1037 static void
1038 update_renderbuffer_read_surfaces(struct brw_context *brw)
1039 {
1040 const struct gl_context *ctx = &brw->ctx;
1041
1042 /* BRW_NEW_FRAGMENT_PROGRAM */
1043 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1044 brw->fragment_program &&
1045 brw->fragment_program->Base.OutputsRead) {
1046 /* _NEW_BUFFERS */
1047 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1048
1049 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1050 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1051 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1052 const unsigned surf_index =
1053 brw->wm.prog_data->binding_table.render_target_read_start + i;
1054 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1055
1056 if (irb) {
1057 const unsigned format = brw->render_target_format[
1058 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1059 assert(isl_format_supports_sampling(brw->intelScreen->devinfo,
1060 format));
1061
1062 /* Override the target of the texture if the render buffer is a
1063 * single slice of a 3D texture (since the minimum array element
1064 * field of the surface state structure is ignored by the sampler
1065 * unit for 3D textures on some hardware), or if the render buffer
1066 * is a 1D array (since shaders always provide the array index
1067 * coordinate at the Z component to avoid state-dependent
1068 * recompiles when changing the texture target of the
1069 * framebuffer).
1070 */
1071 const GLenum target =
1072 (irb->mt->target == GL_TEXTURE_3D &&
1073 irb->layer_count == 1) ? GL_TEXTURE_2D :
1074 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1075 irb->mt->target;
1076
1077 /* intel_renderbuffer::mt_layer is expressed in sample units for
1078 * the UMS and CMS multisample layouts, but
1079 * intel_renderbuffer::layer_count is expressed in units of whole
1080 * logical layers regardless of the multisample layout.
1081 */
1082 const unsigned mt_layer_unit =
1083 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1084 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1085 MAX2(irb->mt->num_samples, 1) : 1;
1086
1087 const struct isl_view view = {
1088 .format = format,
1089 .base_level = irb->mt_level - irb->mt->first_level,
1090 .levels = 1,
1091 .base_array_layer = irb->mt_layer / mt_layer_unit,
1092 .array_len = irb->layer_count,
1093 .channel_select = {
1094 ISL_CHANNEL_SELECT_RED,
1095 ISL_CHANNEL_SELECT_GREEN,
1096 ISL_CHANNEL_SELECT_BLUE,
1097 ISL_CHANNEL_SELECT_ALPHA,
1098 },
1099 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1100 };
1101
1102 brw_emit_surface_state(brw, irb->mt, target, view,
1103 surface_state_infos[brw->gen].tex_mocs,
1104 surf_offset, surf_index,
1105 I915_GEM_DOMAIN_SAMPLER, 0);
1106
1107 } else {
1108 brw->vtbl.emit_null_surface_state(
1109 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1110 _mesa_geometric_samples(fb), surf_offset);
1111 }
1112 }
1113
1114 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1115 }
1116 }
1117
1118 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1119 .dirty = {
1120 .mesa = _NEW_BUFFERS,
1121 .brw = BRW_NEW_BATCH |
1122 BRW_NEW_FRAGMENT_PROGRAM,
1123 },
1124 .emit = update_renderbuffer_read_surfaces,
1125 };
1126
1127 static void
1128 update_stage_texture_surfaces(struct brw_context *brw,
1129 const struct gl_program *prog,
1130 struct brw_stage_state *stage_state,
1131 bool for_gather, uint32_t plane)
1132 {
1133 if (!prog)
1134 return;
1135
1136 struct gl_context *ctx = &brw->ctx;
1137
1138 uint32_t *surf_offset = stage_state->surf_offset;
1139
1140 /* BRW_NEW_*_PROG_DATA */
1141 if (for_gather)
1142 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1143 else
1144 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1145
1146 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1147 for (unsigned s = 0; s < num_samplers; s++) {
1148 surf_offset[s] = 0;
1149
1150 if (prog->SamplersUsed & (1 << s)) {
1151 const unsigned unit = prog->SamplerUnits[s];
1152
1153 /* _NEW_TEXTURE */
1154 if (ctx->Texture.Unit[unit]._Current) {
1155 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1156 }
1157 }
1158 }
1159 }
1160
1161
1162 /**
1163 * Construct SURFACE_STATE objects for enabled textures.
1164 */
1165 static void
1166 brw_update_texture_surfaces(struct brw_context *brw)
1167 {
1168 /* BRW_NEW_VERTEX_PROGRAM */
1169 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1170
1171 /* BRW_NEW_TESS_PROGRAMS */
1172 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1173 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1174
1175 /* BRW_NEW_GEOMETRY_PROGRAM */
1176 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1177
1178 /* BRW_NEW_FRAGMENT_PROGRAM */
1179 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1180
1181 /* _NEW_TEXTURE */
1182 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1183 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1184 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1185 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1186 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1187
1188 /* Emit an alternate set of surface state for gather. This
1189 * allows the surface format to be overridden for only the
1190 * gather4 messages. */
1191 if (brw->gen < 8) {
1192 if (vs && vs->UsesGather)
1193 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1194 if (tcs && tcs->UsesGather)
1195 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1196 if (tes && tes->UsesGather)
1197 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1198 if (gs && gs->UsesGather)
1199 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1200 if (fs && fs->UsesGather)
1201 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1202 }
1203
1204 if (fs) {
1205 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1206 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1207 }
1208
1209 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1210 }
1211
1212 const struct brw_tracked_state brw_texture_surfaces = {
1213 .dirty = {
1214 .mesa = _NEW_TEXTURE,
1215 .brw = BRW_NEW_BATCH |
1216 BRW_NEW_BLORP |
1217 BRW_NEW_FRAGMENT_PROGRAM |
1218 BRW_NEW_FS_PROG_DATA |
1219 BRW_NEW_GEOMETRY_PROGRAM |
1220 BRW_NEW_GS_PROG_DATA |
1221 BRW_NEW_TESS_PROGRAMS |
1222 BRW_NEW_TCS_PROG_DATA |
1223 BRW_NEW_TES_PROG_DATA |
1224 BRW_NEW_TEXTURE_BUFFER |
1225 BRW_NEW_VERTEX_PROGRAM |
1226 BRW_NEW_VS_PROG_DATA,
1227 },
1228 .emit = brw_update_texture_surfaces,
1229 };
1230
1231 static void
1232 brw_update_cs_texture_surfaces(struct brw_context *brw)
1233 {
1234 /* BRW_NEW_COMPUTE_PROGRAM */
1235 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1236
1237 /* _NEW_TEXTURE */
1238 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1239
1240 /* Emit an alternate set of surface state for gather. This
1241 * allows the surface format to be overridden for only the
1242 * gather4 messages.
1243 */
1244 if (brw->gen < 8) {
1245 if (cs && cs->UsesGather)
1246 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1247 }
1248
1249 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1250 }
1251
1252 const struct brw_tracked_state brw_cs_texture_surfaces = {
1253 .dirty = {
1254 .mesa = _NEW_TEXTURE,
1255 .brw = BRW_NEW_BATCH |
1256 BRW_NEW_BLORP |
1257 BRW_NEW_COMPUTE_PROGRAM,
1258 },
1259 .emit = brw_update_cs_texture_surfaces,
1260 };
1261
1262
1263 void
1264 brw_upload_ubo_surfaces(struct brw_context *brw,
1265 struct gl_linked_shader *shader,
1266 struct brw_stage_state *stage_state,
1267 struct brw_stage_prog_data *prog_data)
1268 {
1269 struct gl_context *ctx = &brw->ctx;
1270
1271 if (!shader)
1272 return;
1273
1274 uint32_t *ubo_surf_offsets =
1275 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1276
1277 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1278 struct gl_uniform_buffer_binding *binding =
1279 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1280
1281 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1282 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1283 } else {
1284 struct intel_buffer_object *intel_bo =
1285 intel_buffer_object(binding->BufferObject);
1286 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1287 if (!binding->AutomaticSize)
1288 size = MIN2(size, binding->Size);
1289 drm_intel_bo *bo =
1290 intel_bufferobj_buffer(brw, intel_bo,
1291 binding->Offset,
1292 size);
1293 brw_create_constant_surface(brw, bo, binding->Offset,
1294 size,
1295 &ubo_surf_offsets[i]);
1296 }
1297 }
1298
1299 uint32_t *ssbo_surf_offsets =
1300 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1301
1302 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1303 struct gl_shader_storage_buffer_binding *binding =
1304 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1305
1306 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1307 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1308 } else {
1309 struct intel_buffer_object *intel_bo =
1310 intel_buffer_object(binding->BufferObject);
1311 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1312 if (!binding->AutomaticSize)
1313 size = MIN2(size, binding->Size);
1314 drm_intel_bo *bo =
1315 intel_bufferobj_buffer(brw, intel_bo,
1316 binding->Offset,
1317 size);
1318 brw_create_buffer_surface(brw, bo, binding->Offset,
1319 size,
1320 &ssbo_surf_offsets[i]);
1321 }
1322 }
1323
1324 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1325 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1326 }
1327
1328 static void
1329 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1330 {
1331 struct gl_context *ctx = &brw->ctx;
1332 /* _NEW_PROGRAM */
1333 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1334
1335 if (!prog)
1336 return;
1337
1338 /* BRW_NEW_FS_PROG_DATA */
1339 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1340 &brw->wm.base, &brw->wm.prog_data->base);
1341 }
1342
1343 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1344 .dirty = {
1345 .mesa = _NEW_PROGRAM,
1346 .brw = BRW_NEW_BATCH |
1347 BRW_NEW_BLORP |
1348 BRW_NEW_FS_PROG_DATA |
1349 BRW_NEW_UNIFORM_BUFFER,
1350 },
1351 .emit = brw_upload_wm_ubo_surfaces,
1352 };
1353
1354 static void
1355 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1356 {
1357 struct gl_context *ctx = &brw->ctx;
1358 /* _NEW_PROGRAM */
1359 struct gl_shader_program *prog =
1360 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1361
1362 if (!prog)
1363 return;
1364
1365 /* BRW_NEW_CS_PROG_DATA */
1366 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1367 &brw->cs.base, &brw->cs.prog_data->base);
1368 }
1369
1370 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1371 .dirty = {
1372 .mesa = _NEW_PROGRAM,
1373 .brw = BRW_NEW_BATCH |
1374 BRW_NEW_BLORP |
1375 BRW_NEW_CS_PROG_DATA |
1376 BRW_NEW_UNIFORM_BUFFER,
1377 },
1378 .emit = brw_upload_cs_ubo_surfaces,
1379 };
1380
1381 void
1382 brw_upload_abo_surfaces(struct brw_context *brw,
1383 struct gl_linked_shader *shader,
1384 struct brw_stage_state *stage_state,
1385 struct brw_stage_prog_data *prog_data)
1386 {
1387 struct gl_context *ctx = &brw->ctx;
1388 uint32_t *surf_offsets =
1389 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1390
1391 if (shader && shader->NumAtomicBuffers) {
1392 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1393 struct gl_atomic_buffer_binding *binding =
1394 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1395 struct intel_buffer_object *intel_bo =
1396 intel_buffer_object(binding->BufferObject);
1397 drm_intel_bo *bo = intel_bufferobj_buffer(
1398 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1399
1400 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1401 binding->Offset, BRW_SURFACEFORMAT_RAW,
1402 bo->size - binding->Offset, 1, true);
1403 }
1404
1405 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1406 }
1407 }
1408
1409 static void
1410 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1411 {
1412 struct gl_context *ctx = &brw->ctx;
1413 /* _NEW_PROGRAM */
1414 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1415
1416 if (prog) {
1417 /* BRW_NEW_FS_PROG_DATA */
1418 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1419 &brw->wm.base, &brw->wm.prog_data->base);
1420 }
1421 }
1422
1423 const struct brw_tracked_state brw_wm_abo_surfaces = {
1424 .dirty = {
1425 .mesa = _NEW_PROGRAM,
1426 .brw = BRW_NEW_ATOMIC_BUFFER |
1427 BRW_NEW_BLORP |
1428 BRW_NEW_BATCH |
1429 BRW_NEW_FS_PROG_DATA,
1430 },
1431 .emit = brw_upload_wm_abo_surfaces,
1432 };
1433
1434 static void
1435 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1436 {
1437 struct gl_context *ctx = &brw->ctx;
1438 /* _NEW_PROGRAM */
1439 struct gl_shader_program *prog =
1440 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1441
1442 if (prog) {
1443 /* BRW_NEW_CS_PROG_DATA */
1444 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1445 &brw->cs.base, &brw->cs.prog_data->base);
1446 }
1447 }
1448
1449 const struct brw_tracked_state brw_cs_abo_surfaces = {
1450 .dirty = {
1451 .mesa = _NEW_PROGRAM,
1452 .brw = BRW_NEW_ATOMIC_BUFFER |
1453 BRW_NEW_BLORP |
1454 BRW_NEW_BATCH |
1455 BRW_NEW_CS_PROG_DATA,
1456 },
1457 .emit = brw_upload_cs_abo_surfaces,
1458 };
1459
1460 static void
1461 brw_upload_cs_image_surfaces(struct brw_context *brw)
1462 {
1463 struct gl_context *ctx = &brw->ctx;
1464 /* _NEW_PROGRAM */
1465 struct gl_shader_program *prog =
1466 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1467
1468 if (prog) {
1469 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1470 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1471 &brw->cs.base, &brw->cs.prog_data->base);
1472 }
1473 }
1474
1475 const struct brw_tracked_state brw_cs_image_surfaces = {
1476 .dirty = {
1477 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1478 .brw = BRW_NEW_BATCH |
1479 BRW_NEW_BLORP |
1480 BRW_NEW_CS_PROG_DATA |
1481 BRW_NEW_IMAGE_UNITS
1482 },
1483 .emit = brw_upload_cs_image_surfaces,
1484 };
1485
1486 static uint32_t
1487 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1488 {
1489 const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
1490 uint32_t hw_format = brw_format_for_mesa_format(format);
1491 if (access == GL_WRITE_ONLY) {
1492 return hw_format;
1493 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1494 /* Typed surface reads support a very limited subset of the shader
1495 * image formats. Translate it into the closest format the
1496 * hardware supports.
1497 */
1498 return isl_lower_storage_image_format(devinfo, hw_format);
1499 } else {
1500 /* The hardware doesn't actually support a typed format that we can use
1501 * so we have to fall back to untyped read/write messages.
1502 */
1503 return BRW_SURFACEFORMAT_RAW;
1504 }
1505 }
1506
1507 static void
1508 update_default_image_param(struct brw_context *brw,
1509 struct gl_image_unit *u,
1510 unsigned surface_idx,
1511 struct brw_image_param *param)
1512 {
1513 memset(param, 0, sizeof(*param));
1514 param->surface_idx = surface_idx;
1515 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1516 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1517 * detailed explanation of these parameters.
1518 */
1519 param->swizzling[0] = 0xff;
1520 param->swizzling[1] = 0xff;
1521 }
1522
1523 static void
1524 update_buffer_image_param(struct brw_context *brw,
1525 struct gl_image_unit *u,
1526 unsigned surface_idx,
1527 struct brw_image_param *param)
1528 {
1529 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1530 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1531 update_default_image_param(brw, u, surface_idx, param);
1532
1533 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1534 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1535 }
1536
1537 static void
1538 update_texture_image_param(struct brw_context *brw,
1539 struct gl_image_unit *u,
1540 unsigned surface_idx,
1541 struct brw_image_param *param)
1542 {
1543 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1544
1545 update_default_image_param(brw, u, surface_idx, param);
1546
1547 param->size[0] = minify(mt->logical_width0, u->Level);
1548 param->size[1] = minify(mt->logical_height0, u->Level);
1549 param->size[2] = (!u->Layered ? 1 :
1550 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1551 u->TexObj->Target == GL_TEXTURE_3D ?
1552 minify(mt->logical_depth0, u->Level) :
1553 mt->logical_depth0);
1554
1555 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1556 &param->offset[0],
1557 &param->offset[1]);
1558
1559 param->stride[0] = mt->cpp;
1560 param->stride[1] = mt->pitch / mt->cpp;
1561 param->stride[2] =
1562 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1563 param->stride[3] =
1564 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1565
1566 if (mt->tiling == I915_TILING_X) {
1567 /* An X tile is a rectangular block of 512x8 bytes. */
1568 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1569 param->tiling[1] = _mesa_logbase2(8);
1570
1571 if (brw->has_swizzling) {
1572 /* Right shifts required to swizzle bits 9 and 10 of the memory
1573 * address with bit 6.
1574 */
1575 param->swizzling[0] = 3;
1576 param->swizzling[1] = 4;
1577 }
1578 } else if (mt->tiling == I915_TILING_Y) {
1579 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1580 * different from the layout of an X-tiled surface; we simply pretend that
1581 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1582 * one arranged in X-major order just as is the case for X-tiling.
1583 */
1584 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1585 param->tiling[1] = _mesa_logbase2(32);
1586
1587 if (brw->has_swizzling) {
1588 /* Right shift required to swizzle bit 9 of the memory address with
1589 * bit 6.
1590 */
1591 param->swizzling[0] = 3;
1592 }
1593 }
1594
1595 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1596 * address calculation algorithm (emit_address_calculation() in
1597 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1598 * modulus equal to the LOD.
1599 */
1600 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1601 0);
1602 }
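/* Worked example (illustrative): for a 4-byte-per-texel format, X-tiling
 * gives param->tiling[0] = log2(512 / 4) = 7 and param->tiling[1] =
 * log2(8) = 3 (a 128x8-texel tile), while Y-tiling gives
 * param->tiling[0] = log2(16 / 4) = 2 and param->tiling[1] = log2(32) = 5
 * (a 4x32-texel tile), matching the 512x8-byte and 16Bx32 tile shapes
 * described in the comments above.
 */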
1603
1604 static void
1605 update_image_surface(struct brw_context *brw,
1606 struct gl_image_unit *u,
1607 GLenum access,
1608 unsigned surface_idx,
1609 uint32_t *surf_offset,
1610 struct brw_image_param *param)
1611 {
1612 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1613 struct gl_texture_object *obj = u->TexObj;
1614 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1615
1616 if (obj->Target == GL_TEXTURE_BUFFER) {
1617 struct intel_buffer_object *intel_obj =
1618 intel_buffer_object(obj->BufferObject);
1619 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1620 _mesa_get_format_bytes(u->_ActualFormat));
1621
1622 brw_emit_buffer_surface_state(
1623 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1624 format, intel_obj->Base.Size, texel_size,
1625 access != GL_READ_ONLY);
1626
1627 update_buffer_image_param(brw, u, surface_idx, param);
1628
1629 } else {
1630 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1631 struct intel_mipmap_tree *mt = intel_obj->mt;
1632
1633 if (format == BRW_SURFACEFORMAT_RAW) {
1634 brw_emit_buffer_surface_state(
1635 brw, surf_offset, mt->bo, mt->offset,
1636 format, mt->bo->size - mt->offset, 1 /* pitch */,
1637 access != GL_READ_ONLY);
1638
1639 } else {
1640 const unsigned num_layers = (!u->Layered ? 1 :
1641 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1642 mt->logical_depth0);
1643
1644 struct isl_view view = {
1645 .format = format,
1646 .base_level = obj->MinLevel + u->Level,
1647 .levels = 1,
1648 .base_array_layer = obj->MinLayer + u->_Layer,
1649 .array_len = num_layers,
1650 .channel_select = {
1651 ISL_CHANNEL_SELECT_RED,
1652 ISL_CHANNEL_SELECT_GREEN,
1653 ISL_CHANNEL_SELECT_BLUE,
1654 ISL_CHANNEL_SELECT_ALPHA,
1655 },
1656 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1657 };
1658
1659 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1660
1661 brw_emit_surface_state(brw, mt, mt->target, view,
1662 surface_state_infos[brw->gen].tex_mocs,
1663 surf_offset, surf_index,
1664 I915_GEM_DOMAIN_SAMPLER,
1665 access == GL_READ_ONLY ? 0 :
1666 I915_GEM_DOMAIN_SAMPLER);
1667 }
1668
1669 update_texture_image_param(brw, u, surface_idx, param);
1670 }
1671
1672 } else {
1673 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1674 update_default_image_param(brw, u, surface_idx, param);
1675 }
1676 }
1677
1678 void
1679 brw_upload_image_surfaces(struct brw_context *brw,
1680 struct gl_linked_shader *shader,
1681 struct brw_stage_state *stage_state,
1682 struct brw_stage_prog_data *prog_data)
1683 {
1684 struct gl_context *ctx = &brw->ctx;
1685
1686 if (shader && shader->NumImages) {
1687 for (unsigned i = 0; i < shader->NumImages; i++) {
1688 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1689 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1690
1691 update_image_surface(brw, u, shader->ImageAccess[i],
1692 surf_idx,
1693 &stage_state->surf_offset[surf_idx],
1694 &prog_data->image_param[i]);
1695 }
1696
1697 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1698 /* This may have changed the image metadata dependent on the context
1699 * image unit state and passed to the program as uniforms; make sure
1700 * that push and pull constants are reuploaded.
1701 */
1702 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1703 }
1704 }
1705
1706 static void
1707 brw_upload_wm_image_surfaces(struct brw_context *brw)
1708 {
1709 struct gl_context *ctx = &brw->ctx;
1710 /* BRW_NEW_FRAGMENT_PROGRAM */
1711 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1712
1713 if (prog) {
1714 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1715 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1716 &brw->wm.base, &brw->wm.prog_data->base);
1717 }
1718 }
1719
1720 const struct brw_tracked_state brw_wm_image_surfaces = {
1721 .dirty = {
1722 .mesa = _NEW_TEXTURE,
1723 .brw = BRW_NEW_BATCH |
1724 BRW_NEW_BLORP |
1725 BRW_NEW_FRAGMENT_PROGRAM |
1726 BRW_NEW_FS_PROG_DATA |
1727 BRW_NEW_IMAGE_UNITS
1728 },
1729 .emit = brw_upload_wm_image_surfaces,
1730 };
1731
1732 void
1733 gen4_init_vtable_surface_functions(struct brw_context *brw)
1734 {
1735 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1736 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1737 }
1738
1739 void
1740 gen6_init_vtable_surface_functions(struct brw_context *brw)
1741 {
1742 gen4_init_vtable_surface_functions(brw);
1743 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1744 }
1745
1746 static void
1747 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1748 {
1749 struct gl_context *ctx = &brw->ctx;
1750 /* _NEW_PROGRAM */
1751 struct gl_shader_program *prog =
1752 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1753
1754 if (prog && brw->cs.prog_data->uses_num_work_groups) {
1755 const unsigned surf_idx =
1756 brw->cs.prog_data->binding_table.work_groups_start;
1757 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1758 drm_intel_bo *bo;
1759 uint32_t bo_offset;
1760
1761 if (brw->compute.num_work_groups_bo == NULL) {
1762 bo = NULL;
1763 intel_upload_data(brw,
1764 (void *)brw->compute.num_work_groups,
1765 3 * sizeof(GLuint),
1766 sizeof(GLuint),
1767 &bo,
1768 &bo_offset);
1769 } else {
1770 bo = brw->compute.num_work_groups_bo;
1771 bo_offset = brw->compute.num_work_groups_offset;
1772 }
1773
1774 brw_emit_buffer_surface_state(brw, surf_offset,
1775 bo, bo_offset,
1776 BRW_SURFACEFORMAT_RAW,
1777 3 * sizeof(GLuint), 1, true);
1778 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1779 }
1780 }
1781
1782 const struct brw_tracked_state brw_cs_work_groups_surface = {
1783 .dirty = {
1784 .brw = BRW_NEW_BLORP |
1785 BRW_NEW_CS_WORK_GROUPS
1786 },
1787 .emit = brw_upload_cs_work_groups_surface,
1788 };