/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */
#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
59 INTEL_RENDERBUFFER_LAYERED
= 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED
= 1 << 1,
63 uint32_t tex_mocs
[] = {
69 uint32_t rb_mocs
[] = {
76 brw_emit_surface_state(struct brw_context
*brw
,
77 struct intel_mipmap_tree
*mt
, uint32_t flags
,
78 GLenum target
, struct isl_view view
,
79 uint32_t mocs
, uint32_t *surf_offset
, int surf_index
,
80 unsigned read_domains
, unsigned write_domains
)
82 uint32_t tile_x
= mt
->level
[0].slice
[0].x_offset
;
83 uint32_t tile_y
= mt
->level
[0].slice
[0].y_offset
;
84 uint32_t offset
= mt
->offset
;
87 intel_miptree_get_isl_surf(brw
, mt
, &surf
);
89 surf
.dim
= get_isl_surf_dim(target
);
91 const enum isl_dim_layout dim_layout
=
92 get_isl_dim_layout(&brw
->screen
->devinfo
, mt
->tiling
, target
);
94 if (surf
.dim_layout
!= dim_layout
) {
95 /* The layout of the specified texture target is not compatible with the
96 * actual layout of the miptree structure in memory -- You're entering
97 * dangerous territory, this can only possibly work if you only intended
98 * to access a single level and slice of the texture, and the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
104 assert(brw
->has_surface_tile_offset
);
105 assert(view
.levels
== 1 && view
.array_len
== 1);
106 assert(tile_x
== 0 && tile_y
== 0);
108 offset
+= intel_miptree_get_tile_offsets(mt
, view
.base_level
,
109 view
.base_array_layer
,
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l
= view
.base_level
- mt
->first_level
;
114 surf
.logical_level0_px
.width
= minify(surf
.logical_level0_px
.width
, l
);
115 surf
.logical_level0_px
.height
= surf
.dim
<= ISL_SURF_DIM_1D
? 1 :
116 minify(surf
.logical_level0_px
.height
, l
);
117 surf
.logical_level0_px
.depth
= surf
.dim
<= ISL_SURF_DIM_2D
? 1 :
118 minify(surf
.logical_level0_px
.depth
, l
);
120 /* Only the base level and layer can be addressed with the overridden
123 surf
.logical_level0_px
.array_len
= 1;
125 surf
.dim_layout
= dim_layout
;
127 /* The requested slice of the texture is now at the base level and
131 view
.base_array_layer
= 0;
134 union isl_color_value clear_color
= { .u32
= { 0, 0, 0, 0 } };
136 drm_intel_bo
*aux_bo
;
137 struct isl_surf
*aux_surf
= NULL
, aux_surf_s
;
138 uint64_t aux_offset
= 0;
139 enum isl_aux_usage aux_usage
= ISL_AUX_USAGE_NONE
;
140 if ((mt
->mcs_buf
|| intel_miptree_sample_with_hiz(brw
, mt
)) &&
141 !(flags
& INTEL_AUX_BUFFER_DISABLED
)) {
142 intel_miptree_get_aux_isl_surf(brw
, mt
, &aux_surf_s
, &aux_usage
);
143 aux_surf
= &aux_surf_s
;
146 aux_bo
= mt
->mcs_buf
->bo
;
147 aux_offset
= mt
->mcs_buf
->bo
->offset64
+ mt
->mcs_buf
->offset
;
149 aux_bo
= mt
->hiz_buf
->aux_base
.bo
;
150 aux_offset
= mt
->hiz_buf
->aux_base
.bo
->offset64
;
153 /* We only really need a clear color if we also have an auxiliary
154 * surface. Without one, it does nothing.
156 clear_color
= intel_miptree_get_isl_clear_color(brw
, mt
);
159 void *state
= __brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
160 brw
->isl_dev
.ss
.size
,
161 brw
->isl_dev
.ss
.align
,
162 surf_index
, surf_offset
);
164 isl_surf_fill_state(&brw
->isl_dev
, state
, .surf
= &surf
, .view
= &view
,
165 .address
= mt
->bo
->offset64
+ offset
,
166 .aux_surf
= aux_surf
, .aux_usage
= aux_usage
,
167 .aux_address
= aux_offset
,
168 .mocs
= mocs
, .clear_color
= clear_color
,
169 .x_offset_sa
= tile_x
, .y_offset_sa
= tile_y
);
171 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
172 *surf_offset
+ brw
->isl_dev
.ss
.addr_offset
,
174 read_domains
, write_domains
);
177 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
178 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
179 * contain other control information. Since buffer addresses are always
180 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
181 * an ordinary reloc to do the necessary address translation.
183 assert((aux_offset
& 0xfff) == 0);
184 uint32_t *aux_addr
= state
+ brw
->isl_dev
.ss
.aux_addr_offset
;
185 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
186 *surf_offset
+ brw
->isl_dev
.ss
.aux_addr_offset
,
187 aux_bo
, *aux_addr
- aux_bo
->offset64
,
188 read_domains
, write_domains
);
193 brw_update_renderbuffer_surface(struct brw_context
*brw
,
194 struct gl_renderbuffer
*rb
,
195 uint32_t flags
, unsigned unit
/* unused */,
198 struct gl_context
*ctx
= &brw
->ctx
;
199 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
200 struct intel_mipmap_tree
*mt
= irb
->mt
;
203 assert(!(flags
& INTEL_AUX_BUFFER_DISABLED
));
206 assert(brw_render_target_supported(brw
, rb
));
208 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
209 if (unlikely(!brw
->format_supported_as_render_target
[rb_format
])) {
210 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
211 __func__
, _mesa_get_format_name(rb_format
));
214 const unsigned layer_multiplier
=
215 (irb
->mt
->msaa_layout
== INTEL_MSAA_LAYOUT_UMS
||
216 irb
->mt
->msaa_layout
== INTEL_MSAA_LAYOUT_CMS
) ?
217 MAX2(irb
->mt
->num_samples
, 1) : 1;
219 struct isl_view view
= {
220 .format
= brw
->render_target_format
[rb_format
],
221 .base_level
= irb
->mt_level
- irb
->mt
->first_level
,
223 .base_array_layer
= irb
->mt_layer
/ layer_multiplier
,
224 .array_len
= MAX2(irb
->layer_count
, 1),
225 .swizzle
= ISL_SWIZZLE_IDENTITY
,
226 .usage
= ISL_SURF_USAGE_RENDER_TARGET_BIT
,
230 brw_emit_surface_state(brw
, mt
, flags
, mt
->target
, view
,
233 I915_GEM_DOMAIN_RENDER
,
234 I915_GEM_DOMAIN_RENDER
);
239 translate_tex_target(GLenum target
)
243 case GL_TEXTURE_1D_ARRAY_EXT
:
244 return BRW_SURFACE_1D
;
246 case GL_TEXTURE_RECTANGLE_NV
:
247 return BRW_SURFACE_2D
;
250 case GL_TEXTURE_2D_ARRAY_EXT
:
251 case GL_TEXTURE_EXTERNAL_OES
:
252 case GL_TEXTURE_2D_MULTISAMPLE
:
253 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY
:
254 return BRW_SURFACE_2D
;
257 return BRW_SURFACE_3D
;
259 case GL_TEXTURE_CUBE_MAP
:
260 case GL_TEXTURE_CUBE_MAP_ARRAY
:
261 return BRW_SURFACE_CUBE
;
264 unreachable("not reached");
269 brw_get_surface_tiling_bits(uint32_t tiling
)
273 return BRW_SURFACE_TILED
;
275 return BRW_SURFACE_TILED
| BRW_SURFACE_TILED_Y
;
283 brw_get_surface_num_multisamples(unsigned num_samples
)
286 return BRW_SURFACE_MULTISAMPLECOUNT_4
;
288 return BRW_SURFACE_MULTISAMPLECOUNT_1
;
292 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
296 brw_get_texture_swizzle(const struct gl_context
*ctx
,
297 const struct gl_texture_object
*t
)
299 const struct gl_texture_image
*img
= t
->Image
[0][t
->BaseLevel
];
301 int swizzles
[SWIZZLE_NIL
+ 1] = {
311 if (img
->_BaseFormat
== GL_DEPTH_COMPONENT
||
312 img
->_BaseFormat
== GL_DEPTH_STENCIL
) {
313 GLenum depth_mode
= t
->DepthMode
;
315 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
316 * with depth component data specified with a sized internal format.
317 * Otherwise, it's left at the old default, GL_LUMINANCE.
319 if (_mesa_is_gles3(ctx
) &&
320 img
->InternalFormat
!= GL_DEPTH_COMPONENT
&&
321 img
->InternalFormat
!= GL_DEPTH_STENCIL
) {
325 switch (depth_mode
) {
327 swizzles
[0] = SWIZZLE_ZERO
;
328 swizzles
[1] = SWIZZLE_ZERO
;
329 swizzles
[2] = SWIZZLE_ZERO
;
330 swizzles
[3] = SWIZZLE_X
;
333 swizzles
[0] = SWIZZLE_X
;
334 swizzles
[1] = SWIZZLE_X
;
335 swizzles
[2] = SWIZZLE_X
;
336 swizzles
[3] = SWIZZLE_ONE
;
339 swizzles
[0] = SWIZZLE_X
;
340 swizzles
[1] = SWIZZLE_X
;
341 swizzles
[2] = SWIZZLE_X
;
342 swizzles
[3] = SWIZZLE_X
;
345 swizzles
[0] = SWIZZLE_X
;
346 swizzles
[1] = SWIZZLE_ZERO
;
347 swizzles
[2] = SWIZZLE_ZERO
;
348 swizzles
[3] = SWIZZLE_ONE
;
353 GLenum datatype
= _mesa_get_format_datatype(img
->TexFormat
);
355 /* If the texture's format is alpha-only, force R, G, and B to
356 * 0.0. Similarly, if the texture's format has no alpha channel,
357 * force the alpha value read to 1.0. This allows for the
358 * implementation to use an RGBA texture for any of these formats
359 * without leaking any unexpected values.
361 switch (img
->_BaseFormat
) {
363 swizzles
[0] = SWIZZLE_ZERO
;
364 swizzles
[1] = SWIZZLE_ZERO
;
365 swizzles
[2] = SWIZZLE_ZERO
;
368 if (t
->_IsIntegerFormat
|| datatype
== GL_SIGNED_NORMALIZED
) {
369 swizzles
[0] = SWIZZLE_X
;
370 swizzles
[1] = SWIZZLE_X
;
371 swizzles
[2] = SWIZZLE_X
;
372 swizzles
[3] = SWIZZLE_ONE
;
375 case GL_LUMINANCE_ALPHA
:
376 if (datatype
== GL_SIGNED_NORMALIZED
) {
377 swizzles
[0] = SWIZZLE_X
;
378 swizzles
[1] = SWIZZLE_X
;
379 swizzles
[2] = SWIZZLE_X
;
380 swizzles
[3] = SWIZZLE_W
;
384 if (datatype
== GL_SIGNED_NORMALIZED
) {
385 swizzles
[0] = SWIZZLE_X
;
386 swizzles
[1] = SWIZZLE_X
;
387 swizzles
[2] = SWIZZLE_X
;
388 swizzles
[3] = SWIZZLE_X
;
394 if (_mesa_get_format_bits(img
->TexFormat
, GL_ALPHA_BITS
) > 0)
395 swizzles
[3] = SWIZZLE_ONE
;
399 return MAKE_SWIZZLE4(swizzles
[GET_SWZ(t
->_Swizzle
, 0)],
400 swizzles
[GET_SWZ(t
->_Swizzle
, 1)],
401 swizzles
[GET_SWZ(t
->_Swizzle
, 2)],
402 swizzles
[GET_SWZ(t
->_Swizzle
, 3)]);
406 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
407 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
409 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
412 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
414 * which is simply adding 4 then modding by 8 (or anding with 7).
416 * We then may need to apply workarounds for textureGather hardware bugs.
419 swizzle_to_scs(GLenum swizzle
, bool need_green_to_blue
)
421 unsigned scs
= (swizzle
+ 4) & 7;
423 return (need_green_to_blue
&& scs
== HSW_SCS_GREEN
) ? HSW_SCS_BLUE
: scs
;
427 brw_find_matching_rb(const struct gl_framebuffer
*fb
,
428 const struct intel_mipmap_tree
*mt
)
430 for (unsigned i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
431 const struct intel_renderbuffer
*irb
=
432 intel_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
434 if (irb
&& irb
->mt
== mt
)
438 return fb
->_NumColorDrawBuffers
;
442 brw_texture_view_sane(const struct brw_context
*brw
,
443 const struct intel_mipmap_tree
*mt
,
444 const struct isl_view
*view
)
446 /* There are special cases only for lossless compression. */
447 if (!intel_miptree_is_lossless_compressed(brw
, mt
))
450 if (isl_format_supports_ccs_e(&brw
->screen
->devinfo
, view
->format
))
453 /* Logic elsewhere needs to take care to resolve the color buffer prior
454 * to sampling it as non-compressed.
456 if (intel_miptree_has_color_unresolved(mt
, view
->base_level
, view
->levels
,
457 view
->base_array_layer
,
461 const struct gl_framebuffer
*fb
= brw
->ctx
.DrawBuffer
;
462 const unsigned rb_index
= brw_find_matching_rb(fb
, mt
);
464 if (rb_index
== fb
->_NumColorDrawBuffers
)
467 /* Underlying surface is compressed but it is sampled using a format that
468 * the sampling engine doesn't support as compressed. Compression must be
469 * disabled for both sampling engine and data port in case the same surface
470 * is used also as render target.
472 return brw
->draw_aux_buffer_disabled
[rb_index
];
476 brw_disable_aux_surface(const struct brw_context
*brw
,
477 const struct intel_mipmap_tree
*mt
,
478 const struct isl_view
*view
)
480 /* Nothing to disable. */
484 const bool is_unresolved
= intel_miptree_has_color_unresolved(
485 mt
, view
->base_level
, view
->levels
,
486 view
->base_array_layer
, view
->array_len
);
488 /* There are special cases only for lossless compression. */
489 if (!intel_miptree_is_lossless_compressed(brw
, mt
))
490 return !is_unresolved
;
492 const struct gl_framebuffer
*fb
= brw
->ctx
.DrawBuffer
;
493 const unsigned rb_index
= brw_find_matching_rb(fb
, mt
);
495 /* If we are drawing into this with compression enabled, then we must also
496 * enable compression when texturing from it regardless of
497 * fast_clear_state. If we don't then, after the first draw call with
498 * this setup, there will be data in the CCS which won't get picked up by
499 * subsequent texturing operations as required by ARB_texture_barrier.
500 * Since we don't want to re-emit the binding table or do a resolve
501 * operation every draw call, the easiest thing to do is just enable
502 * compression on the texturing side. This is completely safe to do
503 * since, if compressed texturing weren't allowed, we would have disabled
504 * compression of render targets in whatever_that_function_is_called().
506 if (rb_index
< fb
->_NumColorDrawBuffers
) {
507 if (brw
->draw_aux_buffer_disabled
[rb_index
]) {
508 assert(!is_unresolved
);
511 return brw
->draw_aux_buffer_disabled
[rb_index
];
514 return !is_unresolved
;
518 brw_update_texture_surface(struct gl_context
*ctx
,
520 uint32_t *surf_offset
,
524 struct brw_context
*brw
= brw_context(ctx
);
525 struct gl_texture_object
*obj
= ctx
->Texture
.Unit
[unit
]._Current
;
527 if (obj
->Target
== GL_TEXTURE_BUFFER
) {
528 brw_update_buffer_texture_surface(ctx
, unit
, surf_offset
);
531 struct intel_texture_object
*intel_obj
= intel_texture_object(obj
);
532 struct intel_mipmap_tree
*mt
= intel_obj
->mt
;
535 if (mt
->plane
[plane
- 1] == NULL
)
537 mt
= mt
->plane
[plane
- 1];
540 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit
);
541 /* If this is a view with restricted NumLayers, then our effective depth
542 * is not just the miptree depth.
544 const unsigned view_num_layers
=
545 (obj
->Immutable
&& obj
->Target
!= GL_TEXTURE_3D
) ? obj
->NumLayers
:
548 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
549 * texturing functions that return a float, as our code generation always
550 * selects the .x channel (which would always be 0).
552 struct gl_texture_image
*firstImage
= obj
->Image
[0][obj
->BaseLevel
];
553 const bool alpha_depth
= obj
->DepthMode
== GL_ALPHA
&&
554 (firstImage
->_BaseFormat
== GL_DEPTH_COMPONENT
||
555 firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
);
556 const unsigned swizzle
= (unlikely(alpha_depth
) ? SWIZZLE_XYZW
:
557 brw_get_texture_swizzle(&brw
->ctx
, obj
));
559 mesa_format mesa_fmt
= plane
== 0 ? intel_obj
->_Format
: mt
->format
;
560 unsigned format
= translate_tex_format(brw
, mesa_fmt
,
561 sampler
->sRGBDecode
);
563 /* Implement gen6 and gen7 gather work-around */
564 bool need_green_to_blue
= false;
566 if (brw
->gen
== 7 && (format
== ISL_FORMAT_R32G32_FLOAT
||
567 format
== ISL_FORMAT_R32G32_SINT
||
568 format
== ISL_FORMAT_R32G32_UINT
)) {
569 format
= ISL_FORMAT_R32G32_FLOAT_LD
;
570 need_green_to_blue
= brw
->is_haswell
;
571 } else if (brw
->gen
== 6) {
572 /* Sandybridge's gather4 message is broken for integer formats.
573 * To work around this, we pretend the surface is UNORM for
574 * 8 or 16-bit formats, and emit shader instructions to recover
575 * the real INT/UINT value. For 32-bit formats, we pretend
576 * the surface is FLOAT, and simply reinterpret the resulting
580 case ISL_FORMAT_R8_SINT
:
581 case ISL_FORMAT_R8_UINT
:
582 format
= ISL_FORMAT_R8_UNORM
;
585 case ISL_FORMAT_R16_SINT
:
586 case ISL_FORMAT_R16_UINT
:
587 format
= ISL_FORMAT_R16_UNORM
;
590 case ISL_FORMAT_R32_SINT
:
591 case ISL_FORMAT_R32_UINT
:
592 format
= ISL_FORMAT_R32_FLOAT
;
601 if (obj
->StencilSampling
&& firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
) {
603 assert(mt
->r8stencil_mt
&& !mt
->stencil_mt
->r8stencil_needs_update
);
604 mt
= mt
->r8stencil_mt
;
608 format
= ISL_FORMAT_R8_UINT
;
609 } else if (brw
->gen
<= 7 && mt
->format
== MESA_FORMAT_S_UINT8
) {
610 assert(mt
->r8stencil_mt
&& !mt
->r8stencil_needs_update
);
611 mt
= mt
->r8stencil_mt
;
612 format
= ISL_FORMAT_R8_UINT
;
615 const int surf_index
= surf_offset
- &brw
->wm
.base
.surf_offset
[0];
617 struct isl_view view
= {
619 .base_level
= obj
->MinLevel
+ obj
->BaseLevel
,
620 .levels
= intel_obj
->_MaxLevel
- obj
->BaseLevel
+ 1,
621 .base_array_layer
= obj
->MinLayer
,
622 .array_len
= view_num_layers
,
624 .r
= swizzle_to_scs(GET_SWZ(swizzle
, 0), need_green_to_blue
),
625 .g
= swizzle_to_scs(GET_SWZ(swizzle
, 1), need_green_to_blue
),
626 .b
= swizzle_to_scs(GET_SWZ(swizzle
, 2), need_green_to_blue
),
627 .a
= swizzle_to_scs(GET_SWZ(swizzle
, 3), need_green_to_blue
),
629 .usage
= ISL_SURF_USAGE_TEXTURE_BIT
,
632 if (obj
->Target
== GL_TEXTURE_CUBE_MAP
||
633 obj
->Target
== GL_TEXTURE_CUBE_MAP_ARRAY
)
634 view
.usage
|= ISL_SURF_USAGE_CUBE_BIT
;
636 assert(brw_texture_view_sane(brw
, mt
, &view
));
638 const int flags
= brw_disable_aux_surface(brw
, mt
, &view
) ?
639 INTEL_AUX_BUFFER_DISABLED
: 0;
640 brw_emit_surface_state(brw
, mt
, flags
, mt
->target
, view
,
642 surf_offset
, surf_index
,
643 I915_GEM_DOMAIN_SAMPLER
, 0);
648 brw_emit_buffer_surface_state(struct brw_context
*brw
,
649 uint32_t *out_offset
,
651 unsigned buffer_offset
,
652 unsigned surface_format
,
653 unsigned buffer_size
,
657 uint32_t *dw
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
658 brw
->isl_dev
.ss
.size
,
659 brw
->isl_dev
.ss
.align
,
662 isl_buffer_fill_state(&brw
->isl_dev
, dw
,
663 .address
= (bo
? bo
->offset64
: 0) + buffer_offset
,
665 .format
= surface_format
,
667 .mocs
= tex_mocs
[brw
->gen
]);
670 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
671 *out_offset
+ brw
->isl_dev
.ss
.addr_offset
,
673 I915_GEM_DOMAIN_SAMPLER
,
674 (rw
? I915_GEM_DOMAIN_SAMPLER
: 0));
679 brw_update_buffer_texture_surface(struct gl_context
*ctx
,
681 uint32_t *surf_offset
)
683 struct brw_context
*brw
= brw_context(ctx
);
684 struct gl_texture_object
*tObj
= ctx
->Texture
.Unit
[unit
]._Current
;
685 struct intel_buffer_object
*intel_obj
=
686 intel_buffer_object(tObj
->BufferObject
);
687 uint32_t size
= tObj
->BufferSize
;
688 drm_intel_bo
*bo
= NULL
;
689 mesa_format format
= tObj
->_BufferObjectFormat
;
690 uint32_t brw_format
= brw_isl_format_for_mesa_format(format
);
691 int texel_size
= _mesa_get_format_bytes(format
);
694 size
= MIN2(size
, intel_obj
->Base
.Size
);
695 bo
= intel_bufferobj_buffer(brw
, intel_obj
, tObj
->BufferOffset
, size
);
698 /* The ARB_texture_buffer_specification says:
700 * "The number of texels in the buffer texture's texel array is given by
702 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
704 * where <buffer_size> is the size of the buffer object, in basic
705 * machine units and <components> and <base_type> are the element count
706 * and base data type for elements, as specified in Table X.1. The
707 * number of texels in the texel array is then clamped to the
708 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
710 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
711 * so that when ISL divides by stride to obtain the number of texels, that
712 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
714 size
= MIN2(size
, ctx
->Const
.MaxTextureBufferSize
* (unsigned) texel_size
);
716 if (brw_format
== 0 && format
!= MESA_FORMAT_RGBA_FLOAT32
) {
717 _mesa_problem(NULL
, "bad format %s for texture buffer\n",
718 _mesa_get_format_name(format
));
721 brw_emit_buffer_surface_state(brw
, surf_offset
, bo
,
730 * Create the constant buffer surface. Vertex/fragment shader constants will be
731 * read from this buffer with Data Port Read instructions/messages.
734 brw_create_constant_surface(struct brw_context
*brw
,
738 uint32_t *out_offset
)
740 brw_emit_buffer_surface_state(brw
, out_offset
, bo
, offset
,
741 ISL_FORMAT_R32G32B32A32_FLOAT
,
746 * Create the buffer surface. Shader buffer variables will be
747 * read from / write to this buffer with Data Port Read/Write
748 * instructions/messages.
751 brw_create_buffer_surface(struct brw_context
*brw
,
755 uint32_t *out_offset
)
757 /* Use a raw surface so we can reuse existing untyped read/write/atomic
758 * messages. We need these specifically for the fragment shader since they
759 * include a pixel mask header that we need to ensure correct behavior
760 * with helper invocations, which cannot write to the buffer.
762 brw_emit_buffer_surface_state(brw
, out_offset
, bo
, offset
,
768 * Set up a binding table entry for use by stream output logic (transform
771 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
774 brw_update_sol_surface(struct brw_context
*brw
,
775 struct gl_buffer_object
*buffer_obj
,
776 uint32_t *out_offset
, unsigned num_vector_components
,
777 unsigned stride_dwords
, unsigned offset_dwords
)
779 struct intel_buffer_object
*intel_bo
= intel_buffer_object(buffer_obj
);
780 uint32_t offset_bytes
= 4 * offset_dwords
;
781 drm_intel_bo
*bo
= intel_bufferobj_buffer(brw
, intel_bo
,
783 buffer_obj
->Size
- offset_bytes
);
784 uint32_t *surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
, 6 * 4, 32,
786 uint32_t pitch_minus_1
= 4*stride_dwords
- 1;
787 size_t size_dwords
= buffer_obj
->Size
/ 4;
788 uint32_t buffer_size_minus_1
, width
, height
, depth
, surface_format
;
790 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
791 * too big to map using a single binding table entry?
793 assert((size_dwords
- offset_dwords
) / stride_dwords
794 <= BRW_MAX_NUM_BUFFER_ENTRIES
);
796 if (size_dwords
> offset_dwords
+ num_vector_components
) {
797 /* There is room for at least 1 transform feedback output in the buffer.
798 * Compute the number of additional transform feedback outputs the
799 * buffer has room for.
801 buffer_size_minus_1
=
802 (size_dwords
- offset_dwords
- num_vector_components
) / stride_dwords
;
804 /* There isn't even room for a single transform feedback output in the
805 * buffer. We can't configure the binding table entry to prevent output
806 * entirely; we'll have to rely on the geometry shader to detect
807 * overflow. But to minimize the damage in case of a bug, set up the
808 * binding table entry to just allow a single output.
810 buffer_size_minus_1
= 0;
812 width
= buffer_size_minus_1
& 0x7f;
813 height
= (buffer_size_minus_1
& 0xfff80) >> 7;
814 depth
= (buffer_size_minus_1
& 0x7f00000) >> 20;
816 switch (num_vector_components
) {
818 surface_format
= ISL_FORMAT_R32_FLOAT
;
821 surface_format
= ISL_FORMAT_R32G32_FLOAT
;
824 surface_format
= ISL_FORMAT_R32G32B32_FLOAT
;
827 surface_format
= ISL_FORMAT_R32G32B32A32_FLOAT
;
830 unreachable("Invalid vector size for transform feedback output");
833 surf
[0] = BRW_SURFACE_BUFFER
<< BRW_SURFACE_TYPE_SHIFT
|
834 BRW_SURFACE_MIPMAPLAYOUT_BELOW
<< BRW_SURFACE_MIPLAYOUT_SHIFT
|
835 surface_format
<< BRW_SURFACE_FORMAT_SHIFT
|
836 BRW_SURFACE_RC_READ_WRITE
;
837 surf
[1] = bo
->offset64
+ offset_bytes
; /* reloc */
838 surf
[2] = (width
<< BRW_SURFACE_WIDTH_SHIFT
|
839 height
<< BRW_SURFACE_HEIGHT_SHIFT
);
840 surf
[3] = (depth
<< BRW_SURFACE_DEPTH_SHIFT
|
841 pitch_minus_1
<< BRW_SURFACE_PITCH_SHIFT
);
845 /* Emit relocation to surface contents. */
846 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
849 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
);
852 /* Creates a new WM constant buffer reflecting the current fragment program's
853 * constants, if needed by the fragment program.
855 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
859 brw_upload_wm_pull_constants(struct brw_context
*brw
)
861 struct brw_stage_state
*stage_state
= &brw
->wm
.base
;
862 /* BRW_NEW_FRAGMENT_PROGRAM */
863 struct brw_program
*fp
= (struct brw_program
*) brw
->fragment_program
;
864 /* BRW_NEW_FS_PROG_DATA */
865 struct brw_stage_prog_data
*prog_data
= brw
->wm
.base
.prog_data
;
867 _mesa_shader_write_subroutine_indices(&brw
->ctx
, MESA_SHADER_FRAGMENT
);
868 /* _NEW_PROGRAM_CONSTANTS */
869 brw_upload_pull_constants(brw
, BRW_NEW_SURFACES
, &fp
->program
,
870 stage_state
, prog_data
);
873 const struct brw_tracked_state brw_wm_pull_constants
= {
875 .mesa
= _NEW_PROGRAM_CONSTANTS
,
876 .brw
= BRW_NEW_BATCH
|
878 BRW_NEW_FRAGMENT_PROGRAM
|
879 BRW_NEW_FS_PROG_DATA
,
881 .emit
= brw_upload_wm_pull_constants
,
885 * Creates a null renderbuffer surface.
887 * This is used when the shader doesn't write to any color output. An FB
888 * write to target 0 will still be emitted, because that's how the thread is
889 * terminated (and computed depth is returned), so we need to have the
890 * hardware discard the target 0 color output..
893 brw_emit_null_surface_state(struct brw_context
*brw
,
897 uint32_t *out_offset
)
899 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
902 * A null surface will be used in instances where an actual surface is
903 * not bound. When a write message is generated to a null surface, no
904 * actual surface is written to. When a read message (including any
905 * sampling engine message) is generated to a null surface, the result
906 * is all zeros. Note that a null surface type is allowed to be used
907 * with all messages, even if it is not specificially indicated as
908 * supported. All of the remaining fields in surface state are ignored
909 * for null surfaces, with the following exceptions:
911 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
912 * depth buffer’s corresponding state for all render target surfaces,
915 * - Surface Format must be R8G8B8A8_UNORM.
917 unsigned surface_type
= BRW_SURFACE_NULL
;
918 drm_intel_bo
*bo
= NULL
;
919 unsigned pitch_minus_1
= 0;
920 uint32_t multisampling_state
= 0;
921 uint32_t *surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
, 6 * 4, 32,
925 /* On Gen6, null render targets seem to cause GPU hangs when
926 * multisampling. So work around this problem by rendering into dummy
929 * To decrease the amount of memory needed by the workaround buffer, we
930 * set its pitch to 128 bytes (the width of a Y tile). This means that
931 * the amount of memory needed for the workaround buffer is
932 * (width_in_tiles + height_in_tiles - 1) tiles.
934 * Note that since the workaround buffer will be interpreted by the
935 * hardware as an interleaved multisampled buffer, we need to compute
936 * width_in_tiles and height_in_tiles by dividing the width and height
937 * by 16 rather than the normal Y-tile size of 32.
939 unsigned width_in_tiles
= ALIGN(width
, 16) / 16;
940 unsigned height_in_tiles
= ALIGN(height
, 16) / 16;
941 unsigned size_needed
= (width_in_tiles
+ height_in_tiles
- 1) * 4096;
942 brw_get_scratch_bo(brw
, &brw
->wm
.multisampled_null_render_target_bo
,
944 bo
= brw
->wm
.multisampled_null_render_target_bo
;
945 surface_type
= BRW_SURFACE_2D
;
947 multisampling_state
= brw_get_surface_num_multisamples(samples
);
950 surf
[0] = (surface_type
<< BRW_SURFACE_TYPE_SHIFT
|
951 ISL_FORMAT_B8G8R8A8_UNORM
<< BRW_SURFACE_FORMAT_SHIFT
);
953 surf
[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT
|
954 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT
|
955 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT
|
956 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT
);
958 surf
[1] = bo
? bo
->offset64
: 0;
959 surf
[2] = ((width
- 1) << BRW_SURFACE_WIDTH_SHIFT
|
960 (height
- 1) << BRW_SURFACE_HEIGHT_SHIFT
);
962 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
965 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
967 surf
[3] = (BRW_SURFACE_TILED
| BRW_SURFACE_TILED_Y
|
968 pitch_minus_1
<< BRW_SURFACE_PITCH_SHIFT
);
969 surf
[4] = multisampling_state
;
973 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
976 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
);
981 * Sets up a surface state structure to point at the given region.
982 * While it is only used for the front/back buffer currently, it should be
983 * usable for further buffers when doing ARB_draw_buffer support.
986 gen4_update_renderbuffer_surface(struct brw_context
*brw
,
987 struct gl_renderbuffer
*rb
,
988 uint32_t flags
, unsigned unit
,
991 struct gl_context
*ctx
= &brw
->ctx
;
992 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
993 struct intel_mipmap_tree
*mt
= irb
->mt
;
995 uint32_t tile_x
, tile_y
;
999 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
1000 /* BRW_NEW_FS_PROG_DATA */
1002 assert(!(flags
& INTEL_RENDERBUFFER_LAYERED
));
1003 assert(!(flags
& INTEL_AUX_BUFFER_DISABLED
));
1005 if (rb
->TexImage
&& !brw
->has_surface_tile_offset
) {
1006 intel_renderbuffer_get_tile_offsets(irb
, &tile_x
, &tile_y
);
1008 if (tile_x
!= 0 || tile_y
!= 0) {
1009 /* Original gen4 hardware couldn't draw to a non-tile-aligned
1010 * destination in a miptree unless you actually setup your renderbuffer
1011 * as a miptree and used the fragile lod/array_index/etc. controls to
1012 * select the image. So, instead, we just make a new single-level
1013 * miptree and render into that.
1015 intel_renderbuffer_move_to_temp(brw
, irb
, false);
1020 surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
, 6 * 4, 32, &offset
);
1022 format
= brw
->render_target_format
[rb_format
];
1023 if (unlikely(!brw
->format_supported_as_render_target
[rb_format
])) {
1024 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
1025 __func__
, _mesa_get_format_name(rb_format
));
1028 surf
[0] = (BRW_SURFACE_2D
<< BRW_SURFACE_TYPE_SHIFT
|
1029 format
<< BRW_SURFACE_FORMAT_SHIFT
);
1032 assert(mt
->offset
% mt
->cpp
== 0);
1033 surf
[1] = (intel_renderbuffer_get_tile_offsets(irb
, &tile_x
, &tile_y
) +
1034 mt
->bo
->offset64
+ mt
->offset
);
1036 surf
[2] = ((rb
->Width
- 1) << BRW_SURFACE_WIDTH_SHIFT
|
1037 (rb
->Height
- 1) << BRW_SURFACE_HEIGHT_SHIFT
);
1039 surf
[3] = (brw_get_surface_tiling_bits(mt
->tiling
) |
1040 (mt
->pitch
- 1) << BRW_SURFACE_PITCH_SHIFT
);
1042 surf
[4] = brw_get_surface_num_multisamples(mt
->num_samples
);
1044 assert(brw
->has_surface_tile_offset
|| (tile_x
== 0 && tile_y
== 0));
1045 /* Note that the low bits of these fields are missing, so
1046 * there's the possibility of getting in trouble.
1048 assert(tile_x
% 4 == 0);
1049 assert(tile_y
% 2 == 0);
1050 surf
[5] = ((tile_x
/ 4) << BRW_SURFACE_X_OFFSET_SHIFT
|
1051 (tile_y
/ 2) << BRW_SURFACE_Y_OFFSET_SHIFT
|
1052 (mt
->valign
== 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE
: 0));
1056 if (!ctx
->Color
.ColorLogicOpEnabled
&& !ctx
->Color
._AdvancedBlendMode
&&
1057 (ctx
->Color
.BlendEnabled
& (1 << unit
)))
1058 surf
[0] |= BRW_SURFACE_BLEND_ENABLED
;
1060 if (!ctx
->Color
.ColorMask
[unit
][0])
1061 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT
;
1062 if (!ctx
->Color
.ColorMask
[unit
][1])
1063 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT
;
1064 if (!ctx
->Color
.ColorMask
[unit
][2])
1065 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT
;
1067 /* As mentioned above, disable writes to the alpha component when the
1068 * renderbuffer is XRGB.
1070 if (ctx
->DrawBuffer
->Visual
.alphaBits
== 0 ||
1071 !ctx
->Color
.ColorMask
[unit
][3]) {
1072 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT
;
1076 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
1079 surf
[1] - mt
->bo
->offset64
,
1080 I915_GEM_DOMAIN_RENDER
,
1081 I915_GEM_DOMAIN_RENDER
);
1087 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1090 brw_update_renderbuffer_surfaces(struct brw_context
*brw
,
1091 const struct gl_framebuffer
*fb
,
1092 uint32_t render_target_start
,
1093 uint32_t *surf_offset
)
1096 const unsigned int w
= _mesa_geometric_width(fb
);
1097 const unsigned int h
= _mesa_geometric_height(fb
);
1098 const unsigned int s
= _mesa_geometric_samples(fb
);
1100 /* Update surfaces for drawing buffers */
1101 if (fb
->_NumColorDrawBuffers
>= 1) {
1102 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1103 const uint32_t surf_index
= render_target_start
+ i
;
1104 const int flags
= (_mesa_geometric_layers(fb
) > 0 ?
1105 INTEL_RENDERBUFFER_LAYERED
: 0) |
1106 (brw
->draw_aux_buffer_disabled
[i
] ?
1107 INTEL_AUX_BUFFER_DISABLED
: 0);
1109 if (intel_renderbuffer(fb
->_ColorDrawBuffers
[i
])) {
1110 surf_offset
[surf_index
] =
1111 brw
->vtbl
.update_renderbuffer_surface(
1112 brw
, fb
->_ColorDrawBuffers
[i
], flags
, i
, surf_index
);
1114 brw
->vtbl
.emit_null_surface_state(brw
, w
, h
, s
,
1115 &surf_offset
[surf_index
]);
1119 const uint32_t surf_index
= render_target_start
;
1120 brw
->vtbl
.emit_null_surface_state(brw
, w
, h
, s
,
1121 &surf_offset
[surf_index
]);
1126 update_renderbuffer_surfaces(struct brw_context
*brw
)
1128 const struct gl_context
*ctx
= &brw
->ctx
;
1130 /* BRW_NEW_FS_PROG_DATA */
1131 const struct brw_wm_prog_data
*wm_prog_data
=
1132 brw_wm_prog_data(brw
->wm
.base
.prog_data
);
1134 /* _NEW_BUFFERS | _NEW_COLOR */
1135 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1136 brw_update_renderbuffer_surfaces(
1138 wm_prog_data
->binding_table
.render_target_start
,
1139 brw
->wm
.base
.surf_offset
);
1140 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1143 const struct brw_tracked_state brw_renderbuffer_surfaces
= {
1145 .mesa
= _NEW_BUFFERS
|
1147 .brw
= BRW_NEW_BATCH
|
1149 BRW_NEW_FS_PROG_DATA
,
1151 .emit
= update_renderbuffer_surfaces
,
1154 const struct brw_tracked_state gen6_renderbuffer_surfaces
= {
1156 .mesa
= _NEW_BUFFERS
,
1157 .brw
= BRW_NEW_BATCH
|
1160 .emit
= update_renderbuffer_surfaces
,
1164 update_renderbuffer_read_surfaces(struct brw_context
*brw
)
1166 const struct gl_context
*ctx
= &brw
->ctx
;
1168 /* BRW_NEW_FS_PROG_DATA */
1169 const struct brw_wm_prog_data
*wm_prog_data
=
1170 brw_wm_prog_data(brw
->wm
.base
.prog_data
);
1172 /* BRW_NEW_FRAGMENT_PROGRAM */
1173 if (!ctx
->Extensions
.MESA_shader_framebuffer_fetch
&&
1174 brw
->fragment_program
&& brw
->fragment_program
->info
.outputs_read
) {
1176 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1178 for (unsigned i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1179 struct gl_renderbuffer
*rb
= fb
->_ColorDrawBuffers
[i
];
1180 const struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
1181 const unsigned surf_index
=
1182 wm_prog_data
->binding_table
.render_target_read_start
+ i
;
1183 uint32_t *surf_offset
= &brw
->wm
.base
.surf_offset
[surf_index
];
1186 const unsigned format
= brw
->render_target_format
[
1187 _mesa_get_render_format(ctx
, intel_rb_format(irb
))];
1188 assert(isl_format_supports_sampling(&brw
->screen
->devinfo
,
1191 /* Override the target of the texture if the render buffer is a
1192 * single slice of a 3D texture (since the minimum array element
1193 * field of the surface state structure is ignored by the sampler
1194 * unit for 3D textures on some hardware), or if the render buffer
1195 * is a 1D array (since shaders always provide the array index
1196 * coordinate at the Z component to avoid state-dependent
1197 * recompiles when changing the texture target of the
1200 const GLenum target
=
1201 (irb
->mt
->target
== GL_TEXTURE_3D
&&
1202 irb
->layer_count
== 1) ? GL_TEXTURE_2D
:
1203 irb
->mt
->target
== GL_TEXTURE_1D_ARRAY
? GL_TEXTURE_2D_ARRAY
:
1206 /* intel_renderbuffer::mt_layer is expressed in sample units for
1207 * the UMS and CMS multisample layouts, but
1208 * intel_renderbuffer::layer_count is expressed in units of whole
1209 * logical layers regardless of the multisample layout.
1211 const unsigned mt_layer_unit
=
1212 (irb
->mt
->msaa_layout
== INTEL_MSAA_LAYOUT_UMS
||
1213 irb
->mt
->msaa_layout
== INTEL_MSAA_LAYOUT_CMS
) ?
1214 MAX2(irb
->mt
->num_samples
, 1) : 1;
1216 const struct isl_view view
= {
1218 .base_level
= irb
->mt_level
- irb
->mt
->first_level
,
1220 .base_array_layer
= irb
->mt_layer
/ mt_layer_unit
,
1221 .array_len
= irb
->layer_count
,
1222 .swizzle
= ISL_SWIZZLE_IDENTITY
,
1223 .usage
= ISL_SURF_USAGE_TEXTURE_BIT
,
1226 const int flags
= brw
->draw_aux_buffer_disabled
[i
] ?
1227 INTEL_AUX_BUFFER_DISABLED
: 0;
1228 brw_emit_surface_state(brw
, irb
->mt
, flags
, target
, view
,
1230 surf_offset
, surf_index
,
1231 I915_GEM_DOMAIN_SAMPLER
, 0);
1234 brw
->vtbl
.emit_null_surface_state(
1235 brw
, _mesa_geometric_width(fb
), _mesa_geometric_height(fb
),
1236 _mesa_geometric_samples(fb
), surf_offset
);
1240 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1244 const struct brw_tracked_state brw_renderbuffer_read_surfaces
= {
1246 .mesa
= _NEW_BUFFERS
,
1247 .brw
= BRW_NEW_BATCH
|
1248 BRW_NEW_FRAGMENT_PROGRAM
|
1249 BRW_NEW_FS_PROG_DATA
,
1251 .emit
= update_renderbuffer_read_surfaces
,
1255 update_stage_texture_surfaces(struct brw_context
*brw
,
1256 const struct gl_program
*prog
,
1257 struct brw_stage_state
*stage_state
,
1258 bool for_gather
, uint32_t plane
)
1263 struct gl_context
*ctx
= &brw
->ctx
;
1265 uint32_t *surf_offset
= stage_state
->surf_offset
;
1267 /* BRW_NEW_*_PROG_DATA */
1269 surf_offset
+= stage_state
->prog_data
->binding_table
.gather_texture_start
;
1271 surf_offset
+= stage_state
->prog_data
->binding_table
.plane_start
[plane
];
1273 unsigned num_samplers
= util_last_bit(prog
->SamplersUsed
);
1274 for (unsigned s
= 0; s
< num_samplers
; s
++) {
1277 if (prog
->SamplersUsed
& (1 << s
)) {
1278 const unsigned unit
= prog
->SamplerUnits
[s
];
1281 if (ctx
->Texture
.Unit
[unit
]._Current
) {
1282 brw_update_texture_surface(ctx
, unit
, surf_offset
+ s
, for_gather
, plane
);
1290 * Construct SURFACE_STATE objects for enabled textures.
1293 brw_update_texture_surfaces(struct brw_context
*brw
)
1295 /* BRW_NEW_VERTEX_PROGRAM */
1296 struct gl_program
*vs
= (struct gl_program
*) brw
->vertex_program
;
1298 /* BRW_NEW_TESS_PROGRAMS */
1299 struct gl_program
*tcs
= (struct gl_program
*) brw
->tess_ctrl_program
;
1300 struct gl_program
*tes
= (struct gl_program
*) brw
->tess_eval_program
;
1302 /* BRW_NEW_GEOMETRY_PROGRAM */
1303 struct gl_program
*gs
= (struct gl_program
*) brw
->geometry_program
;
1305 /* BRW_NEW_FRAGMENT_PROGRAM */
1306 struct gl_program
*fs
= (struct gl_program
*) brw
->fragment_program
;
1309 update_stage_texture_surfaces(brw
, vs
, &brw
->vs
.base
, false, 0);
1310 update_stage_texture_surfaces(brw
, tcs
, &brw
->tcs
.base
, false, 0);
1311 update_stage_texture_surfaces(brw
, tes
, &brw
->tes
.base
, false, 0);
1312 update_stage_texture_surfaces(brw
, gs
, &brw
->gs
.base
, false, 0);
1313 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 0);
1315 /* emit alternate set of surface state for gather. this
1316 * allows the surface format to be overriden for only the
1317 * gather4 messages. */
1319 if (vs
&& vs
->nir
->info
->uses_texture_gather
)
1320 update_stage_texture_surfaces(brw
, vs
, &brw
->vs
.base
, true, 0);
1321 if (tcs
&& tcs
->nir
->info
->uses_texture_gather
)
1322 update_stage_texture_surfaces(brw
, tcs
, &brw
->tcs
.base
, true, 0);
1323 if (tes
&& tes
->nir
->info
->uses_texture_gather
)
1324 update_stage_texture_surfaces(brw
, tes
, &brw
->tes
.base
, true, 0);
1325 if (gs
&& gs
->nir
->info
->uses_texture_gather
)
1326 update_stage_texture_surfaces(brw
, gs
, &brw
->gs
.base
, true, 0);
1327 if (fs
&& fs
->nir
->info
->uses_texture_gather
)
1328 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, true, 0);
1332 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 1);
1333 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 2);
1336 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1339 const struct brw_tracked_state brw_texture_surfaces
= {
1341 .mesa
= _NEW_TEXTURE
,
1342 .brw
= BRW_NEW_BATCH
|
1344 BRW_NEW_FRAGMENT_PROGRAM
|
1345 BRW_NEW_FS_PROG_DATA
|
1346 BRW_NEW_GEOMETRY_PROGRAM
|
1347 BRW_NEW_GS_PROG_DATA
|
1348 BRW_NEW_TESS_PROGRAMS
|
1349 BRW_NEW_TCS_PROG_DATA
|
1350 BRW_NEW_TES_PROG_DATA
|
1351 BRW_NEW_TEXTURE_BUFFER
|
1352 BRW_NEW_VERTEX_PROGRAM
|
1353 BRW_NEW_VS_PROG_DATA
,
1355 .emit
= brw_update_texture_surfaces
,
1359 brw_update_cs_texture_surfaces(struct brw_context
*brw
)
1361 /* BRW_NEW_COMPUTE_PROGRAM */
1362 struct gl_program
*cs
= (struct gl_program
*) brw
->compute_program
;
1365 update_stage_texture_surfaces(brw
, cs
, &brw
->cs
.base
, false, 0);
1367 /* emit alternate set of surface state for gather. this
1368 * allows the surface format to be overriden for only the
1372 if (cs
&& cs
->nir
->info
->uses_texture_gather
)
1373 update_stage_texture_surfaces(brw
, cs
, &brw
->cs
.base
, true, 0);
1376 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1379 const struct brw_tracked_state brw_cs_texture_surfaces
= {
1381 .mesa
= _NEW_TEXTURE
,
1382 .brw
= BRW_NEW_BATCH
|
1384 BRW_NEW_COMPUTE_PROGRAM
,
1386 .emit
= brw_update_cs_texture_surfaces
,
1391 brw_upload_ubo_surfaces(struct brw_context
*brw
, struct gl_program
*prog
,
1392 struct brw_stage_state
*stage_state
,
1393 struct brw_stage_prog_data
*prog_data
)
1395 struct gl_context
*ctx
= &brw
->ctx
;
1400 uint32_t *ubo_surf_offsets
=
1401 &stage_state
->surf_offset
[prog_data
->binding_table
.ubo_start
];
1403 for (int i
= 0; i
< prog
->info
.num_ubos
; i
++) {
1404 struct gl_uniform_buffer_binding
*binding
=
1405 &ctx
->UniformBufferBindings
[prog
->sh
.UniformBlocks
[i
]->Binding
];
1407 if (binding
->BufferObject
== ctx
->Shared
->NullBufferObj
) {
1408 brw
->vtbl
.emit_null_surface_state(brw
, 1, 1, 1, &ubo_surf_offsets
[i
]);
1410 struct intel_buffer_object
*intel_bo
=
1411 intel_buffer_object(binding
->BufferObject
);
1412 GLsizeiptr size
= binding
->BufferObject
->Size
- binding
->Offset
;
1413 if (!binding
->AutomaticSize
)
1414 size
= MIN2(size
, binding
->Size
);
1416 intel_bufferobj_buffer(brw
, intel_bo
,
1419 brw_create_constant_surface(brw
, bo
, binding
->Offset
,
1421 &ubo_surf_offsets
[i
]);
1425 uint32_t *ssbo_surf_offsets
=
1426 &stage_state
->surf_offset
[prog_data
->binding_table
.ssbo_start
];
1428 for (int i
= 0; i
< prog
->info
.num_ssbos
; i
++) {
1429 struct gl_shader_storage_buffer_binding
*binding
=
1430 &ctx
->ShaderStorageBufferBindings
[prog
->sh
.ShaderStorageBlocks
[i
]->Binding
];
1432 if (binding
->BufferObject
== ctx
->Shared
->NullBufferObj
) {
1433 brw
->vtbl
.emit_null_surface_state(brw
, 1, 1, 1, &ssbo_surf_offsets
[i
]);
1435 struct intel_buffer_object
*intel_bo
=
1436 intel_buffer_object(binding
->BufferObject
);
1437 GLsizeiptr size
= binding
->BufferObject
->Size
- binding
->Offset
;
1438 if (!binding
->AutomaticSize
)
1439 size
= MIN2(size
, binding
->Size
);
1441 intel_bufferobj_buffer(brw
, intel_bo
,
1444 brw_create_buffer_surface(brw
, bo
, binding
->Offset
,
1446 &ssbo_surf_offsets
[i
]);
1450 if (prog
->info
.num_ubos
|| prog
->info
.num_ssbos
)
1451 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1455 brw_upload_wm_ubo_surfaces(struct brw_context
*brw
)
1457 struct gl_context
*ctx
= &brw
->ctx
;
1459 struct gl_program
*prog
= ctx
->_Shader
->_CurrentFragmentProgram
;
1461 /* BRW_NEW_FS_PROG_DATA */
1462 brw_upload_ubo_surfaces(brw
, prog
, &brw
->wm
.base
, brw
->wm
.base
.prog_data
);
1465 const struct brw_tracked_state brw_wm_ubo_surfaces
= {
1467 .mesa
= _NEW_PROGRAM
,
1468 .brw
= BRW_NEW_BATCH
|
1470 BRW_NEW_FS_PROG_DATA
|
1471 BRW_NEW_UNIFORM_BUFFER
,
1473 .emit
= brw_upload_wm_ubo_surfaces
,
1477 brw_upload_cs_ubo_surfaces(struct brw_context
*brw
)
1479 struct gl_context
*ctx
= &brw
->ctx
;
1481 struct gl_program
*prog
=
1482 ctx
->_Shader
->CurrentProgram
[MESA_SHADER_COMPUTE
];
1484 /* BRW_NEW_CS_PROG_DATA */
1485 brw_upload_ubo_surfaces(brw
, prog
, &brw
->cs
.base
, brw
->cs
.base
.prog_data
);
1488 const struct brw_tracked_state brw_cs_ubo_surfaces
= {
1490 .mesa
= _NEW_PROGRAM
,
1491 .brw
= BRW_NEW_BATCH
|
1493 BRW_NEW_CS_PROG_DATA
|
1494 BRW_NEW_UNIFORM_BUFFER
,
1496 .emit
= brw_upload_cs_ubo_surfaces
,
1500 brw_upload_abo_surfaces(struct brw_context
*brw
,
1501 const struct gl_program
*prog
,
1502 struct brw_stage_state
*stage_state
,
1503 struct brw_stage_prog_data
*prog_data
)
1505 struct gl_context
*ctx
= &brw
->ctx
;
1506 uint32_t *surf_offsets
=
1507 &stage_state
->surf_offset
[prog_data
->binding_table
.abo_start
];
1509 if (prog
->info
.num_abos
) {
1510 for (unsigned i
= 0; i
< prog
->info
.num_abos
; i
++) {
1511 struct gl_atomic_buffer_binding
*binding
=
1512 &ctx
->AtomicBufferBindings
[prog
->sh
.AtomicBuffers
[i
]->Binding
];
1513 struct intel_buffer_object
*intel_bo
=
1514 intel_buffer_object(binding
->BufferObject
);
1515 drm_intel_bo
*bo
= intel_bufferobj_buffer(
1516 brw
, intel_bo
, binding
->Offset
, intel_bo
->Base
.Size
- binding
->Offset
);
1518 brw_emit_buffer_surface_state(brw
, &surf_offsets
[i
], bo
,
1519 binding
->Offset
, ISL_FORMAT_RAW
,
1520 bo
->size
- binding
->Offset
, 1, true);
1523 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1528 brw_upload_wm_abo_surfaces(struct brw_context
*brw
)
1531 const struct gl_program
*wm
= brw
->fragment_program
;
1534 /* BRW_NEW_FS_PROG_DATA */
1535 brw_upload_abo_surfaces(brw
, wm
, &brw
->wm
.base
, brw
->wm
.base
.prog_data
);
1539 const struct brw_tracked_state brw_wm_abo_surfaces
= {
1541 .mesa
= _NEW_PROGRAM
,
1542 .brw
= BRW_NEW_ATOMIC_BUFFER
|
1545 BRW_NEW_FS_PROG_DATA
,
1547 .emit
= brw_upload_wm_abo_surfaces
,
1551 brw_upload_cs_abo_surfaces(struct brw_context
*brw
)
1554 const struct gl_program
*cp
= brw
->compute_program
;
1557 /* BRW_NEW_CS_PROG_DATA */
1558 brw_upload_abo_surfaces(brw
, cp
, &brw
->cs
.base
, brw
->cs
.base
.prog_data
);
1562 const struct brw_tracked_state brw_cs_abo_surfaces
= {
1564 .mesa
= _NEW_PROGRAM
,
1565 .brw
= BRW_NEW_ATOMIC_BUFFER
|
1568 BRW_NEW_CS_PROG_DATA
,
1570 .emit
= brw_upload_cs_abo_surfaces
,
1574 brw_upload_cs_image_surfaces(struct brw_context
*brw
)
1577 const struct gl_program
*cp
= brw
->compute_program
;
1580 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1581 brw_upload_image_surfaces(brw
, cp
, &brw
->cs
.base
,
1582 brw
->cs
.base
.prog_data
);
1586 const struct brw_tracked_state brw_cs_image_surfaces
= {
1588 .mesa
= _NEW_TEXTURE
| _NEW_PROGRAM
,
1589 .brw
= BRW_NEW_BATCH
|
1591 BRW_NEW_CS_PROG_DATA
|
1594 .emit
= brw_upload_cs_image_surfaces
,
1598 get_image_format(struct brw_context
*brw
, mesa_format format
, GLenum access
)
1600 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1601 uint32_t hw_format
= brw_isl_format_for_mesa_format(format
);
1602 if (access
== GL_WRITE_ONLY
) {
1604 } else if (isl_has_matching_typed_storage_image_format(devinfo
, hw_format
)) {
1605 /* Typed surface reads support a very limited subset of the shader
1606 * image formats. Translate it into the closest format the
1607 * hardware supports.
1609 return isl_lower_storage_image_format(devinfo
, hw_format
);
1611 /* The hardware doesn't actually support a typed format that we can use
1612 * so we have to fall back to untyped read/write messages.
1614 return ISL_FORMAT_RAW
;
1619 update_default_image_param(struct brw_context
*brw
,
1620 struct gl_image_unit
*u
,
1621 unsigned surface_idx
,
1622 struct brw_image_param
*param
)
1624 memset(param
, 0, sizeof(*param
));
1625 param
->surface_idx
= surface_idx
;
1626 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1627 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1628 * detailed explanation of these parameters.
1630 param
->swizzling
[0] = 0xff;
1631 param
->swizzling
[1] = 0xff;
1635 update_buffer_image_param(struct brw_context
*brw
,
1636 struct gl_image_unit
*u
,
1637 unsigned surface_idx
,
1638 struct brw_image_param
*param
)
1640 struct gl_buffer_object
*obj
= u
->TexObj
->BufferObject
;
1641 const uint32_t size
= MIN2((uint32_t)u
->TexObj
->BufferSize
, obj
->Size
);
1642 update_default_image_param(brw
, u
, surface_idx
, param
);
1644 param
->size
[0] = size
/ _mesa_get_format_bytes(u
->_ActualFormat
);
1645 param
->stride
[0] = _mesa_get_format_bytes(u
->_ActualFormat
);
1649 update_texture_image_param(struct brw_context
*brw
,
1650 struct gl_image_unit
*u
,
1651 unsigned surface_idx
,
1652 struct brw_image_param
*param
)
1654 struct intel_mipmap_tree
*mt
= intel_texture_object(u
->TexObj
)->mt
;
1656 update_default_image_param(brw
, u
, surface_idx
, param
);
1658 param
->size
[0] = minify(mt
->logical_width0
, u
->Level
);
1659 param
->size
[1] = minify(mt
->logical_height0
, u
->Level
);
1660 param
->size
[2] = (!u
->Layered
? 1 :
1661 u
->TexObj
->Target
== GL_TEXTURE_CUBE_MAP
? 6 :
1662 u
->TexObj
->Target
== GL_TEXTURE_3D
?
1663 minify(mt
->logical_depth0
, u
->Level
) :
1664 mt
->logical_depth0
);
1666 intel_miptree_get_image_offset(mt
, u
->Level
, u
->_Layer
,
1670 param
->stride
[0] = mt
->cpp
;
1671 param
->stride
[1] = mt
->pitch
/ mt
->cpp
;
1673 brw_miptree_get_horizontal_slice_pitch(brw
, mt
, u
->Level
);
1675 brw_miptree_get_vertical_slice_pitch(brw
, mt
, u
->Level
);
1677 if (mt
->tiling
== I915_TILING_X
) {
1678 /* An X tile is a rectangular block of 512x8 bytes. */
1679 param
->tiling
[0] = _mesa_logbase2(512 / mt
->cpp
);
1680 param
->tiling
[1] = _mesa_logbase2(8);
1682 if (brw
->has_swizzling
) {
1683 /* Right shifts required to swizzle bits 9 and 10 of the memory
1684 * address with bit 6.
1686 param
->swizzling
[0] = 3;
1687 param
->swizzling
[1] = 4;
1689 } else if (mt
->tiling
== I915_TILING_Y
) {
1690 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1691 * different to the layout of an X-tiled surface, we simply pretend that
1692 * the surface is broken up in a number of smaller 16Bx32 tiles, each
1693 * one arranged in X-major order just like is the case for X-tiling.
1695 param
->tiling
[0] = _mesa_logbase2(16 / mt
->cpp
);
1696 param
->tiling
[1] = _mesa_logbase2(32);
1698 if (brw
->has_swizzling
) {
1699 /* Right shift required to swizzle bit 9 of the memory address with
1702 param
->swizzling
[0] = 3;
1706 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1707 * address calculation algorithm (emit_address_calculation() in
1708 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1709 * modulus equal to the LOD.
1711 param
->tiling
[2] = (u
->TexObj
->Target
== GL_TEXTURE_3D
? u
->Level
:
1716 update_image_surface(struct brw_context
*brw
,
1717 struct gl_image_unit
*u
,
1719 unsigned surface_idx
,
1720 uint32_t *surf_offset
,
1721 struct brw_image_param
*param
)
1723 if (_mesa_is_image_unit_valid(&brw
->ctx
, u
)) {
1724 struct gl_texture_object
*obj
= u
->TexObj
;
1725 const unsigned format
= get_image_format(brw
, u
->_ActualFormat
, access
);
1727 if (obj
->Target
== GL_TEXTURE_BUFFER
) {
1728 struct intel_buffer_object
*intel_obj
=
1729 intel_buffer_object(obj
->BufferObject
);
1730 const unsigned texel_size
= (format
== ISL_FORMAT_RAW
? 1 :
1731 _mesa_get_format_bytes(u
->_ActualFormat
));
1733 brw_emit_buffer_surface_state(
1734 brw
, surf_offset
, intel_obj
->buffer
, obj
->BufferOffset
,
1735 format
, intel_obj
->Base
.Size
, texel_size
,
1736 access
!= GL_READ_ONLY
);
1738 update_buffer_image_param(brw
, u
, surface_idx
, param
);
1741 struct intel_texture_object
*intel_obj
= intel_texture_object(obj
);
1742 struct intel_mipmap_tree
*mt
= intel_obj
->mt
;
1744 if (format
== ISL_FORMAT_RAW
) {
1745 brw_emit_buffer_surface_state(
1746 brw
, surf_offset
, mt
->bo
, mt
->offset
,
1747 format
, mt
->bo
->size
- mt
->offset
, 1 /* pitch */,
1748 access
!= GL_READ_ONLY
);
1751 const unsigned num_layers
= (!u
->Layered
? 1 :
1752 obj
->Target
== GL_TEXTURE_CUBE_MAP
? 6 :
1753 mt
->logical_depth0
);
1755 struct isl_view view
= {
1757 .base_level
= obj
->MinLevel
+ u
->Level
,
1759 .base_array_layer
= obj
->MinLayer
+ u
->_Layer
,
1760 .array_len
= num_layers
,
1761 .swizzle
= ISL_SWIZZLE_IDENTITY
,
1762 .usage
= ISL_SURF_USAGE_STORAGE_BIT
,
1765 const int surf_index
= surf_offset
- &brw
->wm
.base
.surf_offset
[0];
1766 const bool unresolved
= intel_miptree_has_color_unresolved(
1767 mt
, view
.base_level
, view
.levels
,
1768 view
.base_array_layer
, view
.array_len
);
1769 const int flags
= unresolved
? 0 : INTEL_AUX_BUFFER_DISABLED
;
1770 brw_emit_surface_state(brw
, mt
, flags
, mt
->target
, view
,
1772 surf_offset
, surf_index
,
1773 I915_GEM_DOMAIN_SAMPLER
,
1774 access
== GL_READ_ONLY
? 0 :
1775 I915_GEM_DOMAIN_SAMPLER
);
1778 update_texture_image_param(brw
, u
, surface_idx
, param
);
1782 brw
->vtbl
.emit_null_surface_state(brw
, 1, 1, 1, surf_offset
);
1783 update_default_image_param(brw
, u
, surface_idx
, param
);
1788 brw_upload_image_surfaces(struct brw_context
*brw
,
1789 const struct gl_program
*prog
,
1790 struct brw_stage_state
*stage_state
,
1791 struct brw_stage_prog_data
*prog_data
)
1794 struct gl_context
*ctx
= &brw
->ctx
;
1796 if (prog
->info
.num_images
) {
1797 for (unsigned i
= 0; i
< prog
->info
.num_images
; i
++) {
1798 struct gl_image_unit
*u
= &ctx
->ImageUnits
[prog
->sh
.ImageUnits
[i
]];
1799 const unsigned surf_idx
= prog_data
->binding_table
.image_start
+ i
;
1801 update_image_surface(brw
, u
, prog
->sh
.ImageAccess
[i
],
1803 &stage_state
->surf_offset
[surf_idx
],
1804 &prog_data
->image_param
[i
]);
1807 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1808 /* This may have changed the image metadata dependent on the context
1809 * image unit state and passed to the program as uniforms, make sure
1810 * that push and pull constants are reuploaded.
1812 brw
->NewGLState
|= _NEW_PROGRAM_CONSTANTS
;
1817 brw_upload_wm_image_surfaces(struct brw_context
*brw
)
1819 /* BRW_NEW_FRAGMENT_PROGRAM */
1820 const struct gl_program
*wm
= brw
->fragment_program
;
1823 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1824 brw_upload_image_surfaces(brw
, wm
, &brw
->wm
.base
,
1825 brw
->wm
.base
.prog_data
);
1829 const struct brw_tracked_state brw_wm_image_surfaces
= {
1831 .mesa
= _NEW_TEXTURE
,
1832 .brw
= BRW_NEW_BATCH
|
1834 BRW_NEW_FRAGMENT_PROGRAM
|
1835 BRW_NEW_FS_PROG_DATA
|
1838 .emit
= brw_upload_wm_image_surfaces
,
1842 gen4_init_vtable_surface_functions(struct brw_context
*brw
)
1844 brw
->vtbl
.update_renderbuffer_surface
= gen4_update_renderbuffer_surface
;
1845 brw
->vtbl
.emit_null_surface_state
= brw_emit_null_surface_state
;
1849 gen6_init_vtable_surface_functions(struct brw_context
*brw
)
1851 gen4_init_vtable_surface_functions(brw
);
1852 brw
->vtbl
.update_renderbuffer_surface
= brw_update_renderbuffer_surface
;
1856 brw_upload_cs_work_groups_surface(struct brw_context
*brw
)
1858 struct gl_context
*ctx
= &brw
->ctx
;
1860 struct gl_program
*prog
=
1861 ctx
->_Shader
->CurrentProgram
[MESA_SHADER_COMPUTE
];
1862 /* BRW_NEW_CS_PROG_DATA */
1863 const struct brw_cs_prog_data
*cs_prog_data
=
1864 brw_cs_prog_data(brw
->cs
.base
.prog_data
);
1866 if (prog
&& cs_prog_data
->uses_num_work_groups
) {
1867 const unsigned surf_idx
=
1868 cs_prog_data
->binding_table
.work_groups_start
;
1869 uint32_t *surf_offset
= &brw
->cs
.base
.surf_offset
[surf_idx
];
1873 if (brw
->compute
.num_work_groups_bo
== NULL
) {
1875 intel_upload_data(brw
,
1876 (void *)brw
->compute
.num_work_groups
,
1882 bo
= brw
->compute
.num_work_groups_bo
;
1883 bo_offset
= brw
->compute
.num_work_groups_offset
;
1886 brw_emit_buffer_surface_state(brw
, surf_offset
,
1889 3 * sizeof(GLuint
), 1, true);
1890 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1894 const struct brw_tracked_state brw_cs_work_groups_surface
= {
1896 .brw
= BRW_NEW_BLORP
|
1897 BRW_NEW_CS_PROG_DATA
|
1898 BRW_NEW_CS_WORK_GROUPS
1900 .emit
= brw_upload_cs_work_groups_surface
,