2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keithw@vmware.com>
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
58 uint32_t wb_mocs
[] = {
66 uint32_t pte_mocs
[] = {
75 brw_get_bo_mocs(const struct gen_device_info
*devinfo
, struct brw_bo
*bo
)
77 return (bo
&& bo
->external
? pte_mocs
: wb_mocs
)[devinfo
->gen
];
81 get_isl_surf(struct brw_context
*brw
, struct intel_mipmap_tree
*mt
,
82 GLenum target
, struct isl_view
*view
,
83 uint32_t *tile_x
, uint32_t *tile_y
,
84 uint32_t *offset
, struct isl_surf
*surf
)
88 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
89 const enum isl_dim_layout dim_layout
=
90 get_isl_dim_layout(devinfo
, mt
->surf
.tiling
, target
);
92 surf
->dim
= get_isl_surf_dim(target
);
94 if (surf
->dim_layout
== dim_layout
)
97 /* The layout of the specified texture target is not compatible with the
98 * actual layout of the miptree structure in memory -- You're entering
99 * dangerous territory, this can only possibly work if you only intended
100 * to access a single level and slice of the texture, and the hardware
101 * supports the tile offset feature in order to allow non-tile-aligned
102 * base offsets, since we'll have to point the hardware to the first
103 * texel of the level instead of relying on the usual base level/layer
106 assert(devinfo
->has_surface_tile_offset
);
107 assert(view
->levels
== 1 && view
->array_len
== 1);
108 assert(*tile_x
== 0 && *tile_y
== 0);
110 *offset
+= intel_miptree_get_tile_offsets(mt
, view
->base_level
,
111 view
->base_array_layer
,
114 /* Minify the logical dimensions of the texture. */
115 const unsigned l
= view
->base_level
- mt
->first_level
;
116 surf
->logical_level0_px
.width
= minify(surf
->logical_level0_px
.width
, l
);
117 surf
->logical_level0_px
.height
= surf
->dim
<= ISL_SURF_DIM_1D
? 1 :
118 minify(surf
->logical_level0_px
.height
, l
);
119 surf
->logical_level0_px
.depth
= surf
->dim
<= ISL_SURF_DIM_2D
? 1 :
120 minify(surf
->logical_level0_px
.depth
, l
);
122 /* Only the base level and layer can be addressed with the overridden
125 surf
->logical_level0_px
.array_len
= 1;
127 surf
->dim_layout
= dim_layout
;
129 /* The requested slice of the texture is now at the base level and
132 view
->base_level
= 0;
133 view
->base_array_layer
= 0;
137 brw_emit_surface_state(struct brw_context
*brw
,
138 struct intel_mipmap_tree
*mt
,
139 GLenum target
, struct isl_view view
,
140 enum isl_aux_usage aux_usage
,
141 uint32_t *surf_offset
, int surf_index
,
142 unsigned reloc_flags
)
144 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
145 uint32_t tile_x
= mt
->level
[0].level_x
;
146 uint32_t tile_y
= mt
->level
[0].level_y
;
147 uint32_t offset
= mt
->offset
;
149 struct isl_surf surf
;
151 get_isl_surf(brw
, mt
, target
, &view
, &tile_x
, &tile_y
, &offset
, &surf
);
153 union isl_color_value clear_color
= { .u32
= { 0, 0, 0, 0 } };
155 struct brw_bo
*aux_bo
= NULL
;
156 struct isl_surf
*aux_surf
= NULL
;
157 uint64_t aux_offset
= 0;
158 struct brw_bo
*clear_bo
= NULL
;
159 uint32_t clear_offset
= 0;
161 if (aux_usage
!= ISL_AUX_USAGE_NONE
) {
162 aux_surf
= &mt
->aux_buf
->surf
;
163 aux_bo
= mt
->aux_buf
->bo
;
164 aux_offset
= mt
->aux_buf
->offset
;
166 /* We only really need a clear color if we also have an auxiliary
167 * surface. Without one, it does nothing.
170 intel_miptree_get_clear_color(devinfo
, mt
, view
.format
,
171 view
.usage
& ISL_SURF_USAGE_TEXTURE_BIT
,
172 &clear_bo
, &clear_offset
);
175 void *state
= brw_state_batch(brw
,
176 brw
->isl_dev
.ss
.size
,
177 brw
->isl_dev
.ss
.align
,
180 isl_surf_fill_state(&brw
->isl_dev
, state
, .surf
= &surf
, .view
= &view
,
181 .address
= brw_state_reloc(&brw
->batch
,
182 *surf_offset
+ brw
->isl_dev
.ss
.addr_offset
,
183 mt
->bo
, offset
, reloc_flags
),
184 .aux_surf
= aux_surf
, .aux_usage
= aux_usage
,
185 .aux_address
= aux_offset
,
186 .mocs
= brw_get_bo_mocs(devinfo
, mt
->bo
),
187 .clear_color
= clear_color
,
188 .use_clear_address
= clear_bo
!= NULL
,
189 .clear_address
= clear_offset
,
190 .x_offset_sa
= tile_x
, .y_offset_sa
= tile_y
);
192 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
193 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
194 * contain other control information. Since buffer addresses are always
195 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
196 * an ordinary reloc to do the necessary address translation.
198 * FIXME: move to the point of assignment.
200 assert((aux_offset
& 0xfff) == 0);
202 if (devinfo
->gen
>= 8) {
203 uint64_t *aux_addr
= state
+ brw
->isl_dev
.ss
.aux_addr_offset
;
204 *aux_addr
= brw_state_reloc(&brw
->batch
,
206 brw
->isl_dev
.ss
.aux_addr_offset
,
210 uint32_t *aux_addr
= state
+ brw
->isl_dev
.ss
.aux_addr_offset
;
211 *aux_addr
= brw_state_reloc(&brw
->batch
,
213 brw
->isl_dev
.ss
.aux_addr_offset
,
220 if (clear_bo
!= NULL
) {
221 /* Make sure the offset is aligned with a cacheline. */
222 assert((clear_offset
& 0x3f) == 0);
223 uint64_t *clear_address
=
224 state
+ brw
->isl_dev
.ss
.clear_color_state_offset
;
225 *clear_address
= brw_state_reloc(&brw
->batch
,
227 brw
->isl_dev
.ss
.clear_color_state_offset
,
228 clear_bo
, *clear_address
, reloc_flags
);
233 gen6_update_renderbuffer_surface(struct brw_context
*brw
,
234 struct gl_renderbuffer
*rb
,
238 struct gl_context
*ctx
= &brw
->ctx
;
239 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
240 struct intel_mipmap_tree
*mt
= irb
->mt
;
242 assert(brw_render_target_supported(brw
, rb
));
244 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
245 if (unlikely(!brw
->mesa_format_supports_render
[rb_format
])) {
246 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
247 __func__
, _mesa_get_format_name(rb_format
));
249 enum isl_format isl_format
= brw
->mesa_to_isl_render_format
[rb_format
];
251 struct isl_view view
= {
252 .format
= isl_format
,
253 .base_level
= irb
->mt_level
- irb
->mt
->first_level
,
255 .base_array_layer
= irb
->mt_layer
,
256 .array_len
= MAX2(irb
->layer_count
, 1),
257 .swizzle
= ISL_SWIZZLE_IDENTITY
,
258 .usage
= ISL_SURF_USAGE_RENDER_TARGET_BIT
,
262 brw_emit_surface_state(brw
, mt
, mt
->target
, view
,
263 brw
->draw_aux_usage
[unit
],
270 translate_tex_target(GLenum target
)
274 case GL_TEXTURE_1D_ARRAY_EXT
:
275 return BRW_SURFACE_1D
;
277 case GL_TEXTURE_RECTANGLE_NV
:
278 return BRW_SURFACE_2D
;
281 case GL_TEXTURE_2D_ARRAY_EXT
:
282 case GL_TEXTURE_EXTERNAL_OES
:
283 case GL_TEXTURE_2D_MULTISAMPLE
:
284 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY
:
285 return BRW_SURFACE_2D
;
288 return BRW_SURFACE_3D
;
290 case GL_TEXTURE_CUBE_MAP
:
291 case GL_TEXTURE_CUBE_MAP_ARRAY
:
292 return BRW_SURFACE_CUBE
;
295 unreachable("not reached");
300 brw_get_surface_tiling_bits(enum isl_tiling tiling
)
304 return BRW_SURFACE_TILED
;
306 return BRW_SURFACE_TILED
| BRW_SURFACE_TILED_Y
;
314 brw_get_surface_num_multisamples(unsigned num_samples
)
317 return BRW_SURFACE_MULTISAMPLECOUNT_4
;
319 return BRW_SURFACE_MULTISAMPLECOUNT_1
;
323 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
327 brw_get_texture_swizzle(const struct gl_context
*ctx
,
328 const struct gl_texture_object
*t
)
330 const struct gl_texture_image
*img
= t
->Image
[0][t
->BaseLevel
];
332 int swizzles
[SWIZZLE_NIL
+ 1] = {
342 if (img
->_BaseFormat
== GL_DEPTH_COMPONENT
||
343 img
->_BaseFormat
== GL_DEPTH_STENCIL
) {
344 GLenum depth_mode
= t
->DepthMode
;
346 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
347 * with depth component data specified with a sized internal format.
348 * Otherwise, it's left at the old default, GL_LUMINANCE.
350 if (_mesa_is_gles3(ctx
) &&
351 img
->InternalFormat
!= GL_DEPTH_COMPONENT
&&
352 img
->InternalFormat
!= GL_DEPTH_STENCIL
) {
356 switch (depth_mode
) {
358 swizzles
[0] = SWIZZLE_ZERO
;
359 swizzles
[1] = SWIZZLE_ZERO
;
360 swizzles
[2] = SWIZZLE_ZERO
;
361 swizzles
[3] = SWIZZLE_X
;
364 swizzles
[0] = SWIZZLE_X
;
365 swizzles
[1] = SWIZZLE_X
;
366 swizzles
[2] = SWIZZLE_X
;
367 swizzles
[3] = SWIZZLE_ONE
;
370 swizzles
[0] = SWIZZLE_X
;
371 swizzles
[1] = SWIZZLE_X
;
372 swizzles
[2] = SWIZZLE_X
;
373 swizzles
[3] = SWIZZLE_X
;
376 swizzles
[0] = SWIZZLE_X
;
377 swizzles
[1] = SWIZZLE_ZERO
;
378 swizzles
[2] = SWIZZLE_ZERO
;
379 swizzles
[3] = SWIZZLE_ONE
;
384 GLenum datatype
= _mesa_get_format_datatype(img
->TexFormat
);
386 /* If the texture's format is alpha-only, force R, G, and B to
387 * 0.0. Similarly, if the texture's format has no alpha channel,
388 * force the alpha value read to 1.0. This allows for the
389 * implementation to use an RGBA texture for any of these formats
390 * without leaking any unexpected values.
392 switch (img
->_BaseFormat
) {
394 swizzles
[0] = SWIZZLE_ZERO
;
395 swizzles
[1] = SWIZZLE_ZERO
;
396 swizzles
[2] = SWIZZLE_ZERO
;
399 if (t
->_IsIntegerFormat
|| datatype
== GL_SIGNED_NORMALIZED
) {
400 swizzles
[0] = SWIZZLE_X
;
401 swizzles
[1] = SWIZZLE_X
;
402 swizzles
[2] = SWIZZLE_X
;
403 swizzles
[3] = SWIZZLE_ONE
;
406 case GL_LUMINANCE_ALPHA
:
407 if (datatype
== GL_SIGNED_NORMALIZED
) {
408 swizzles
[0] = SWIZZLE_X
;
409 swizzles
[1] = SWIZZLE_X
;
410 swizzles
[2] = SWIZZLE_X
;
411 swizzles
[3] = SWIZZLE_W
;
415 if (datatype
== GL_SIGNED_NORMALIZED
) {
416 swizzles
[0] = SWIZZLE_X
;
417 swizzles
[1] = SWIZZLE_X
;
418 swizzles
[2] = SWIZZLE_X
;
419 swizzles
[3] = SWIZZLE_X
;
425 if (_mesa_get_format_bits(img
->TexFormat
, GL_ALPHA_BITS
) > 0 ||
426 img
->TexFormat
== MESA_FORMAT_RGB_DXT1
||
427 img
->TexFormat
== MESA_FORMAT_SRGB_DXT1
)
428 swizzles
[3] = SWIZZLE_ONE
;
432 return MAKE_SWIZZLE4(swizzles
[GET_SWZ(t
->_Swizzle
, 0)],
433 swizzles
[GET_SWZ(t
->_Swizzle
, 1)],
434 swizzles
[GET_SWZ(t
->_Swizzle
, 2)],
435 swizzles
[GET_SWZ(t
->_Swizzle
, 3)]);
439 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
440 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
442 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
445 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
447 * which is simply adding 4 then modding by 8 (or anding with 7).
449 * We then may need to apply workarounds for textureGather hardware bugs.
452 swizzle_to_scs(GLenum swizzle
, bool need_green_to_blue
)
454 unsigned scs
= (swizzle
+ 4) & 7;
456 return (need_green_to_blue
&& scs
== HSW_SCS_GREEN
) ? HSW_SCS_BLUE
: scs
;
459 static void brw_update_texture_surface(struct gl_context
*ctx
,
461 uint32_t *surf_offset
,
466 struct brw_context
*brw
= brw_context(ctx
);
467 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
468 struct gl_texture_object
*obj
= ctx
->Texture
.Unit
[unit
]._Current
;
470 if (obj
->Target
== GL_TEXTURE_BUFFER
) {
471 brw_update_buffer_texture_surface(ctx
, unit
, surf_offset
);
474 struct intel_texture_object
*intel_obj
= intel_texture_object(obj
);
475 struct intel_mipmap_tree
*mt
= intel_obj
->mt
;
478 if (mt
->plane
[plane
- 1] == NULL
)
480 mt
= mt
->plane
[plane
- 1];
483 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit
);
484 /* If this is a view with restricted NumLayers, then our effective depth
485 * is not just the miptree depth.
487 unsigned view_num_layers
;
488 if (obj
->Immutable
&& obj
->Target
!= GL_TEXTURE_3D
) {
489 view_num_layers
= obj
->NumLayers
;
491 view_num_layers
= mt
->surf
.dim
== ISL_SURF_DIM_3D
?
492 mt
->surf
.logical_level0_px
.depth
:
493 mt
->surf
.logical_level0_px
.array_len
;
496 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
497 * texturing functions that return a float, as our code generation always
498 * selects the .x channel (which would always be 0).
500 struct gl_texture_image
*firstImage
= obj
->Image
[0][obj
->BaseLevel
];
501 const bool alpha_depth
= obj
->DepthMode
== GL_ALPHA
&&
502 (firstImage
->_BaseFormat
== GL_DEPTH_COMPONENT
||
503 firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
);
504 const unsigned swizzle
= (unlikely(alpha_depth
) ? SWIZZLE_XYZW
:
505 brw_get_texture_swizzle(&brw
->ctx
, obj
));
507 mesa_format mesa_fmt
;
508 if (firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
||
509 firstImage
->_BaseFormat
== GL_DEPTH_COMPONENT
) {
510 /* The format from intel_obj may be a combined depth stencil format
511 * when we just want depth. Pull it from the miptree instead. This
512 * is safe because texture views aren't allowed on depth/stencil.
514 mesa_fmt
= mt
->format
;
515 } else if (mt
->etc_format
!= MESA_FORMAT_NONE
) {
516 mesa_fmt
= mt
->format
;
517 } else if (plane
> 0) {
518 mesa_fmt
= mt
->format
;
520 mesa_fmt
= intel_obj
->_Format
;
522 enum isl_format format
= translate_tex_format(brw
, mesa_fmt
,
523 for_txf
? GL_DECODE_EXT
:
524 sampler
->sRGBDecode
);
526 /* Implement gen6 and gen7 gather work-around */
527 bool need_green_to_blue
= false;
529 if (devinfo
->gen
== 7 && (format
== ISL_FORMAT_R32G32_FLOAT
||
530 format
== ISL_FORMAT_R32G32_SINT
||
531 format
== ISL_FORMAT_R32G32_UINT
)) {
532 format
= ISL_FORMAT_R32G32_FLOAT_LD
;
533 need_green_to_blue
= devinfo
->is_haswell
;
534 } else if (devinfo
->gen
== 6) {
535 /* Sandybridge's gather4 message is broken for integer formats.
536 * To work around this, we pretend the surface is UNORM for
537 * 8 or 16-bit formats, and emit shader instructions to recover
538 * the real INT/UINT value. For 32-bit formats, we pretend
539 * the surface is FLOAT, and simply reinterpret the resulting
543 case ISL_FORMAT_R8_SINT
:
544 case ISL_FORMAT_R8_UINT
:
545 format
= ISL_FORMAT_R8_UNORM
;
548 case ISL_FORMAT_R16_SINT
:
549 case ISL_FORMAT_R16_UINT
:
550 format
= ISL_FORMAT_R16_UNORM
;
553 case ISL_FORMAT_R32_SINT
:
554 case ISL_FORMAT_R32_UINT
:
555 format
= ISL_FORMAT_R32_FLOAT
;
564 if (obj
->StencilSampling
&& firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
) {
565 if (devinfo
->gen
<= 7) {
566 assert(mt
->r8stencil_mt
&& !mt
->stencil_mt
->r8stencil_needs_update
);
567 mt
= mt
->r8stencil_mt
;
571 format
= ISL_FORMAT_R8_UINT
;
572 } else if (devinfo
->gen
<= 7 && mt
->format
== MESA_FORMAT_S_UINT8
) {
573 assert(mt
->r8stencil_mt
&& !mt
->r8stencil_needs_update
);
574 mt
= mt
->r8stencil_mt
;
575 format
= ISL_FORMAT_R8_UINT
;
578 const int surf_index
= surf_offset
- &brw
->wm
.base
.surf_offset
[0];
580 struct isl_view view
= {
582 .base_level
= obj
->MinLevel
+ obj
->BaseLevel
,
583 .levels
= intel_obj
->_MaxLevel
- obj
->BaseLevel
+ 1,
584 .base_array_layer
= obj
->MinLayer
,
585 .array_len
= view_num_layers
,
587 .r
= swizzle_to_scs(GET_SWZ(swizzle
, 0), need_green_to_blue
),
588 .g
= swizzle_to_scs(GET_SWZ(swizzle
, 1), need_green_to_blue
),
589 .b
= swizzle_to_scs(GET_SWZ(swizzle
, 2), need_green_to_blue
),
590 .a
= swizzle_to_scs(GET_SWZ(swizzle
, 3), need_green_to_blue
),
592 .usage
= ISL_SURF_USAGE_TEXTURE_BIT
,
595 /* On Ivy Bridge and earlier, we handle texture swizzle with shader
596 * code. The actual surface swizzle should be identity.
598 if (devinfo
->gen
<= 7 && !devinfo
->is_haswell
)
599 view
.swizzle
= ISL_SWIZZLE_IDENTITY
;
601 if (obj
->Target
== GL_TEXTURE_CUBE_MAP
||
602 obj
->Target
== GL_TEXTURE_CUBE_MAP_ARRAY
)
603 view
.usage
|= ISL_SURF_USAGE_CUBE_BIT
;
605 enum isl_aux_usage aux_usage
=
606 intel_miptree_texture_aux_usage(brw
, mt
, format
);
608 brw_emit_surface_state(brw
, mt
, mt
->target
, view
, aux_usage
,
609 surf_offset
, surf_index
,
615 brw_emit_buffer_surface_state(struct brw_context
*brw
,
616 uint32_t *out_offset
,
618 unsigned buffer_offset
,
619 unsigned surface_format
,
620 unsigned buffer_size
,
622 unsigned reloc_flags
)
624 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
625 uint32_t *dw
= brw_state_batch(brw
,
626 brw
->isl_dev
.ss
.size
,
627 brw
->isl_dev
.ss
.align
,
630 isl_buffer_fill_state(&brw
->isl_dev
, dw
,
631 .address
= !bo
? buffer_offset
:
632 brw_state_reloc(&brw
->batch
,
633 *out_offset
+ brw
->isl_dev
.ss
.addr_offset
,
637 .format
= surface_format
,
639 .mocs
= brw_get_bo_mocs(devinfo
, bo
));
643 buffer_texture_range_size(struct brw_context
*brw
,
644 struct gl_texture_object
*obj
)
646 assert(obj
->Target
== GL_TEXTURE_BUFFER
);
647 const unsigned texel_size
= _mesa_get_format_bytes(obj
->_BufferObjectFormat
);
648 const unsigned buffer_size
= (!obj
->BufferObject
? 0 :
649 obj
->BufferObject
->Size
);
650 const unsigned buffer_offset
= MIN2(buffer_size
, obj
->BufferOffset
);
652 /* The ARB_texture_buffer_specification says:
654 * "The number of texels in the buffer texture's texel array is given by
656 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
658 * where <buffer_size> is the size of the buffer object, in basic
659 * machine units and <components> and <base_type> are the element count
660 * and base data type for elements, as specified in Table X.1. The
661 * number of texels in the texel array is then clamped to the
662 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
664 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
665 * so that when ISL divides by stride to obtain the number of texels, that
666 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
668 return MIN3((unsigned)obj
->BufferSize
,
669 buffer_size
- buffer_offset
,
670 brw
->ctx
.Const
.MaxTextureBufferSize
* texel_size
);
674 brw_update_buffer_texture_surface(struct gl_context
*ctx
,
676 uint32_t *surf_offset
)
678 struct brw_context
*brw
= brw_context(ctx
);
679 struct gl_texture_object
*tObj
= ctx
->Texture
.Unit
[unit
]._Current
;
680 struct intel_buffer_object
*intel_obj
=
681 intel_buffer_object(tObj
->BufferObject
);
682 const unsigned size
= buffer_texture_range_size(brw
, tObj
);
683 struct brw_bo
*bo
= NULL
;
684 mesa_format format
= tObj
->_BufferObjectFormat
;
685 const enum isl_format isl_format
= brw_isl_format_for_mesa_format(format
);
686 int texel_size
= _mesa_get_format_bytes(format
);
689 bo
= intel_bufferobj_buffer(brw
, intel_obj
, tObj
->BufferOffset
, size
,
692 if (isl_format
== ISL_FORMAT_UNSUPPORTED
) {
693 _mesa_problem(NULL
, "bad format %s for texture buffer\n",
694 _mesa_get_format_name(format
));
697 brw_emit_buffer_surface_state(brw
, surf_offset
, bo
,
706 * Set up a binding table entry for use by stream output logic (transform
709 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
712 brw_update_sol_surface(struct brw_context
*brw
,
713 struct gl_buffer_object
*buffer_obj
,
714 uint32_t *out_offset
, unsigned num_vector_components
,
715 unsigned stride_dwords
, unsigned offset_dwords
)
717 struct intel_buffer_object
*intel_bo
= intel_buffer_object(buffer_obj
);
718 uint32_t offset_bytes
= 4 * offset_dwords
;
719 struct brw_bo
*bo
= intel_bufferobj_buffer(brw
, intel_bo
,
721 buffer_obj
->Size
- offset_bytes
,
723 uint32_t *surf
= brw_state_batch(brw
, 6 * 4, 32, out_offset
);
724 uint32_t pitch_minus_1
= 4*stride_dwords
- 1;
725 size_t size_dwords
= buffer_obj
->Size
/ 4;
726 uint32_t buffer_size_minus_1
, width
, height
, depth
, surface_format
;
728 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
729 * too big to map using a single binding table entry?
731 assert((size_dwords
- offset_dwords
) / stride_dwords
732 <= BRW_MAX_NUM_BUFFER_ENTRIES
);
734 if (size_dwords
> offset_dwords
+ num_vector_components
) {
735 /* There is room for at least 1 transform feedback output in the buffer.
736 * Compute the number of additional transform feedback outputs the
737 * buffer has room for.
739 buffer_size_minus_1
=
740 (size_dwords
- offset_dwords
- num_vector_components
) / stride_dwords
;
742 /* There isn't even room for a single transform feedback output in the
743 * buffer. We can't configure the binding table entry to prevent output
744 * entirely; we'll have to rely on the geometry shader to detect
745 * overflow. But to minimize the damage in case of a bug, set up the
746 * binding table entry to just allow a single output.
748 buffer_size_minus_1
= 0;
750 width
= buffer_size_minus_1
& 0x7f;
751 height
= (buffer_size_minus_1
& 0xfff80) >> 7;
752 depth
= (buffer_size_minus_1
& 0x7f00000) >> 20;
754 switch (num_vector_components
) {
756 surface_format
= ISL_FORMAT_R32_FLOAT
;
759 surface_format
= ISL_FORMAT_R32G32_FLOAT
;
762 surface_format
= ISL_FORMAT_R32G32B32_FLOAT
;
765 surface_format
= ISL_FORMAT_R32G32B32A32_FLOAT
;
768 unreachable("Invalid vector size for transform feedback output");
771 surf
[0] = BRW_SURFACE_BUFFER
<< BRW_SURFACE_TYPE_SHIFT
|
772 BRW_SURFACE_MIPMAPLAYOUT_BELOW
<< BRW_SURFACE_MIPLAYOUT_SHIFT
|
773 surface_format
<< BRW_SURFACE_FORMAT_SHIFT
|
774 BRW_SURFACE_RC_READ_WRITE
;
775 surf
[1] = brw_state_reloc(&brw
->batch
,
776 *out_offset
+ 4, bo
, offset_bytes
, RELOC_WRITE
);
777 surf
[2] = (width
<< BRW_SURFACE_WIDTH_SHIFT
|
778 height
<< BRW_SURFACE_HEIGHT_SHIFT
);
779 surf
[3] = (depth
<< BRW_SURFACE_DEPTH_SHIFT
|
780 pitch_minus_1
<< BRW_SURFACE_PITCH_SHIFT
);
785 /* Creates a new WM constant buffer reflecting the current fragment program's
786 * constants, if needed by the fragment program.
788 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
792 brw_upload_wm_pull_constants(struct brw_context
*brw
)
794 struct brw_stage_state
*stage_state
= &brw
->wm
.base
;
795 /* BRW_NEW_FRAGMENT_PROGRAM */
796 struct brw_program
*fp
=
797 (struct brw_program
*) brw
->programs
[MESA_SHADER_FRAGMENT
];
799 /* BRW_NEW_FS_PROG_DATA */
800 struct brw_stage_prog_data
*prog_data
= brw
->wm
.base
.prog_data
;
802 _mesa_shader_write_subroutine_indices(&brw
->ctx
, MESA_SHADER_FRAGMENT
);
803 /* _NEW_PROGRAM_CONSTANTS */
804 brw_upload_pull_constants(brw
, BRW_NEW_SURFACES
, &fp
->program
,
805 stage_state
, prog_data
);
808 const struct brw_tracked_state brw_wm_pull_constants
= {
810 .mesa
= _NEW_PROGRAM_CONSTANTS
,
811 .brw
= BRW_NEW_BATCH
|
812 BRW_NEW_FRAGMENT_PROGRAM
|
813 BRW_NEW_FS_PROG_DATA
,
815 .emit
= brw_upload_wm_pull_constants
,
819 * Creates a null renderbuffer surface.
821 * This is used when the shader doesn't write to any color output. An FB
822 * write to target 0 will still be emitted, because that's how the thread is
823 * terminated (and computed depth is returned), so we need to have the
824 * hardware discard the target 0 color output..
827 emit_null_surface_state(struct brw_context
*brw
,
828 const struct gl_framebuffer
*fb
,
829 uint32_t *out_offset
)
831 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
832 uint32_t *surf
= brw_state_batch(brw
,
833 brw
->isl_dev
.ss
.size
,
834 brw
->isl_dev
.ss
.align
,
837 /* Use the fb dimensions or 1x1x1 */
838 const unsigned width
= fb
? _mesa_geometric_width(fb
) : 1;
839 const unsigned height
= fb
? _mesa_geometric_height(fb
) : 1;
840 const unsigned samples
= fb
? _mesa_geometric_samples(fb
) : 1;
842 if (devinfo
->gen
!= 6 || samples
<= 1) {
843 isl_null_fill_state(&brw
->isl_dev
, surf
,
844 isl_extent3d(width
, height
, 1));
848 /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
849 * So work around this problem by rendering into dummy color buffer.
851 * To decrease the amount of memory needed by the workaround buffer, we
852 * set its pitch to 128 bytes (the width of a Y tile). This means that
853 * the amount of memory needed for the workaround buffer is
854 * (width_in_tiles + height_in_tiles - 1) tiles.
856 * Note that since the workaround buffer will be interpreted by the
857 * hardware as an interleaved multisampled buffer, we need to compute
858 * width_in_tiles and height_in_tiles by dividing the width and height
859 * by 16 rather than the normal Y-tile size of 32.
861 unsigned width_in_tiles
= ALIGN(width
, 16) / 16;
862 unsigned height_in_tiles
= ALIGN(height
, 16) / 16;
863 unsigned pitch_minus_1
= 127;
864 unsigned size_needed
= (width_in_tiles
+ height_in_tiles
- 1) * 4096;
865 brw_get_scratch_bo(brw
, &brw
->wm
.multisampled_null_render_target_bo
,
868 surf
[0] = (BRW_SURFACE_2D
<< BRW_SURFACE_TYPE_SHIFT
|
869 ISL_FORMAT_B8G8R8A8_UNORM
<< BRW_SURFACE_FORMAT_SHIFT
);
870 surf
[1] = brw_state_reloc(&brw
->batch
, *out_offset
+ 4,
871 brw
->wm
.multisampled_null_render_target_bo
,
874 surf
[2] = ((width
- 1) << BRW_SURFACE_WIDTH_SHIFT
|
875 (height
- 1) << BRW_SURFACE_HEIGHT_SHIFT
);
877 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
880 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
882 surf
[3] = (BRW_SURFACE_TILED
| BRW_SURFACE_TILED_Y
|
883 pitch_minus_1
<< BRW_SURFACE_PITCH_SHIFT
);
884 surf
[4] = BRW_SURFACE_MULTISAMPLECOUNT_4
;
889 * Sets up a surface state structure to point at the given region.
890 * While it is only used for the front/back buffer currently, it should be
891 * usable for further buffers when doing ARB_draw_buffer support.
894 gen4_update_renderbuffer_surface(struct brw_context
*brw
,
895 struct gl_renderbuffer
*rb
,
899 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
900 struct gl_context
*ctx
= &brw
->ctx
;
901 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
902 struct intel_mipmap_tree
*mt
= irb
->mt
;
904 uint32_t tile_x
, tile_y
;
905 enum isl_format format
;
908 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
909 /* BRW_NEW_FS_PROG_DATA */
911 if (rb
->TexImage
&& !devinfo
->has_surface_tile_offset
) {
912 intel_renderbuffer_get_tile_offsets(irb
, &tile_x
, &tile_y
);
914 if (tile_x
!= 0 || tile_y
!= 0) {
915 /* Original gen4 hardware couldn't draw to a non-tile-aligned
916 * destination in a miptree unless you actually setup your renderbuffer
917 * as a miptree and used the fragile lod/array_index/etc. controls to
918 * select the image. So, instead, we just make a new single-level
919 * miptree and render into that.
921 intel_renderbuffer_move_to_temp(brw
, irb
, false);
922 assert(irb
->align_wa_mt
);
923 mt
= irb
->align_wa_mt
;
927 surf
= brw_state_batch(brw
, 6 * 4, 32, &offset
);
929 format
= brw
->mesa_to_isl_render_format
[rb_format
];
930 if (unlikely(!brw
->mesa_format_supports_render
[rb_format
])) {
931 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
932 __func__
, _mesa_get_format_name(rb_format
));
935 surf
[0] = (BRW_SURFACE_2D
<< BRW_SURFACE_TYPE_SHIFT
|
936 format
<< BRW_SURFACE_FORMAT_SHIFT
);
939 assert(mt
->offset
% mt
->cpp
== 0);
940 surf
[1] = brw_state_reloc(&brw
->batch
, offset
+ 4, mt
->bo
,
942 intel_renderbuffer_get_tile_offsets(irb
,
947 surf
[2] = ((rb
->Width
- 1) << BRW_SURFACE_WIDTH_SHIFT
|
948 (rb
->Height
- 1) << BRW_SURFACE_HEIGHT_SHIFT
);
950 surf
[3] = (brw_get_surface_tiling_bits(mt
->surf
.tiling
) |
951 (mt
->surf
.row_pitch
- 1) << BRW_SURFACE_PITCH_SHIFT
);
953 surf
[4] = brw_get_surface_num_multisamples(mt
->surf
.samples
);
955 assert(devinfo
->has_surface_tile_offset
|| (tile_x
== 0 && tile_y
== 0));
956 /* Note that the low bits of these fields are missing, so
957 * there's the possibility of getting in trouble.
959 assert(tile_x
% 4 == 0);
960 assert(tile_y
% 2 == 0);
961 surf
[5] = ((tile_x
/ 4) << BRW_SURFACE_X_OFFSET_SHIFT
|
962 (tile_y
/ 2) << BRW_SURFACE_Y_OFFSET_SHIFT
|
963 (mt
->surf
.image_alignment_el
.height
== 4 ?
964 BRW_SURFACE_VERTICAL_ALIGN_ENABLE
: 0));
966 if (devinfo
->gen
< 6) {
968 if (!ctx
->Color
.ColorLogicOpEnabled
&& !ctx
->Color
._AdvancedBlendMode
&&
969 (ctx
->Color
.BlendEnabled
& (1 << unit
)))
970 surf
[0] |= BRW_SURFACE_BLEND_ENABLED
;
972 if (!GET_COLORMASK_BIT(ctx
->Color
.ColorMask
, unit
, 0))
973 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT
;
974 if (!GET_COLORMASK_BIT(ctx
->Color
.ColorMask
, unit
, 1))
975 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT
;
976 if (!GET_COLORMASK_BIT(ctx
->Color
.ColorMask
, unit
, 2))
977 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT
;
979 /* As mentioned above, disable writes to the alpha component when the
980 * renderbuffer is XRGB.
982 if (ctx
->DrawBuffer
->Visual
.alphaBits
== 0 ||
983 !GET_COLORMASK_BIT(ctx
->Color
.ColorMask
, unit
, 3)) {
984 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT
;
992 update_renderbuffer_surfaces(struct brw_context
*brw
)
994 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
995 const struct gl_context
*ctx
= &brw
->ctx
;
997 /* _NEW_BUFFERS | _NEW_COLOR */
998 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1000 /* Render targets always start at binding table index 0. */
1001 const unsigned rt_start
= 0;
1003 uint32_t *surf_offsets
= brw
->wm
.base
.surf_offset
;
1005 /* Update surfaces for drawing buffers */
1006 if (fb
->_NumColorDrawBuffers
>= 1) {
1007 for (unsigned i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1008 struct gl_renderbuffer
*rb
= fb
->_ColorDrawBuffers
[i
];
1010 if (intel_renderbuffer(rb
)) {
1011 surf_offsets
[rt_start
+ i
] = devinfo
->gen
>= 6 ?
1012 gen6_update_renderbuffer_surface(brw
, rb
, i
, rt_start
+ i
) :
1013 gen4_update_renderbuffer_surface(brw
, rb
, i
, rt_start
+ i
);
1015 emit_null_surface_state(brw
, fb
, &surf_offsets
[rt_start
+ i
]);
1019 emit_null_surface_state(brw
, fb
, &surf_offsets
[rt_start
]);
1022 /* The PIPE_CONTROL command description says:
1024 * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
1025 * points to a different RENDER_SURFACE_STATE, SW must issue a Render
1026 * Target Cache Flush by enabling this bit. When render target flush
1027 * is set due to new association of BTI, PS Scoreboard Stall bit must
1028 * be set in this packet."
1030 if (devinfo
->gen
>= 11) {
1031 brw_emit_pipe_control_flush(brw
,
1032 PIPE_CONTROL_RENDER_TARGET_FLUSH
|
1033 PIPE_CONTROL_STALL_AT_SCOREBOARD
);
1036 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1039 const struct brw_tracked_state brw_renderbuffer_surfaces
= {
1041 .mesa
= _NEW_BUFFERS
|
1043 .brw
= BRW_NEW_BATCH
,
1045 .emit
= update_renderbuffer_surfaces
,
1048 const struct brw_tracked_state gen6_renderbuffer_surfaces
= {
1050 .mesa
= _NEW_BUFFERS
,
1051 .brw
= BRW_NEW_BATCH
|
1054 .emit
= update_renderbuffer_surfaces
,
/* Build texture SURFACE_STATE entries that let the fragment shader read back
 * the current color draw buffers (the non-coherent framebuffer-fetch path).
 * Only runs when the FS actually reads a render target and the coherent
 * EXT_shader_framebuffer_fetch path is not available.
 */
static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         /* Render-target-read surfaces live at a dedicated binding-table
          * range, separate from the RT write surfaces.
          */
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            /* The sampler must see the same format the RT is written with. */
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer.
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            /* View exactly the one miplevel / layer range bound for drawing. */
            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            /* Sampling must not use auxiliary compression if drawing to the
             * same buffer has it disabled, so the two views stay coherent.
             */
            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            /* No renderbuffer attached: give the shader a null surface. */
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}
/* Atom driving the framebuffer-fetch read surfaces above. */
const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE | /* NOTE(review): reconstructed — this flag
                                  * line was lost in extraction; confirm. */
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};
1138 is_depth_texture(struct intel_texture_object
*iobj
)
1140 GLenum base_format
= _mesa_get_format_base_format(iobj
->_Format
);
1141 return base_format
== GL_DEPTH_COMPONENT
||
1142 (base_format
== GL_DEPTH_STENCIL
&& !iobj
->base
.StencilSampling
);
/* Emit texture SURFACE_STATE for every sampler used by one shader stage.
 *
 * for_gather selects the alternate gather4 surface range (pre-gen8 only);
 * plane selects the per-plane surface range used for planar YUV sampling.
 */
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA — offset into the stage's binding table at either
    * the gather range or the requested plane range.
    */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}
/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE — plane 0 surfaces for every render stage. */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* emit alternate set of surface state for gather. this
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   /* Extra per-plane surfaces (planes 1 and 2) are only needed by the
    * fragment stage, for planar YUV sampling.
    */
   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}
/* Atom for the render-pipeline texture surfaces: re-emitted whenever any
 * stage's program or prog_data changes, a texture buffer is modified, or a
 * new batch begins.
 */
const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE | /* NOTE(review): reconstructed — this flag
                                  * line was lost in extraction; confirm. */
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};
1277 brw_update_cs_texture_surfaces(struct brw_context
*brw
)
1279 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1281 /* BRW_NEW_COMPUTE_PROGRAM */
1282 struct gl_program
*cs
= brw
->programs
[MESA_SHADER_COMPUTE
];
1285 update_stage_texture_surfaces(brw
, cs
, &brw
->cs
.base
, false, 0);
1287 /* emit alternate set of surface state for gather. this
1288 * allows the surface format to be overriden for only the
1291 if (devinfo
->gen
< 8) {
1292 if (cs
&& cs
->info
.uses_texture_gather
)
1293 update_stage_texture_surfaces(brw
, cs
, &brw
->cs
.base
, true, 0);
1296 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
/* Atom for compute-shader texture surfaces. */
const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE, /* NOTE(review): reconstructed — this flag
                                 * line was lost in extraction; confirm. */
   },
   .emit = brw_update_cs_texture_surfaces,
};
/* Emit a buffer SURFACE_STATE for one indexed buffer binding point
 * (UBO / SSBO / atomic counter buffer).  Unbound or zero-sized ranges get a
 * null surface so out-of-bounds shader accesses read zero / are discarded.
 */
static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   struct gl_context *ctx = &brw->ctx;

   if (binding->BufferObject == ctx->Shared->NullBufferObj) {
      /* Nothing bound at this index. */
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      /* By default the surface covers from Offset to the end of the buffer;
       * an explicit glBindBufferRange() size clamps it further.
       */
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      /* NOTE(review): reconstructed — the guard around this second null
       * surface (an empty/negative range check) was lost in extraction;
       * confirm the exact condition against upstream.
       */
      if (size <= 0) {
         emit_null_surface_state(brw, NULL, out_offset);
         return;
      }

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}
/* Emit SURFACE_STATE for all UBOs, atomic counter buffers, and SSBOs used by
 * one stage's program.  Surfaces land in the stage binding table at
 * prog_data->binding_table.ubo_start / .ssbo_start (ABOs precede SSBOs in the
 * ssbo range).
 */
static void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   /* Nothing to do if the stage has no buffer-backed blocks at all. */
   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
      /* UBOs are read-only, sampled as RGBA32F constant data. */
      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
   }

   /* Atomic counter buffers share the ssbo binding-table range and come
    * first; SSBO surfaces follow immediately after them.
    */
   uint32_t *abo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;

   for (int i = 0; i < prog->info.num_abos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[
            prog->sh.ShaderStorageBlocks[i]->Binding];
      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   /* Binding-table layout changed, so pushed constants must be re-emitted. */
   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}
/* Fragment-stage wrapper: upload buffer surfaces for the current FS. */
static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}
/* Atom for fragment-stage UBO/SSBO/ABO surfaces. */
const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};
/* Compute-stage wrapper: upload buffer surfaces for the current CS. */
static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}
/* Atom for compute-stage UBO/SSBO/ABO surfaces. */
const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};
/* Compute-stage wrapper: upload shader-image surfaces for the current CS. */
static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}
/* Atom for compute-stage shader-image surfaces. */
const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |     /* NOTE(review): reconstructed — these */
             BRW_NEW_IMAGE_UNITS,    /* flag lines were lost in extraction;
                                      * confirm against upstream. */
   },
   .emit = brw_upload_cs_image_surfaces,
};
/* Pick the hardware surface format for a shader image unit, based on the
 * GL access qualifier: write-only images can use the native format, readable
 * images must use a format typed surface reads actually support, and
 * everything else falls back to RAW (untyped) access.
 */
static enum isl_format
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY || access == GL_NONE) {
      /* Writes support the full format set; no lowering needed. */
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}
/* Initialize a brw_image_param to all-zeros with swizzling disabled — the
 * baseline the buffer/texture variants build on, and the value used for
 * invalid image units.
 */
static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}
/* Fill the image params for a buffer texture image: 1-D size in texels plus
 * the texel stride in bytes; everything else stays at the defaults.
 */
static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          struct brw_image_param *param)
{
   const unsigned size = buffer_texture_range_size(brw, u->TexObj);
   update_default_image_param(brw, u, param);

   /* size is in bytes; convert to texel count for the shader. */
   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}
1501 get_image_num_layers(const struct intel_mipmap_tree
*mt
, GLenum target
,
1504 if (target
== GL_TEXTURE_CUBE_MAP
)
1507 return target
== GL_TEXTURE_3D
?
1508 minify(mt
->surf
.logical_level0_px
.depth
, level
) :
1509 mt
->surf
.logical_level0_px
.array_len
;
/* Emit the SURFACE_STATE and brw_image_param for a single shader image unit.
 * Handles three cases: buffer textures, RAW-format (untyped) miptree access,
 * and normal typed miptree access; invalid units get a null surface.
 */
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);
      const bool written = (access != GL_READ_ONLY && access != GL_NONE);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         /* RAW surfaces address individual bytes, so texel size is 1. */
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));
         const unsigned buffer_size = buffer_texture_range_size(brw, obj);
         struct brw_bo *const bo = !obj->BufferObject ? NULL :
            intel_bufferobj_buffer(brw, intel_buffer_object(obj->BufferObject),
                                   obj->BufferOffset, buffer_size, written);

         brw_emit_buffer_surface_state(
            brw, surf_offset, bo, obj->BufferOffset,
            format, buffer_size, texel_size,
            written ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         /* View exactly the bound level / layer range, offset by any
          * glTextureView() MinLevel/MinLayer.
          */
         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            /* Untyped access: expose the whole BO range as raw bytes. */
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               written ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            /* Image access cannot resolve aux compression on the fly, so the
             * viewed range must already be resolved.
             * NOTE(review): the middle assert arguments were lost in
             * extraction and are reconstructed — confirm against upstream.
             */
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   written ? RELOC_WRITE : 0);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
      }

   } else {
      /* Invalid image unit: null surface plus default (all-zero) params. */
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, param);
   }
}
/* Emit surfaces and image params for every image unit a stage's program
 * uses, then flag surfaces and program constants for re-upload.
 */
static void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata dependent on the context
       * image unit state and passed to the program as uniforms, make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}
/* Fragment-stage wrapper: upload shader-image surfaces for the current FS. */
static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}
/* Atom for fragment-stage shader-image surfaces. */
const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |    /* NOTE(review): reconstructed — these */
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,   /* flag lines were lost in extraction;
                                     * confirm against upstream. */
   },
   .emit = brw_upload_wm_image_surfaces,
};
/* Emit the RAW buffer surface backing gl_NumWorkGroups for the compute
 * stage.  For indirect dispatch the counts already live in a BO; for direct
 * dispatch the three GLuint counts are copied into the upload buffer first.
 */
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         /* Direct dispatch: stage the CPU-side counts into the upload
          * buffer.
          * NOTE(review): the trailing brw_upload_data() arguments were lost
          * in extraction and are reconstructed — confirm the signature
          * against intel_upload.h.
          */
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *)brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo, &bo_offset);
      } else {
         /* Indirect dispatch: counts already live in a BO. */
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}
1675 const struct brw_tracked_state brw_cs_work_groups_surface
= {
1677 .brw
= BRW_NEW_CS_PROG_DATA
|
1678 BRW_NEW_CS_WORK_GROUPS
1680 .emit
= brw_upload_cs_work_groups_surface
,