/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "main/teximage.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_instruction.h"
41 #include "main/framebuffer.h"
42 #include "main/shaderapi.h"
46 #include "intel_mipmap_tree.h"
47 #include "intel_batchbuffer.h"
48 #include "intel_tex.h"
49 #include "intel_fbo.h"
50 #include "intel_buffer_objects.h"
52 #include "brw_context.h"
53 #include "brw_state.h"
54 #include "brw_defines.h"

struct surface_state_info {
   unsigned num_dwords;
   unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
   unsigned reloc_dw;
   unsigned aux_reloc_dw;
   unsigned tex_mocs;
   unsigned rb_mocs;
};

static const struct surface_state_info surface_state_infos[] = {
   [4] = {6,  32, 1,  0},
   [5] = {6,  32, 1,  0},
   [6] = {6,  32, 1,  0},
   [7] = {8,  32, 1,  6, GEN7_MOCS_L3, GEN7_MOCS_L3},
   [8] = {13, 64, 8, 10, BDW_MOCS_WB,  BDW_MOCS_PTE},
   [9] = {16, 64, 8, 10, SKL_MOCS_WB,  SKL_MOCS_PTE},
};
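
/* The table above is indexed directly by brw->gen: e.g. surface_state_infos[8]
 * says that on Broadwell a RENDER_SURFACE_STATE is 13 dwords aligned to
 * 64 bytes, with the base-address reloc in dword 8 and the auxiliary-surface
 * address reloc in dword 10.
 */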

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       uint32_t mocs, uint32_t *surf_offset, int surf_index,
                       unsigned read_domains, unsigned write_domains)
{
   const struct surface_state_info ss_info = surface_state_infos[brw->gen];
   uint32_t tile_x = 0, tile_y = 0;
   uint32_t offset = mt->offset;

   struct isl_surf surf;
   intel_miptree_get_isl_surf(brw, mt, &surf);

   surf.dim = get_isl_surf_dim(target);

   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(brw->intelScreen->devinfo, mt->tiling, target);

   if (surf.dim_layout != dim_layout) {
      /* The layout of the specified texture target is not compatible with the
       * actual layout of the miptree structure in memory -- You're entering
       * dangerous territory, this can only possibly work if you only intended
       * to access a single level and slice of the texture, and the hardware
       * supports the tile offset feature in order to allow non-tile-aligned
       * base offsets, since we'll have to point the hardware to the first
       * texel of the level instead of relying on the usual base level/layer
       * controls.
       */
      assert(brw->has_surface_tile_offset);
      assert(view.levels == 1 && view.array_len == 1);

      offset += intel_miptree_get_tile_offsets(mt, view.base_level,
                                               view.base_array_layer,
                                               &tile_x, &tile_y);
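
      /* minify(x, l) below is MAX2(1, x >> l); e.g. a 67-texel-wide level 0
       * viewed at base_level 2 becomes 16 texels wide.
       */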
      /* Minify the logical dimensions of the texture. */
      const unsigned l = view.base_level - mt->first_level;
      surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
      surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
         minify(surf.logical_level0_px.height, l);
      surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
         minify(surf.logical_level0_px.depth, l);

      /* Only the base level and layer can be addressed with the overridden
       * layout.
       */
      surf.logical_level0_px.array_len = 1;
      surf.levels = 1;
      surf.dim_layout = dim_layout;

      /* The requested slice of the texture is now at the base level and
       * layer.
       */
      view.base_level = 0;
      view.base_array_layer = 0;
   }

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct isl_surf *aux_surf = NULL, aux_surf_s;
   uint64_t aux_offset = 0;
   enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
   if (mt->mcs_mt &&
       ((view.usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) ||
        mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)) {
      intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
      aux_surf = &aux_surf_s;
      assert(mt->mcs_mt->offset == 0);
      aux_offset = mt->mcs_mt->bo->offset64;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = intel_miptree_get_isl_clear_color(brw, mt);
   }

   uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    ss_info.num_dwords * 4, ss_info.ss_align,
                                    surf_index, surf_offset);

   isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
                       .address = mt->bo->offset64 + offset,
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = mocs, .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 4 * ss_info.reloc_dw,
                           mt->bo, offset,
                           read_domains, write_domains);

   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       */
      assert((aux_offset & 0xfff) == 0);
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *surf_offset + 4 * ss_info.aux_reloc_dw,
                              mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
                              read_domains, write_domains);
   }
}
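
/* Emits the SURFACE_STATE for a color renderbuffer through the shared
 * isl-based helper above; installed as brw->vtbl.update_renderbuffer_surface
 * for gen6+ (see gen6_init_vtable_surface_functions at the end of this file).
 */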
static uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                bool layered, unsigned unit /* unused */,
                                uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));
   intel_miptree_used_for_rendering(mt);

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   const unsigned layer_multiplier =
      (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
       irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
      MAX2(irb->mt->num_samples, 1) : 1;

   struct isl_view view = {
      .format = brw->render_target_format[rb_format],
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer / layer_multiplier,
      .array_len = MAX2(irb->layer_count, 1),
      .channel_select = {
         ISL_CHANNEL_SELECT_RED,
         ISL_CHANNEL_SELECT_GREEN,
         ISL_CHANNEL_SELECT_BLUE,
         ISL_CHANNEL_SELECT_ALPHA,
      },
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          surface_state_infos[brw->gen].rb_mocs,
                          &offset, surf_index,
                          I915_GEM_DOMAIN_RENDER,
                          I915_GEM_DOMAIN_RENDER);
   return offset;
}
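
/* Map a GL texture target onto the BRW_SURFACE_* surface type used in
 * SURFACE_STATE.
 */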
static uint32_t
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return BRW_SURFACE_TILED;
   case I915_TILING_Y:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 * and
 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
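/* For example, SWIZZLE_X (0) maps to (0 + 4) & 7 = 4 = HSW_SCS_RED, and
 * SWIZZLE_ZERO (4) maps to (4 + 4) & 7 = 0 = HSW_SCS_ZERO.
 */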
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      const unsigned view_num_layers =
         (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
                                                            mt->logical_depth0;

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      unsigned format = translate_tex_format(brw, mesa_fmt,
                                             sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
            format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = brw->is_haswell;
         } else if (brw->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case BRW_SURFACEFORMAT_R8_SINT:
            case BRW_SURFACEFORMAT_R8_UINT:
               format = BRW_SURFACEFORMAT_R8_UNORM;
               break;

            case BRW_SURFACEFORMAT_R16_SINT:
            case BRW_SURFACEFORMAT_R16_UINT:
               format = BRW_SURFACEFORMAT_R16_UNORM;
               break;

            case BRW_SURFACEFORMAT_R32_SINT:
            case BRW_SURFACEFORMAT_R32_UINT:
               format = BRW_SURFACEFORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         assert(brw->gen >= 8);
         mt = mt->stencil_mt;
         format = BRW_SURFACEFORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .channel_select = {
            swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      brw_emit_surface_state(brw, mt, mt->target, view,
                             surface_state_infos[brw->gen].tex_mocs,
                             surf_offset, surf_index,
                             I915_GEM_DOMAIN_SAMPLER, 0);
   }
}
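
/* Shared SURFACE_STATE emission for buffer surfaces (texel buffers, UBOs,
 * SSBOs, atomic counter buffers), sized and aligned via the gen-indexed
 * table at the top of this file.
 */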
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              drm_intel_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              bool rw)
{
   const struct surface_state_info ss_info = surface_state_infos[brw->gen];

   uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                  ss_info.num_dwords * 4, ss_info.ss_align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = (bo ? bo->offset64 : 0) + buffer_offset,
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = ss_info.tex_mocs);

   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *out_offset + 4 * ss_info.reloc_dw,
                              bo, buffer_offset,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   drm_intel_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   uint32_t brw_format = brw_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
   }

   if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 brw_format,
                                 size / texel_size,
                                 texel_size,
                                 false /* rw */);
}

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
                                 size, 1, false);
}

/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          drm_intel_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 BRW_SURFACEFORMAT_RAW,
                                 size, 1, true);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                             offset_bytes,
                                             buffer_obj->Size - offset_bytes);
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
   switch (num_vector_components) {
   case 1:
      surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = bo->offset64 + offset_bytes; /* reloc */
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents. */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *out_offset + 4,
                           bo, offset_bytes,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
brw_emit_null_surface_state(struct brw_context *brw,
                            unsigned width,
                            unsigned height,
                            unsigned samples,
                            uint32_t *out_offset)
{
   /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
    * Notes):
    *
    *     A null surface will be used in instances where an actual surface is
    *     not bound. When a write message is generated to a null surface, no
    *     actual surface is written to. When a read message (including any
    *     sampling engine message) is generated to a null surface, the result
    *     is all zeros. Note that a null surface type is allowed to be used
    *     with all messages, even if it is not specificially indicated as
    *     supported. All of the remaining fields in surface state are ignored
    *     for null surfaces, with the following exceptions:
    *
    *     - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
    *       depth buffer's corresponding state for all render target surfaces,
    *       including null.
    *
    *     - Surface Format must be R8G8B8A8_UNORM.
    */
   unsigned surface_type = BRW_SURFACE_NULL;
   drm_intel_bo *bo = NULL;
   unsigned pitch_minus_1 = 0;
   uint32_t multisampling_state = 0;
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);

   if (samples > 1) {
      /* On Gen6, null render targets seem to cause GPU hangs when
       * multisampling.  So work around this problem by rendering into a dummy
       * color buffer.
       *
       * To decrease the amount of memory needed by the workaround buffer, we
       * set its pitch to 128 bytes (the width of a Y tile).  This means that
       * the amount of memory needed for the workaround buffer is
       * (width_in_tiles + height_in_tiles - 1) tiles.
       *
       * Note that since the workaround buffer will be interpreted by the
       * hardware as an interleaved multisampled buffer, we need to compute
       * width_in_tiles and height_in_tiles by dividing the width and height
       * by 16 rather than the normal Y-tile size of 32.
       */
      unsigned width_in_tiles = ALIGN(width, 16) / 16;
      unsigned height_in_tiles = ALIGN(height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
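
      /* For example, a 2048x1536 framebuffer needs only
       * (128 + 96 - 1) * 4096 bytes (~0.9 MB) of scratch here, rather than a
       * full-size dummy render target.
       */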
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;
      pitch_minus_1 = 127;
      multisampling_state = brw_get_surface_num_multisamples(samples);
   }

   surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   if (brw->gen < 6) {
      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
   }
   surf[1] = bo ? bo->offset64 : 0;
   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = multisampling_state;
   surf[5] = 0;

   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *out_offset + 4,
                              bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 bool layered, unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   uint32_t format = 0;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !brw->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         mt = irb->mt;
      }
   }

   intel_miptree_used_for_rendering(irb->mt);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);

   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
              mt->bo->offset64 + mt->offset);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->num_samples);

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   /* _NEW_COLOR */
   if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
       (ctx->Color.BlendEnabled & (1 << unit)))
      surf[0] |= BRW_SURFACE_BLEND_ENABLED;

   if (!ctx->Color.ColorMask[unit][0])
      surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
   if (!ctx->Color.ColorMask[unit][1])
      surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
   if (!ctx->Color.ColorMask[unit][2])
      surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

   /* As mentioned above, disable writes to the alpha component when the
    * renderbuffer is XRGB.
    */
   if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
       !ctx->Color.ColorMask[unit][3]) {
      surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}

/**
 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
 */
void
brw_update_renderbuffer_surfaces(struct brw_context *brw,
                                 const struct gl_framebuffer *fb,
                                 uint32_t render_target_start,
                                 uint32_t *surf_offset)
{
   GLuint i;
   const unsigned int w = _mesa_geometric_width(fb);
   const unsigned int h = _mesa_geometric_height(fb);
   const unsigned int s = _mesa_geometric_samples(fb);

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const uint32_t surf_index = render_target_start + i;

         if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
            surf_offset[surf_index] =
               brw->vtbl.update_renderbuffer_surface(
                  brw, fb->_ColorDrawBuffers[i],
                  _mesa_geometric_layers(fb) > 0, i, surf_index);
         } else {
            brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                              &surf_offset[surf_index]);
         }
      }
   } else {
      const uint32_t surf_index = render_target_start;
      brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                        &surf_offset[surf_index]);
   }
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   brw_update_renderbuffer_surfaces(
      brw, fb,
      brw->wm.prog_data->binding_table.render_target_start,
      brw->wm.base.surf_offset);
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
       brw->fragment_program &&
       brw->fragment_program->Base.OutputsRead) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            brw->wm.prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const unsigned format = brw->render_target_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(brw->intelScreen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            /* intel_renderbuffer::mt_layer is expressed in sample units for
             * the UMS and CMS multisample layouts, but
             * intel_renderbuffer::layer_count is expressed in units of whole
             * logical layers regardless of the multisample layout.
             */
            const unsigned mt_layer_unit =
               (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
                irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
               MAX2(irb->mt->num_samples, 1) : 1;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer / mt_layer_unit,
               .array_len = irb->layer_count,
               .channel_select = {
                  ISL_CHANNEL_SELECT_RED,
                  ISL_CHANNEL_SELECT_GREEN,
                  ISL_CHANNEL_SELECT_BLUE,
                  ISL_CHANNEL_SELECT_ALPHA,
               },
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            brw_emit_surface_state(brw, irb->mt, target, view,
                                   surface_state_infos[brw->gen].tex_mocs,
                                   surf_offset, surf_index,
                                   I915_GEM_DOMAIN_SAMPLER, 0);

         } else {
            brw->vtbl.emit_null_surface_state(
               brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
               _mesa_geometric_samples(fb), surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
         }
      }
   }
}

/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* emit alternate set of surface state for gather. this
    * allows the surface format to be overridden for only the
    * gather4 messages. */
   if (brw->gen < 8) {
      if (vs && vs->UsesGather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->UsesGather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->UsesGather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->UsesGather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->UsesGather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* emit alternate set of surface state for gather. this
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (brw->gen < 8) {
      if (cs && cs->UsesGather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = brw_update_cs_texture_surfaces,
};

void
brw_upload_ubo_surfaces(struct brw_context *brw,
                        struct gl_linked_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!shader)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < shader->NumUniformBlocks; i++) {
      struct gl_uniform_buffer_binding *binding =
         &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
      struct gl_shader_storage_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (!prog)
      return;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                           &brw->wm.base, &brw->wm.prog_data->base);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (!prog)
      return;

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                           &brw->cs.base, &brw->cs.prog_data->base);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_abo_surfaces(struct brw_context *brw,
                        struct gl_linked_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (shader && shader->NumAtomicBuffers) {
      for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
         struct gl_atomic_buffer_binding *binding =
            &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         drm_intel_bo *bo = intel_bufferobj_buffer(
            brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);

         brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                       binding->Offset, BRW_SURFACEFORMAT_RAW,
                                       bo->size - binding->Offset, 1, true);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                              &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                              &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                                &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,
   },
   .emit = brw_upload_cs_image_surfaces,
};
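
/* Choose the SURFACE_STATE format for a shader image: write-only access can
 * use the format as-is, typed reads may need lowering to a format the
 * hardware can read, and everything else falls back to RAW with untyped
 * messages.
 */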
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   uint32_t hw_format = brw_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return BRW_SURFACEFORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static void
update_texture_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = minify(mt->logical_width0, u->Level);
   param->size[1] = minify(mt->logical_height0, u->Level);
   param->size[2] = (!u->Layered ? 1 :
                     u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                     u->TexObj->Target == GL_TEXTURE_3D ?
                     minify(mt->logical_depth0, u->Level) :
                     mt->logical_depth0);

   intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
                                  &param->offset[0],
                                  &param->offset[1]);

   param->stride[0] = mt->cpp;
   param->stride[1] = mt->pitch / mt->cpp;
   param->stride[2] =
      brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
   param->stride[3] =
      brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);

   if (mt->tiling == I915_TILING_X) {
      /* An X tile is a rectangular block of 512x8 bytes. */
      param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(8);
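
      /* e.g. with cpp = 4 this gives tiling[0] = log2(512 / 4) = 7
       * (128 pixels per X-tile row) and tiling[1] = log2(8) = 3.
       */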
      if (brw->has_swizzling) {
         /* Right shifts required to swizzle bits 9 and 10 of the memory
          * address with bit 6.
          */
         param->swizzling[0] = 3;
         param->swizzling[1] = 4;
      }
   } else if (mt->tiling == I915_TILING_Y) {
      /* The layout of a Y-tiled surface in memory isn't really fundamentally
       * different to the layout of an X-tiled surface, we simply pretend that
       * the surface is broken up in a number of smaller 16Bx32 tiles, each
       * one arranged in X-major order just like is the case for X-tiling.
       */
      param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(32);
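
      /* e.g. with cpp = 4 this gives tiling[0] = log2(16 / 4) = 2 and
       * tiling[1] = log2(32) = 5.
       */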
      if (brw->has_swizzling) {
         /* Right shift required to swizzle bit 9 of the memory address with
          * bit 6.
          */
         param->swizzling[0] = 3;
      }
   }

   /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
    * address calculation algorithm (emit_address_calculation() in
    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
    * modulus equal to the LOD.
    */
   param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
                       0);
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         if (format == BRW_SURFACEFORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY);

         } else {
            const unsigned num_layers = (!u->Layered ? 1 :
                                         obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                                         mt->logical_depth0);

            struct isl_view view = {
               .format = format,
               .base_level = obj->MinLevel + u->Level,
               .levels = 1,
               .base_array_layer = obj->MinLayer + u->_Layer,
               .array_len = num_layers,
               .channel_select = {
                  ISL_CHANNEL_SELECT_RED,
                  ISL_CHANNEL_SELECT_GREEN,
                  ISL_CHANNEL_SELECT_BLUE,
                  ISL_CHANNEL_SELECT_ALPHA,
               },
               .usage = ISL_SURF_USAGE_STORAGE_BIT,
            };

            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

            brw_emit_surface_state(brw, mt, mt->target, view,
                                   surface_state_infos[brw->gen].tex_mocs,
                                   surf_offset, surf_index,
                                   I915_GEM_DOMAIN_SAMPLER,
                                   access == GL_READ_ONLY ? 0 :
                                             I915_GEM_DOMAIN_SAMPLER);
         }

         update_texture_image_param(brw, u, surface_idx, param);
      }

   } else {
      brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          struct gl_linked_shader *shader,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (shader && shader->NumImages) {
      for (unsigned i = 0; i < shader->NumImages; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, shader->ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &prog_data->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata dependent on the context
       * image unit state and passed to the program as uniforms, make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                                &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,
   },
   .emit = brw_upload_wm_image_surfaces,
};

void
gen4_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
}

void
gen6_init_vtable_surface_functions(struct brw_context *brw)
{
   gen4_init_vtable_surface_functions(brw);
   brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
}

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog && brw->cs.prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         brw->cs.prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      drm_intel_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    BRW_SURFACEFORMAT_RAW,
                                    3 * sizeof(GLuint), 1, true);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CS_WORK_GROUPS,
   },
   .emit = brw_upload_cs_work_groups_surface,
};