/**************************************************************************

 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "main/teximage.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_instruction.h"
41 #include "main/framebuffer.h"
45 #include "intel_mipmap_tree.h"
46 #include "intel_batchbuffer.h"
47 #include "intel_tex.h"
48 #include "intel_fbo.h"
49 #include "intel_buffer_objects.h"
51 #include "brw_context.h"
52 #include "brw_state.h"
53 #include "brw_defines.h"
struct surface_state_info {
   unsigned num_dwords;
   unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
   unsigned reloc_dw;
   unsigned aux_reloc_dw;
   unsigned tex_mocs;
   unsigned rb_mocs;
};

static const struct surface_state_info surface_state_infos[] = {
   [7] = {8,  32, 1, 6,  GEN7_MOCS_L3, GEN7_MOCS_L3},
   [8] = {13, 64, 8, 10, BDW_MOCS_WB,  BDW_MOCS_PTE},
   [9] = {16, 64, 8, 10, SKL_MOCS_WB,  SKL_MOCS_PTE},
};
static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       const struct isl_view *view,
                       uint32_t mocs, bool for_gather,
                       uint32_t *surf_offset, int surf_index,
                       unsigned read_domains, unsigned write_domains)
{
   const struct surface_state_info ss_info = surface_state_infos[brw->gen];

   struct isl_surf surf;
   intel_miptree_get_isl_surf(brw, mt, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct isl_surf *aux_surf = NULL, aux_surf_s;
   uint64_t aux_offset = 0;
   enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
   if (mt->mcs_mt &&
       ((view->usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) ||
        mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)) {
      intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
      aux_surf = &aux_surf_s;
      assert(mt->mcs_mt->offset == 0);
      aux_offset = mt->mcs_mt->bo->offset64;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = intel_miptree_get_isl_clear_color(brw, mt);
   }

   uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    ss_info.num_dwords * 4, ss_info.ss_align,
                                    surf_index, surf_offset);

   isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = view,
                       .address = mt->bo->offset64 + mt->offset,
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = mocs, .clear_color = clear_color);

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 4 * ss_info.reloc_dw,
                           mt->bo, mt->offset,
                           read_domains, write_domains);

   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       */
      assert((aux_offset & 0xfff) == 0);
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *surf_offset + 4 * ss_info.aux_reloc_dw,
                              mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
                              read_domains, write_domains);
   }
}
static uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                bool layered, unsigned unit /* unused */,
                                uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));
   intel_miptree_used_for_rendering(mt);

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   const unsigned layer_multiplier =
      (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
       irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
      MAX2(irb->mt->num_samples, 1) : 1;

   struct isl_view view = {
      .format = brw->render_target_format[rb_format],
      .base_level = irb->mt_level - irb->mt->first_level,
      .base_array_layer = irb->mt_layer / layer_multiplier,
      .array_len = MAX2(irb->layer_count, 1),
      .channel_select = {
         ISL_CHANNEL_SELECT_RED,
         ISL_CHANNEL_SELECT_GREEN,
         ISL_CHANNEL_SELECT_BLUE,
         ISL_CHANNEL_SELECT_ALPHA,
      },
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, &view,
                          surface_state_infos[brw->gen].rb_mocs, false,
                          &offset, surf_index,
                          I915_GEM_DOMAIN_RENDER,
                          I915_GEM_DOMAIN_RENDER);
   return offset;
}
uint32_t
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}
uint32_t
brw_get_surface_tiling_bits(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return BRW_SURFACE_TILED;
   case I915_TILING_Y:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}
/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 * and
 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
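
/* Editor's note (illustrative, not from the original source): assuming the
 * usual Mesa SWIZZLE_* values 0..5 and the hardware SCS encoding where
 * SCS_ZERO = 0, SCS_ONE = 1 and SCS_RED..SCS_ALPHA = 4..7, the mapping above
 * works out as:
 *
 *    SWIZZLE_X    (0) -> (0 + 4) & 7 = 4 = SCS_RED
 *    SWIZZLE_W    (3) -> (3 + 4) & 7 = 7 = SCS_ALPHA
 *    SWIZZLE_ZERO (4) -> (4 + 4) & 7 = 0 = SCS_ZERO
 *    SWIZZLE_ONE  (5) -> (5 + 4) & 7 = 1 = SCS_ONE
 */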
static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;
      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      const unsigned mt_num_layers =
         mt->logical_depth0 * (_mesa_is_cube_map_texture(mt->target) ? 6 : 1);
      const unsigned view_num_layers =
         (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
                                                            mt_num_layers;

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      unsigned format = translate_tex_format(
         brw, intel_obj->_Format, sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
            format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = brw->is_haswell;
         } else if (brw->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case BRW_SURFACEFORMAT_R8_SINT:
            case BRW_SURFACEFORMAT_R8_UINT:
               format = BRW_SURFACEFORMAT_R8_UNORM;
               break;

            case BRW_SURFACEFORMAT_R16_SINT:
            case BRW_SURFACEFORMAT_R16_UINT:
               format = BRW_SURFACEFORMAT_R16_UNORM;
               break;

            case BRW_SURFACEFORMAT_R32_SINT:
            case BRW_SURFACEFORMAT_R32_UINT:
               format = BRW_SURFACEFORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         assert(brw->gen >= 8);
         mt = mt->stencil_mt;
         format = BRW_SURFACEFORMAT_R8_UINT;
      } else if (obj->Target == GL_TEXTURE_EXTERNAL_OES) {
         if (plane > 0)
            mt = mt->plane[plane - 1];
         if (mt == NULL)
            return;
         format = translate_tex_format(brw, mt->format, sampler->sRGBDecode);
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .channel_select = {
            swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      brw_emit_surface_state(brw, mt, &view,
                             surface_state_infos[brw->gen].tex_mocs, for_gather,
                             surf_offset, surf_index,
                             I915_GEM_DOMAIN_SAMPLER, 0);
   }
}
static void
gen4_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   unsigned elements = buffer_size / pitch;

   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    6 * 4, 32, out_offset);
   memset(surf, 0, 6 * 4);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             (brw->gen >= 6 ? BRW_SURFACE_RC_READ_WRITE : 0);
   surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
   surf[2] = ((elements - 1) & 0x7f) << BRW_SURFACE_WIDTH_SHIFT |
             (((elements - 1) >> 7) & 0x1fff) << BRW_SURFACE_HEIGHT_SHIFT;
   surf[3] = (((elements - 1) >> 20) & 0x7f) << BRW_SURFACE_DEPTH_SHIFT |
             (pitch - 1) << BRW_SURFACE_PITCH_SHIFT;

   /* Emit relocation to surface contents.  The 965 PRM, Volume 4, section
    * 5.1.2 "Data Cache" says: "the data cache does not exist as a separate
    * physical cache.  It is mapped in hardware to the sampler cache."
    */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
                              bo, buffer_offset,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }
}
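
/* Editor's note (illustrative, not from the original source): the buffer
 * SURFACE_STATE above stores (elements - 1) split across the Width[6:0],
 * Height (13 bits starting at bit 7) and Depth (7 bits starting at bit 20)
 * fields.  For example, a 64 KB buffer of RGBA32F texels (pitch = 16 bytes)
 * has elements = 65536 / 16 = 4096, so elements - 1 = 4095 = 0xfff, which
 * packs as Width = 0x7f, Height = 0x1f, Depth = 0.
 */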
static void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   drm_intel_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   uint32_t brw_format = brw_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
   }

   if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw->vtbl.emit_buffer_surface_state(brw, surf_offset, bo,
                                       tObj->BufferOffset,
                                       brw_format,
                                       size,
                                       texel_size,
                                       false /* rw */);
}
/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                       BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
                                       size, 1, false);
}
/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          drm_intel_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                       BRW_SURFACEFORMAT_RAW,
                                       size, 1, true);
}
/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                             offset_bytes,
                                             buffer_obj->Size - offset_bytes);
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = bo->offset64 + offset_bytes; /* reloc */
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents. */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *out_offset + 4,
                           bo, offset_bytes,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
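
/* Editor's note (illustrative, not from the original source): the
 * buffer_size_minus_1 value computed above is split across the same
 * Width / Height / Depth bit fields as any other buffer surface.  For
 * instance, with a 1 MB buffer (262144 dwords), offset_dwords = 0,
 * stride_dwords = 16 and num_vector_components = 4, buffer_size_minus_1 =
 * (262144 - 0 - 4) / 16 = 16383 = 0x3fff, giving width = 0x7f,
 * height = 0x7f and depth = 0.
 */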
/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;

   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};
/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
brw_emit_null_surface_state(struct brw_context *brw,
                            unsigned width,
                            unsigned height,
                            unsigned samples,
                            uint32_t *out_offset)
{
   /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
    * Notes):
    *
    *     A null surface will be used in instances where an actual surface is
    *     not bound. When a write message is generated to a null surface, no
    *     actual surface is written to. When a read message (including any
    *     sampling engine message) is generated to a null surface, the result
    *     is all zeros. Note that a null surface type is allowed to be used
    *     with all messages, even if it is not specifically indicated as
    *     supported. All of the remaining fields in surface state are ignored
    *     for null surfaces, with the following exceptions:
    *
    *     - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
    *       depth buffer’s corresponding state for all render target surfaces,
    *       including null.
    *
    *     - Surface Format must be R8G8B8A8_UNORM.
    */
   unsigned surface_type = BRW_SURFACE_NULL;
   drm_intel_bo *bo = NULL;
   unsigned pitch_minus_1 = 0;
   uint32_t multisampling_state = 0;
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);

   if (samples > 1) {
      /* On Gen6, null render targets seem to cause GPU hangs when
       * multisampling.  So work around this problem by rendering into dummy
       * color buffer.
       *
       * To decrease the amount of memory needed by the workaround buffer, we
       * set its pitch to 128 bytes (the width of a Y tile).  This means that
       * the amount of memory needed for the workaround buffer is
       * (width_in_tiles + height_in_tiles - 1) tiles.
       *
       * Note that since the workaround buffer will be interpreted by the
       * hardware as an interleaved multisampled buffer, we need to compute
       * width_in_tiles and height_in_tiles by dividing the width and height
       * by 16 rather than the normal Y-tile size of 32.
       */
      unsigned width_in_tiles = ALIGN(width, 16) / 16;
      unsigned height_in_tiles = ALIGN(height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;
      pitch_minus_1 = 127;
      multisampling_state = brw_get_surface_num_multisamples(samples);
   }

   surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   if (brw->gen < 6) {
      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
   }
   surf[1] = bo ? bo->offset64 : 0;
   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = multisampling_state;
   surf[5] = 0;

   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *out_offset + 4,
                              bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }
}
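
/* Editor's note (illustrative, not from the original source): for the
 * multisampled-null workaround above, a 1920x1080 framebuffer gives
 * width_in_tiles = ALIGN(1920, 16) / 16 = 120 and height_in_tiles =
 * ALIGN(1080, 16) / 16 = 68, so the dummy buffer only needs
 * (120 + 68 - 1) * 4096 bytes, roughly 748 KB, instead of a full-size
 * color buffer.
 */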
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 bool layered, unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   uint32_t format = 0;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !brw->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         mt = irb->mt;
      }
   }

   intel_miptree_used_for_rendering(irb->mt);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);

   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
              mt->bo->offset64 + mt->offset);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->num_samples);

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}
/**
 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
 */
void
brw_update_renderbuffer_surfaces(struct brw_context *brw,
                                 const struct gl_framebuffer *fb,
                                 uint32_t render_target_start,
                                 uint32_t *surf_offset)
{
   GLuint i;
   const unsigned int w = _mesa_geometric_width(fb);
   const unsigned int h = _mesa_geometric_height(fb);
   const unsigned int s = _mesa_geometric_samples(fb);

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const uint32_t surf_index = render_target_start + i;

         if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
            surf_offset[surf_index] =
               brw->vtbl.update_renderbuffer_surface(
                  brw, fb->_ColorDrawBuffers[i],
                  _mesa_geometric_layers(fb) > 0, i, surf_index);
         } else {
            brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                              &surf_offset[surf_index]);
         }
      }
   } else {
      const uint32_t surf_index = render_target_start;
      brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                        &surf_offset[surf_index]);
   }
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   brw_update_renderbuffer_surfaces(
      brw, fb,
      brw->wm.prog_data->binding_table.render_target_start,
      brw->wm.base.surf_offset);
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = update_renderbuffer_surfaces,
};
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw->vtbl.update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
         }
      }
   }
}
/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* emit alternate set of surface state for gather. this
    * allows the surface format to be overridden for only the
    * gather4 messages. */
   if (brw->gen < 8) {
      if (vs && vs->UsesGather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->UsesGather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->UsesGather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->UsesGather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->UsesGather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};
static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* emit alternate set of surface state for gather. this
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (brw->gen < 8) {
      if (cs && cs->UsesGather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = brw_update_cs_texture_surfaces,
};
void
brw_upload_ubo_surfaces(struct brw_context *brw,
                        struct gl_linked_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!shader)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < shader->NumUniformBlocks; i++) {
      struct gl_uniform_buffer_binding *binding =
         &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
      struct gl_shader_storage_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}
static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (!prog)
      return;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                           &brw->wm.base, &brw->wm.prog_data->base);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};
static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (!prog)
      return;

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                           &brw->cs.base, &brw->cs.prog_data->base);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};
void
brw_upload_abo_surfaces(struct brw_context *brw,
                        struct gl_linked_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (shader && shader->NumAtomicBuffers) {
      for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
         struct gl_atomic_buffer_binding *binding =
            &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         drm_intel_bo *bo = intel_bufferobj_buffer(
            brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);

         brw->vtbl.emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                             binding->Offset, BRW_SURFACEFORMAT_RAW,
                                             bo->size - binding->Offset, 1, true);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}
static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                              &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};
static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                              &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};
static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                                &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,
   },
   .emit = brw_upload_cs_image_surfaces,
};
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   uint32_t hw_format = brw_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return BRW_SURFACEFORMAT_RAW;
   }
}
static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}
static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = obj->Size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}
static void
update_texture_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = minify(mt->logical_width0, u->Level);
   param->size[1] = minify(mt->logical_height0, u->Level);
   param->size[2] = (!u->Layered ? 1 :
                     u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                     u->TexObj->Target == GL_TEXTURE_3D ?
                     minify(mt->logical_depth0, u->Level) :
                     mt->logical_depth0);

   intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
                                  &param->offset[0],
                                  &param->offset[1]);

   param->stride[0] = mt->cpp;
   param->stride[1] = mt->pitch / mt->cpp;
   param->stride[2] =
      brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
   param->stride[3] =
      brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);

   if (mt->tiling == I915_TILING_X) {
      /* An X tile is a rectangular block of 512x8 bytes. */
      param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(8);

      if (brw->has_swizzling) {
         /* Right shifts required to swizzle bits 9 and 10 of the memory
          * address with bit 6.
          */
         param->swizzling[0] = 3;
         param->swizzling[1] = 4;
      }
   } else if (mt->tiling == I915_TILING_Y) {
      /* The layout of a Y-tiled surface in memory isn't really fundamentally
       * different to the layout of an X-tiled surface, we simply pretend that
       * the surface is broken up in a number of smaller 16Bx32 tiles, each
       * one arranged in X-major order just like is the case for X-tiling.
       */
      param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(32);

      if (brw->has_swizzling) {
         /* Right shift required to swizzle bit 9 of the memory address with
          * bit 6.
          */
         param->swizzling[0] = 3;
      }
   }

   /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
    * address calculation algorithm (emit_address_calculation() in
    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
    * modulus equal to the LOD.
    */
   param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
                       0);
}
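
/* Editor's note (illustrative, not from the original source): for an X-tiled
 * RGBA8 surface (mt->cpp = 4) the code above yields tiling[0] =
 * log2(512 / 4) = 7 and tiling[1] = log2(8) = 3, i.e. a 128x8-texel tile,
 * while a Y-tiled RGBA8 surface gets tiling[0] = log2(16 / 4) = 2 and
 * tiling[1] = log2(32) = 5, i.e. a 4x32-texel sub-tile.
 */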
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw->vtbl.emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         if (format == BRW_SURFACEFORMAT_RAW) {
            brw->vtbl.emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY);

         } else {
            const unsigned num_layers = (!u->Layered ? 1 :
                                         obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                                         mt->logical_depth0);

            struct isl_view view = {
               .format = format,
               .base_level = obj->MinLevel + u->Level,
               .base_array_layer = obj->MinLayer + u->_Layer,
               .array_len = num_layers,
               .channel_select = {
                  ISL_CHANNEL_SELECT_RED,
                  ISL_CHANNEL_SELECT_GREEN,
                  ISL_CHANNEL_SELECT_BLUE,
                  ISL_CHANNEL_SELECT_ALPHA,
               },
               .usage = ISL_SURF_USAGE_STORAGE_BIT,
            };

            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

            brw_emit_surface_state(brw, mt, &view,
                                   surface_state_infos[brw->gen].rb_mocs, false,
                                   surf_offset, surf_index,
                                   I915_GEM_DOMAIN_SAMPLER,
                                   access == GL_READ_ONLY ? 0 :
                                             I915_GEM_DOMAIN_SAMPLER);
         }

         update_texture_image_param(brw, u, surface_idx, param);
      }

   } else {
      brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}
void
brw_upload_image_surfaces(struct brw_context *brw,
                          struct gl_linked_shader *shader,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (shader && shader->NumImages) {
      for (unsigned i = 0; i < shader->NumImages; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, shader->ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &prog_data->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata dependent on the context
       * image unit state and passed to the program as uniforms, make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}
static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                                &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,
   },
   .emit = brw_upload_wm_image_surfaces,
};
void
gen4_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_texture_surface = brw_update_texture_surface;
   brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
   brw->vtbl.emit_buffer_surface_state = gen4_emit_buffer_surface_state;
}
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog && brw->cs.prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         brw->cs.prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      drm_intel_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw->vtbl.emit_buffer_surface_state(brw, surf_offset,
                                          bo, bo_offset,
                                          BRW_SURFACEFORMAT_RAW,
                                          3 * sizeof(GLuint), 1, true);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CS_WORK_GROUPS,
   },
   .emit = brw_upload_cs_work_groups_surface,
};