2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 #include "main/mtypes.h"
24 #include "main/blend.h"
25 #include "main/samplerobj.h"
26 #include "main/texformat.h"
27 #include "main/teximage.h"
28 #include "program/prog_parameter.h"
29 #include "program/prog_instruction.h"
31 #include "intel_mipmap_tree.h"
32 #include "intel_batchbuffer.h"
33 #include "intel_tex.h"
34 #include "intel_fbo.h"
35 #include "intel_buffer_objects.h"
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
43 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
44 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
46 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
49 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
51 * which is simply adding 4 then modding by 8 (or anding with 7).
53 * We then may need to apply workarounds for textureGather hardware bugs.
56 swizzle_to_scs(GLenum swizzle
, bool need_green_to_blue
)
58 unsigned scs
= (swizzle
+ 4) & 7;
60 return (need_green_to_blue
&& scs
== HSW_SCS_GREEN
) ? HSW_SCS_BLUE
: scs
;
64 gen7_surface_tiling_mode(uint32_t tiling
)
68 return GEN7_SURFACE_TILING_X
;
70 return GEN7_SURFACE_TILING_Y
;
72 return GEN7_SURFACE_TILING_NONE
;
78 gen7_surface_msaa_bits(unsigned num_samples
, enum intel_msaa_layout layout
)
82 assert(num_samples
<= 16);
84 /* The SURFACE_MULTISAMPLECOUNT_X enums are simply log2(num_samples) << 3. */
85 ss4
|= (ffs(MAX2(num_samples
, 1)) - 1) << 3;
87 if (layout
== INTEL_MSAA_LAYOUT_IMS
)
88 ss4
|= GEN7_SURFACE_MSFMT_DEPTH_STENCIL
;
90 ss4
|= GEN7_SURFACE_MSFMT_MSS
;
97 gen7_set_surface_mcs_info(struct brw_context
*brw
,
100 const struct intel_mipmap_tree
*mcs_mt
,
101 bool is_render_target
)
103 /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
105 * "The MCS surface must be stored as Tile Y."
107 assert(mcs_mt
->tiling
== I915_TILING_Y
);
109 /* Compute the pitch in units of tiles. To do this we need to divide the
110 * pitch in bytes by 128, since a single Y-tile is 128 bytes wide.
112 unsigned pitch_tiles
= mcs_mt
->pitch
/ 128;
114 /* The upper 20 bits of surface state DWORD 6 are the upper 20 bits of the
115 * GPU address of the MCS buffer; the lower 12 bits contain other control
116 * information. Since buffer addresses are always on 4k boundaries (and
117 * thus have their lower 12 bits zero), we can use an ordinary reloc to do
118 * the necessary address translation.
120 assert ((mcs_mt
->bo
->offset64
& 0xfff) == 0);
122 surf
[6] = GEN7_SURFACE_MCS_ENABLE
|
123 SET_FIELD(pitch_tiles
- 1, GEN7_SURFACE_MCS_PITCH
) |
124 mcs_mt
->bo
->offset64
;
126 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
130 is_render_target
? I915_GEM_DOMAIN_RENDER
131 : I915_GEM_DOMAIN_SAMPLER
,
132 is_render_target
? I915_GEM_DOMAIN_RENDER
: 0);
137 gen7_check_surface_setup(uint32_t *surf
, bool is_render_target
)
139 unsigned num_multisamples
= surf
[4] & INTEL_MASK(5, 3);
140 unsigned multisampled_surface_storage_format
= surf
[4] & (1 << 6);
141 unsigned surface_array_spacing
= surf
[0] & (1 << 10);
142 bool is_multisampled
= num_multisamples
!= GEN7_SURFACE_MULTISAMPLECOUNT_1
;
144 (void) surface_array_spacing
;
146 /* From the Ivybridge PRM, Volume 4 Part 1, page 66 (RENDER_SURFACE_STATE
147 * dword 0 bit 10 "Surface Array Spacing" Programming Notes):
149 * If Multisampled Surface Storage Format is MSFMT_MSS and Number of
150 * Multisamples is not MULTISAMPLECOUNT_1, this field must be set to
153 if (multisampled_surface_storage_format
== GEN7_SURFACE_MSFMT_MSS
155 assert(surface_array_spacing
== GEN7_SURFACE_ARYSPC_LOD0
);
157 /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
158 * dword 4 bit 6 "Multisampled Surface Storage" Programming Notes):
160 * All multisampled render target surfaces must have this field set to
165 * This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
167 if (is_render_target
&& is_multisampled
) {
168 assert(multisampled_surface_storage_format
== GEN7_SURFACE_MSFMT_MSS
);
171 /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
172 * dword 4 bit 6 "Multisampled Surface Storage Format" Errata):
174 * If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8, Width
175 * is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
176 * field must be set to MSFMT_MSS.
178 uint32_t width
= GET_FIELD(surf
[2], GEN7_SURFACE_WIDTH
) + 1;
179 if (num_multisamples
== GEN7_SURFACE_MULTISAMPLECOUNT_8
&& width
>= 8193) {
180 assert(multisampled_surface_storage_format
== GEN7_SURFACE_MSFMT_MSS
);
183 /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
184 * dword 4 bit 6 "Multisampled Surface Storage Format" Errata):
186 * If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8,
187 * ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface’s Number of
188 * Multisamples is MULTISAMPLECOUNT_4, ((Depth+1) * (Height+1)) is >
189 * 8,388,608, this field must be set to MSFMT_DEPTH_STENCIL.This field
190 * must be set to MSFMT_DEPTH_STENCIL if Surface Format is one of the
191 * following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
192 * R24_UNORM_X8_TYPELESS.
194 * But also (from the Programming Notes):
196 * This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
198 uint32_t depth
= GET_FIELD(surf
[3], BRW_SURFACE_DEPTH
) + 1;
199 uint32_t height
= GET_FIELD(surf
[2], GEN7_SURFACE_HEIGHT
) + 1;
200 if (num_multisamples
== GEN7_SURFACE_MULTISAMPLECOUNT_8
&&
201 depth
* height
> 4194304) {
202 assert(multisampled_surface_storage_format
==
203 GEN7_SURFACE_MSFMT_DEPTH_STENCIL
);
205 if (num_multisamples
== GEN7_SURFACE_MULTISAMPLECOUNT_4
&&
206 depth
* height
> 8388608) {
207 assert(multisampled_surface_storage_format
==
208 GEN7_SURFACE_MSFMT_DEPTH_STENCIL
);
210 if (is_multisampled
) {
211 switch (GET_FIELD(surf
[0], BRW_SURFACE_FORMAT
)) {
212 case BRW_SURFACEFORMAT_I24X8_UNORM
:
213 case BRW_SURFACEFORMAT_L24X8_UNORM
:
214 case BRW_SURFACEFORMAT_A24X8_UNORM
:
215 case BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS
:
216 assert(multisampled_surface_storage_format
==
217 GEN7_SURFACE_MSFMT_DEPTH_STENCIL
);
223 gen7_emit_buffer_surface_state(struct brw_context
*brw
,
224 uint32_t *out_offset
,
226 unsigned buffer_offset
,
227 unsigned surface_format
,
228 unsigned buffer_size
,
232 uint32_t *surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
233 8 * 4, 32, out_offset
);
234 memset(surf
, 0, 8 * 4);
236 surf
[0] = BRW_SURFACE_BUFFER
<< BRW_SURFACE_TYPE_SHIFT
|
237 surface_format
<< BRW_SURFACE_FORMAT_SHIFT
|
238 BRW_SURFACE_RC_READ_WRITE
;
239 surf
[1] = (bo
? bo
->offset64
: 0) + buffer_offset
; /* reloc */
240 surf
[2] = SET_FIELD((buffer_size
- 1) & 0x7f, GEN7_SURFACE_WIDTH
) |
241 SET_FIELD(((buffer_size
- 1) >> 7) & 0x3fff, GEN7_SURFACE_HEIGHT
);
242 if (surface_format
== BRW_SURFACEFORMAT_RAW
)
243 surf
[3] = SET_FIELD(((buffer_size
- 1) >> 21) & 0x3ff, BRW_SURFACE_DEPTH
);
245 surf
[3] = SET_FIELD(((buffer_size
- 1) >> 21) & 0x3f, BRW_SURFACE_DEPTH
);
246 surf
[3] |= (pitch
- 1);
248 surf
[5] = SET_FIELD(GEN7_MOCS_L3
, GEN7_SURFACE_MOCS
);
250 if (brw
->is_haswell
) {
251 surf
[7] |= (SET_FIELD(HSW_SCS_RED
, GEN7_SURFACE_SCS_R
) |
252 SET_FIELD(HSW_SCS_GREEN
, GEN7_SURFACE_SCS_G
) |
253 SET_FIELD(HSW_SCS_BLUE
, GEN7_SURFACE_SCS_B
) |
254 SET_FIELD(HSW_SCS_ALPHA
, GEN7_SURFACE_SCS_A
));
257 /* Emit relocation to surface contents */
259 drm_intel_bo_emit_reloc(brw
->batch
.bo
, *out_offset
+ 4,
260 bo
, buffer_offset
, I915_GEM_DOMAIN_SAMPLER
,
261 (rw
? I915_GEM_DOMAIN_SAMPLER
: 0));
264 gen7_check_surface_setup(surf
, false /* is_render_target */);
268 gen7_emit_texture_surface_state(struct brw_context
*brw
,
269 struct intel_mipmap_tree
*mt
,
271 unsigned min_layer
, unsigned max_layer
,
272 unsigned min_level
, unsigned max_level
,
275 uint32_t *surf_offset
,
276 int surf_index
/* unused */,
277 bool rw
, bool for_gather
)
279 const unsigned depth
= max_layer
- min_layer
;
280 uint32_t *surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
281 8 * 4, 32, surf_offset
);
283 memset(surf
, 0, 8 * 4);
285 surf
[0] = translate_tex_target(target
) << BRW_SURFACE_TYPE_SHIFT
|
286 format
<< BRW_SURFACE_FORMAT_SHIFT
|
287 gen7_surface_tiling_mode(mt
->tiling
);
289 /* mask of faces present in cube map; for other surfaces MBZ. */
290 if (target
== GL_TEXTURE_CUBE_MAP
|| target
== GL_TEXTURE_CUBE_MAP_ARRAY
)
291 surf
[0] |= BRW_SURFACE_CUBEFACE_ENABLES
;
294 surf
[0] |= GEN7_SURFACE_VALIGN_4
;
296 surf
[0] |= GEN7_SURFACE_HALIGN_8
;
298 if (_mesa_is_array_texture(target
) || target
== GL_TEXTURE_CUBE_MAP
)
299 surf
[0] |= GEN7_SURFACE_IS_ARRAY
;
301 if (mt
->array_layout
== ALL_SLICES_AT_EACH_LOD
)
302 surf
[0] |= GEN7_SURFACE_ARYSPC_LOD0
;
304 surf
[1] = mt
->bo
->offset64
+ mt
->offset
; /* reloc */
306 surf
[2] = SET_FIELD(mt
->logical_width0
- 1, GEN7_SURFACE_WIDTH
) |
307 SET_FIELD(mt
->logical_height0
- 1, GEN7_SURFACE_HEIGHT
);
309 surf
[3] = SET_FIELD(depth
- 1, BRW_SURFACE_DEPTH
) |
312 if (brw
->is_haswell
&& _mesa_is_format_integer(mt
->format
))
313 surf
[3] |= HSW_SURFACE_IS_INTEGER_FORMAT
;
315 surf
[4] = gen7_surface_msaa_bits(mt
->num_samples
, mt
->msaa_layout
) |
316 SET_FIELD(min_layer
, GEN7_SURFACE_MIN_ARRAY_ELEMENT
) |
317 SET_FIELD(depth
- 1, GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT
);
319 surf
[5] = (SET_FIELD(GEN7_MOCS_L3
, GEN7_SURFACE_MOCS
) |
320 SET_FIELD(min_level
- mt
->first_level
, GEN7_SURFACE_MIN_LOD
) |
322 (max_level
- min_level
- 1));
324 surf
[7] = mt
->fast_clear_color_value
;
326 if (brw
->is_haswell
) {
327 const bool need_scs_green_to_blue
= for_gather
&& format
== BRW_SURFACEFORMAT_R32G32_FLOAT_LD
;
330 SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle
, 0), need_scs_green_to_blue
), GEN7_SURFACE_SCS_R
) |
331 SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle
, 1), need_scs_green_to_blue
), GEN7_SURFACE_SCS_G
) |
332 SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle
, 2), need_scs_green_to_blue
), GEN7_SURFACE_SCS_B
) |
333 SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle
, 3), need_scs_green_to_blue
), GEN7_SURFACE_SCS_A
);
337 gen7_set_surface_mcs_info(brw
, surf
, *surf_offset
,
338 mt
->mcs_mt
, false /* is RT */);
341 /* Emit relocation to surface contents */
342 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
345 surf
[1] - mt
->bo
->offset64
,
346 I915_GEM_DOMAIN_SAMPLER
,
347 (rw
? I915_GEM_DOMAIN_SAMPLER
: 0));
349 gen7_check_surface_setup(surf
, false /* is_render_target */);
353 gen7_update_texture_surface(struct gl_context
*ctx
,
355 uint32_t *surf_offset
,
358 struct brw_context
*brw
= brw_context(ctx
);
359 struct gl_texture_object
*obj
= ctx
->Texture
.Unit
[unit
]._Current
;
361 if (obj
->Target
== GL_TEXTURE_BUFFER
) {
362 brw_update_buffer_texture_surface(ctx
, unit
, surf_offset
);
365 struct intel_texture_object
*intel_obj
= intel_texture_object(obj
);
366 struct intel_mipmap_tree
*mt
= intel_obj
->mt
;
367 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit
);
368 /* If this is a view with restricted NumLayers, then our effective depth
369 * is not just the miptree depth.
371 const unsigned depth
= (obj
->Immutable
&& obj
->Target
!= GL_TEXTURE_3D
?
372 obj
->NumLayers
: mt
->logical_depth0
);
374 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
375 * texturing functions that return a float, as our code generation always
376 * selects the .x channel (which would always be 0).
378 struct gl_texture_image
*firstImage
= obj
->Image
[0][obj
->BaseLevel
];
379 const bool alpha_depth
= obj
->DepthMode
== GL_ALPHA
&&
380 (firstImage
->_BaseFormat
== GL_DEPTH_COMPONENT
||
381 firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
);
382 const unsigned swizzle
= (unlikely(alpha_depth
) ? SWIZZLE_XYZW
:
383 brw_get_texture_swizzle(&brw
->ctx
, obj
));
385 unsigned format
= translate_tex_format(
386 brw
, intel_obj
->_Format
, sampler
->sRGBDecode
);
388 if (for_gather
&& format
== BRW_SURFACEFORMAT_R32G32_FLOAT
)
389 format
= BRW_SURFACEFORMAT_R32G32_FLOAT_LD
;
391 const int surf_index
= surf_offset
- &brw
->wm
.base
.surf_offset
[0];
393 gen7_emit_texture_surface_state(brw
, mt
, obj
->Target
,
394 obj
->MinLayer
, obj
->MinLayer
+ depth
,
395 obj
->MinLevel
+ obj
->BaseLevel
,
396 obj
->MinLevel
+ intel_obj
->_MaxLevel
+ 1,
397 format
, swizzle
, surf_offset
,
398 surf_index
, false, for_gather
);
403 * Creates a null surface.
405 * This is used when the shader doesn't write to any color output. An FB
406 * write to target 0 will still be emitted, because that's how the thread is
407 * terminated (and computed depth is returned), so we need to have the
408 * hardware discard the target 0 color output..
411 gen7_emit_null_surface_state(struct brw_context
*brw
,
415 uint32_t *out_offset
)
417 /* From the Ivy bridge PRM, Vol4 Part1 p62 (Surface Type: Programming
420 * A null surface is used in instances where an actual surface is not
421 * bound. When a write message is generated to a null surface, no
422 * actual surface is written to. When a read message (including any
423 * sampling engine message) is generated to a null surface, the result
424 * is all zeros. Note that a null surface type is allowed to be used
425 * with all messages, even if it is not specificially indicated as
426 * supported. All of the remaining fields in surface state are ignored
427 * for null surfaces, with the following exceptions: Width, Height,
428 * Depth, LOD, and Render Target View Extent fields must match the
429 * depth buffer’s corresponding state for all render target surfaces,
432 uint32_t *surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
, 8 * 4, 32,
434 memset(surf
, 0, 8 * 4);
436 /* From the Ivybridge PRM, Volume 4, Part 1, page 65,
437 * Tiled Surface: Programming Notes:
438 * "If Surface Type is SURFTYPE_NULL, this field must be TRUE."
440 surf
[0] = BRW_SURFACE_NULL
<< BRW_SURFACE_TYPE_SHIFT
|
441 BRW_SURFACEFORMAT_B8G8R8A8_UNORM
<< BRW_SURFACE_FORMAT_SHIFT
|
442 GEN7_SURFACE_TILING_Y
;
444 surf
[2] = SET_FIELD(width
- 1, GEN7_SURFACE_WIDTH
) |
445 SET_FIELD(height
- 1, GEN7_SURFACE_HEIGHT
);
447 gen7_check_surface_setup(surf
, true /* is_render_target */);
451 * Sets up a surface state structure to point at the given region.
452 * While it is only used for the front/back buffer currently, it should be
453 * usable for further buffers when doing ARB_draw_buffer support.
456 gen7_update_renderbuffer_surface(struct brw_context
*brw
,
457 struct gl_renderbuffer
*rb
,
458 bool layered
, unsigned unit
/* unused */,
461 struct gl_context
*ctx
= &brw
->ctx
;
462 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
463 struct intel_mipmap_tree
*mt
= irb
->mt
;
466 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
468 bool is_array
= false;
469 int depth
= MAX2(irb
->layer_count
, 1);
470 const uint8_t mocs
= GEN7_MOCS_L3
;
473 int min_array_element
= irb
->mt_layer
/ MAX2(mt
->num_samples
, 1);
475 GLenum gl_target
= rb
->TexImage
?
476 rb
->TexImage
->TexObject
->Target
: GL_TEXTURE_2D
;
478 uint32_t *surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
, 8 * 4, 32,
480 memset(surf
, 0, 8 * 4);
482 intel_miptree_used_for_rendering(irb
->mt
);
484 /* Render targets can't use IMS layout */
485 assert(irb
->mt
->msaa_layout
!= INTEL_MSAA_LAYOUT_IMS
);
487 assert(brw_render_target_supported(brw
, rb
));
488 format
= brw
->render_target_format
[rb_format
];
489 if (unlikely(!brw
->format_supported_as_render_target
[rb_format
])) {
490 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
491 __func__
, _mesa_get_format_name(rb_format
));
495 case GL_TEXTURE_CUBE_MAP_ARRAY
:
496 case GL_TEXTURE_CUBE_MAP
:
497 surftype
= BRW_SURFACE_2D
;
502 depth
= MAX2(irb
->mt
->logical_depth0
, 1);
505 surftype
= translate_tex_target(gl_target
);
506 is_array
= _mesa_is_array_texture(gl_target
);
510 surf
[0] = surftype
<< BRW_SURFACE_TYPE_SHIFT
|
511 format
<< BRW_SURFACE_FORMAT_SHIFT
|
512 (irb
->mt
->array_layout
== ALL_SLICES_AT_EACH_LOD
?
513 GEN7_SURFACE_ARYSPC_LOD0
: GEN7_SURFACE_ARYSPC_FULL
) |
514 gen7_surface_tiling_mode(mt
->tiling
);
516 if (irb
->mt
->valign
== 4)
517 surf
[0] |= GEN7_SURFACE_VALIGN_4
;
518 if (irb
->mt
->halign
== 8)
519 surf
[0] |= GEN7_SURFACE_HALIGN_8
;
522 surf
[0] |= GEN7_SURFACE_IS_ARRAY
;
525 assert(mt
->offset
% mt
->cpp
== 0);
526 surf
[1] = mt
->bo
->offset64
+ mt
->offset
;
528 assert(brw
->has_surface_tile_offset
);
530 surf
[5] = SET_FIELD(mocs
, GEN7_SURFACE_MOCS
) |
531 (irb
->mt_level
- irb
->mt
->first_level
);
533 surf
[2] = SET_FIELD(irb
->mt
->logical_width0
- 1, GEN7_SURFACE_WIDTH
) |
534 SET_FIELD(irb
->mt
->logical_height0
- 1, GEN7_SURFACE_HEIGHT
);
536 surf
[3] = ((depth
- 1) << BRW_SURFACE_DEPTH_SHIFT
) |
539 surf
[4] = gen7_surface_msaa_bits(irb
->mt
->num_samples
, irb
->mt
->msaa_layout
) |
540 min_array_element
<< GEN7_SURFACE_MIN_ARRAY_ELEMENT_SHIFT
|
541 (depth
- 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT
;
543 if (irb
->mt
->mcs_mt
) {
544 gen7_set_surface_mcs_info(brw
, surf
, offset
,
545 irb
->mt
->mcs_mt
, true /* is RT */);
548 surf
[7] = irb
->mt
->fast_clear_color_value
;
550 if (brw
->is_haswell
) {
551 surf
[7] |= (SET_FIELD(HSW_SCS_RED
, GEN7_SURFACE_SCS_R
) |
552 SET_FIELD(HSW_SCS_GREEN
, GEN7_SURFACE_SCS_G
) |
553 SET_FIELD(HSW_SCS_BLUE
, GEN7_SURFACE_SCS_B
) |
554 SET_FIELD(HSW_SCS_ALPHA
, GEN7_SURFACE_SCS_A
));
557 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
560 surf
[1] - mt
->bo
->offset64
,
561 I915_GEM_DOMAIN_RENDER
,
562 I915_GEM_DOMAIN_RENDER
);
564 gen7_check_surface_setup(surf
, true /* is_render_target */);
570 gen7_init_vtable_surface_functions(struct brw_context
*brw
)
572 brw
->vtbl
.update_texture_surface
= gen7_update_texture_surface
;
573 brw
->vtbl
.update_renderbuffer_surface
= gen7_update_renderbuffer_surface
;
574 brw
->vtbl
.emit_null_surface_state
= gen7_emit_null_surface_state
;
575 brw
->vtbl
.emit_texture_surface_state
= gen7_emit_texture_surface_state
;
576 brw
->vtbl
.emit_buffer_surface_state
= gen7_emit_buffer_surface_state
;