/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
23 #include "main/mtypes.h"
24 #include "main/samplerobj.h"
25 #include "program/prog_parameter.h"
27 #include "intel_mipmap_tree.h"
28 #include "intel_batchbuffer.h"
29 #include "intel_tex.h"
30 #include "intel_fbo.h"
31 #include "intel_buffer_objects.h"
33 #include "brw_context.h"
34 #include "brw_state.h"
35 #include "brw_defines.h"
39 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
40 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED)
43 swizzle_to_scs(GLenum swizzle
)
60 assert(!"Should not get here: invalid swizzle mode");
65 gen7_set_surface_tiling(struct gen7_surface_state
*surf
, uint32_t tiling
)
68 case I915_TILING_NONE
:
69 surf
->ss0
.tiled_surface
= 0;
70 surf
->ss0
.tile_walk
= 0;
73 surf
->ss0
.tiled_surface
= 1;
74 surf
->ss0
.tile_walk
= BRW_TILEWALK_XMAJOR
;
77 surf
->ss0
.tiled_surface
= 1;
78 surf
->ss0
.tile_walk
= BRW_TILEWALK_YMAJOR
;
85 gen7_set_surface_msaa(struct gen7_surface_state
*surf
, unsigned num_samples
,
86 enum intel_msaa_layout layout
)
89 surf
->ss4
.num_multisamples
= GEN7_SURFACE_MULTISAMPLECOUNT_8
;
90 else if (num_samples
> 1)
91 surf
->ss4
.num_multisamples
= GEN7_SURFACE_MULTISAMPLECOUNT_4
;
93 surf
->ss4
.num_multisamples
= GEN7_SURFACE_MULTISAMPLECOUNT_1
;
95 surf
->ss4
.multisampled_surface_storage_format
=
96 layout
== INTEL_MSAA_LAYOUT_IMS
?
97 GEN7_SURFACE_MSFMT_DEPTH_STENCIL
:
98 GEN7_SURFACE_MSFMT_MSS
;
103 gen7_set_surface_mcs_info(struct brw_context
*brw
,
104 struct gen7_surface_state
*surf
,
105 uint32_t surf_offset
,
106 const struct intel_mipmap_tree
*mcs_mt
,
107 bool is_render_target
)
109 /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
111 * "The MCS surface must be stored as Tile Y."
113 assert(mcs_mt
->region
->tiling
== I915_TILING_Y
);
115 /* Compute the pitch in units of tiles. To do this we need to divide the
116 * pitch in bytes by 128, since a single Y-tile is 128 bytes wide.
118 unsigned pitch_bytes
= mcs_mt
->region
->pitch
* mcs_mt
->cpp
;
119 unsigned pitch_tiles
= pitch_bytes
/ 128;
121 /* The upper 20 bits of surface state DWORD 6 are the upper 20 bits of the
122 * GPU address of the MCS buffer; the lower 12 bits contain other control
123 * information. Since buffer addresses are always on 4k boundaries (and
124 * thus have their lower 12 bits zero), we can use an ordinary reloc to do
125 * the necessary address translation.
127 assert ((mcs_mt
->region
->bo
->offset
& 0xfff) == 0);
128 surf
->ss6
.mcs_enabled
.mcs_enable
= 1;
129 surf
->ss6
.mcs_enabled
.mcs_surface_pitch
= pitch_tiles
- 1;
130 surf
->ss6
.mcs_enabled
.mcs_base_address
= mcs_mt
->region
->bo
->offset
>> 12;
131 drm_intel_bo_emit_reloc(brw
->intel
.batch
.bo
,
133 offsetof(struct gen7_surface_state
, ss6
),
135 surf
->ss6
.raw_data
& 0xfff,
136 is_render_target
? I915_GEM_DOMAIN_RENDER
137 : I915_GEM_DOMAIN_SAMPLER
,
138 is_render_target
? I915_GEM_DOMAIN_RENDER
: 0);
143 gen7_check_surface_setup(struct gen7_surface_state
*surf
,
144 bool is_render_target
)
146 bool is_multisampled
=
147 surf
->ss4
.num_multisamples
!= GEN7_SURFACE_MULTISAMPLECOUNT_1
;
148 /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
149 * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Surface Array
152 * If Multisampled Surface Storage Format is MSFMT_MSS and Number of
153 * Multisamples is not MULTISAMPLECOUNT_1, this field must be set to
156 if (surf
->ss4
.multisampled_surface_storage_format
== GEN7_SURFACE_MSFMT_MSS
158 assert(surf
->ss0
.surface_array_spacing
== GEN7_SURFACE_ARYSPC_LOD0
);
160 /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
161 * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
162 * Surface Storage Format:
164 * All multisampled render target surfaces must have this field set to
169 * This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
171 if (is_render_target
&& is_multisampled
) {
172 assert(surf
->ss4
.multisampled_surface_storage_format
==
173 GEN7_SURFACE_MSFMT_MSS
);
176 /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
177 * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
178 * Surface Storage Format:
180 * If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8, Width
181 * is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
182 * field must be set to MSFMT_MSS.
184 if (surf
->ss4
.num_multisamples
== GEN7_SURFACE_MULTISAMPLECOUNT_8
&&
185 surf
->ss2
.width
>= 8192) {
186 assert(surf
->ss4
.multisampled_surface_storage_format
==
187 GEN7_SURFACE_MSFMT_MSS
);
190 /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
191 * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
192 * Surface Storage Format:
194 * If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8,
195 * ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface’s Number of
196 * Multisamples is MULTISAMPLECOUNT_4, ((Depth+1) * (Height+1)) is >
197 * 8,388,608, this field must be set to MSFMT_DEPTH_STENCIL.This field
198 * must be set to MSFMT_DEPTH_STENCIL if Surface Format is one of the
199 * following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
200 * R24_UNORM_X8_TYPELESS.
204 * This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
206 uint32_t depth
= surf
->ss3
.depth
+ 1;
207 uint32_t height
= surf
->ss2
.height
+ 1;
208 if (surf
->ss4
.num_multisamples
== GEN7_SURFACE_MULTISAMPLECOUNT_8
&&
209 depth
* height
> 4194304) {
210 assert(surf
->ss4
.multisampled_surface_storage_format
==
211 GEN7_SURFACE_MSFMT_DEPTH_STENCIL
);
213 if (surf
->ss4
.num_multisamples
== GEN7_SURFACE_MULTISAMPLECOUNT_4
&&
214 depth
* height
> 8388608) {
215 assert(surf
->ss4
.multisampled_surface_storage_format
==
216 GEN7_SURFACE_MSFMT_DEPTH_STENCIL
);
218 if (is_multisampled
) {
219 switch (surf
->ss0
.surface_format
) {
220 case BRW_SURFACEFORMAT_I24X8_UNORM
:
221 case BRW_SURFACEFORMAT_L24X8_UNORM
:
222 case BRW_SURFACEFORMAT_A24X8_UNORM
:
223 case BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS
:
224 assert(surf
->ss4
.multisampled_surface_storage_format
==
225 GEN7_SURFACE_MSFMT_DEPTH_STENCIL
);
232 gen7_update_buffer_texture_surface(struct gl_context
*ctx
,
234 uint32_t *binding_table
,
237 struct brw_context
*brw
= brw_context(ctx
);
238 struct gl_texture_object
*tObj
= ctx
->Texture
.Unit
[unit
]._Current
;
239 struct gen7_surface_state
*surf
;
240 struct intel_buffer_object
*intel_obj
=
241 intel_buffer_object(tObj
->BufferObject
);
242 drm_intel_bo
*bo
= intel_obj
? intel_obj
->buffer
: NULL
;
243 gl_format format
= tObj
->_BufferObjectFormat
;
244 int texel_size
= _mesa_get_format_bytes(format
);
246 surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
247 sizeof(*surf
), 32, &binding_table
[surf_index
]);
248 memset(surf
, 0, sizeof(*surf
));
250 surf
->ss0
.surface_type
= BRW_SURFACE_BUFFER
;
251 surf
->ss0
.surface_format
= brw_format_for_mesa_format(format
);
253 surf
->ss0
.render_cache_read_write
= 1;
255 if (surf
->ss0
.surface_format
== 0 && format
!= MESA_FORMAT_RGBA_FLOAT32
) {
256 _mesa_problem(NULL
, "bad format %s for texture buffer\n",
257 _mesa_get_format_name(format
));
261 surf
->ss1
.base_addr
= bo
->offset
; /* reloc */
263 /* Emit relocation to surface contents. Section 5.1.1 of the gen4
264 * bspec ("Data Cache") says that the data cache does not exist as
265 * a separate cache and is just the sampler cache.
267 drm_intel_bo_emit_reloc(brw
->intel
.batch
.bo
,
268 (binding_table
[surf_index
] +
269 offsetof(struct gen7_surface_state
, ss1
)),
271 I915_GEM_DOMAIN_SAMPLER
, 0);
273 int w
= intel_obj
->Base
.Size
/ texel_size
;
274 surf
->ss2
.width
= w
& 0x7f; /* bits 6:0 of size or width */
275 surf
->ss2
.height
= (w
>> 7) & 0x1fff; /* bits 19:7 of size or width */
276 surf
->ss3
.depth
= (w
>> 20) & 0x7f; /* bits 26:20 of size or width */
277 surf
->ss3
.pitch
= texel_size
- 1;
279 surf
->ss1
.base_addr
= 0;
281 surf
->ss2
.height
= 0;
286 gen7_set_surface_tiling(surf
, I915_TILING_NONE
);
288 gen7_check_surface_setup(surf
, false /* is_render_target */);
292 gen7_update_texture_surface(struct gl_context
*ctx
,
294 uint32_t *binding_table
,
297 struct brw_context
*brw
= brw_context(ctx
);
298 struct gl_texture_object
*tObj
= ctx
->Texture
.Unit
[unit
]._Current
;
299 struct intel_texture_object
*intelObj
= intel_texture_object(tObj
);
300 struct intel_mipmap_tree
*mt
= intelObj
->mt
;
301 struct gl_texture_image
*firstImage
= tObj
->Image
[0][tObj
->BaseLevel
];
302 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit
);
303 struct gen7_surface_state
*surf
;
304 int width
, height
, depth
;
306 if (tObj
->Target
== GL_TEXTURE_BUFFER
) {
307 gen7_update_buffer_texture_surface(ctx
, unit
, binding_table
, surf_index
);
311 /* We don't support MSAA for textures. */
312 assert(!mt
->array_spacing_lod0
);
313 assert(mt
->num_samples
<= 1);
315 intel_miptree_get_dimensions_for_image(firstImage
, &width
, &height
, &depth
);
317 surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
318 sizeof(*surf
), 32, &binding_table
[surf_index
]);
319 memset(surf
, 0, sizeof(*surf
));
321 if (mt
->align_h
== 4)
322 surf
->ss0
.vertical_alignment
= 1;
323 if (mt
->align_w
== 8)
324 surf
->ss0
.horizontal_alignment
= 1;
326 surf
->ss0
.surface_type
= translate_tex_target(tObj
->Target
);
327 surf
->ss0
.surface_format
= translate_tex_format(mt
->format
,
328 firstImage
->InternalFormat
,
330 sampler
->sRGBDecode
);
331 if (tObj
->Target
== GL_TEXTURE_CUBE_MAP
) {
332 surf
->ss0
.cube_pos_x
= 1;
333 surf
->ss0
.cube_pos_y
= 1;
334 surf
->ss0
.cube_pos_z
= 1;
335 surf
->ss0
.cube_neg_x
= 1;
336 surf
->ss0
.cube_neg_y
= 1;
337 surf
->ss0
.cube_neg_z
= 1;
340 surf
->ss0
.is_array
= depth
> 1 && tObj
->Target
!= GL_TEXTURE_3D
;
342 gen7_set_surface_tiling(surf
, intelObj
->mt
->region
->tiling
);
344 /* ss0 remaining fields:
345 * - vert_line_stride (exists on gen6 but we ignore it)
346 * - vert_line_stride_ofs (exists on gen6 but we ignore it)
347 * - surface_array_spacing
348 * - render_cache_read_write (exists on gen6 but ignored here)
351 surf
->ss1
.base_addr
=
352 intelObj
->mt
->region
->bo
->offset
+ intelObj
->mt
->offset
; /* reloc */
354 surf
->ss2
.width
= width
- 1;
355 surf
->ss2
.height
= height
- 1;
357 surf
->ss3
.pitch
= (intelObj
->mt
->region
->pitch
* intelObj
->mt
->cpp
) - 1;
358 surf
->ss3
.depth
= depth
- 1;
362 surf
->ss5
.mip_count
= intelObj
->_MaxLevel
- tObj
->BaseLevel
;
363 surf
->ss5
.min_lod
= 0;
365 /* ss5 remaining fields:
366 * - x_offset (N/A for textures?)
371 if (brw
->intel
.is_haswell
) {
372 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
373 * texturing functions that return a float, as our code generation always
374 * selects the .x channel (which would always be 0).
376 const bool alpha_depth
= tObj
->DepthMode
== GL_ALPHA
&&
377 (firstImage
->_BaseFormat
== GL_DEPTH_COMPONENT
||
378 firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
);
381 unlikely(alpha_depth
) ? SWIZZLE_XYZW
: brw_get_texture_swizzle(tObj
);
383 surf
->ss7
.shader_channel_select_r
= swizzle_to_scs(GET_SWZ(swizzle
, 0));
384 surf
->ss7
.shader_channel_select_g
= swizzle_to_scs(GET_SWZ(swizzle
, 1));
385 surf
->ss7
.shader_channel_select_b
= swizzle_to_scs(GET_SWZ(swizzle
, 2));
386 surf
->ss7
.shader_channel_select_a
= swizzle_to_scs(GET_SWZ(swizzle
, 3));
389 /* Emit relocation to surface contents */
390 drm_intel_bo_emit_reloc(brw
->intel
.batch
.bo
,
391 binding_table
[surf_index
] +
392 offsetof(struct gen7_surface_state
, ss1
),
393 intelObj
->mt
->region
->bo
, intelObj
->mt
->offset
,
394 I915_GEM_DOMAIN_SAMPLER
, 0);
396 gen7_check_surface_setup(surf
, false /* is_render_target */);
400 * Create the constant buffer surface. Vertex/fragment shader constants will
401 * be read from this buffer with Data Port Read instructions/messages.
404 gen7_create_constant_surface(struct brw_context
*brw
,
408 uint32_t *out_offset
)
410 const GLint w
= width
- 1;
411 struct gen7_surface_state
*surf
;
413 surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
414 sizeof(*surf
), 32, out_offset
);
415 memset(surf
, 0, sizeof(*surf
));
417 surf
->ss0
.surface_type
= BRW_SURFACE_BUFFER
;
418 surf
->ss0
.surface_format
= BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
;
420 surf
->ss0
.render_cache_read_write
= 1;
423 surf
->ss1
.base_addr
= bo
->offset
+ offset
; /* reloc */
425 surf
->ss2
.width
= w
& 0x7f; /* bits 6:0 of size or width */
426 surf
->ss2
.height
= (w
>> 7) & 0x1fff; /* bits 19:7 of size or width */
427 surf
->ss3
.depth
= (w
>> 20) & 0x7f; /* bits 26:20 of size or width */
428 surf
->ss3
.pitch
= (16 - 1); /* stride between samples */
429 gen7_set_surface_tiling(surf
, I915_TILING_NONE
); /* tiling now allowed */
431 if (brw
->intel
.is_haswell
) {
432 surf
->ss7
.shader_channel_select_r
= HSW_SCS_RED
;
433 surf
->ss7
.shader_channel_select_g
= HSW_SCS_GREEN
;
434 surf
->ss7
.shader_channel_select_b
= HSW_SCS_BLUE
;
435 surf
->ss7
.shader_channel_select_a
= HSW_SCS_ALPHA
;
438 /* Emit relocation to surface contents. Section 5.1.1 of the gen4
439 * bspec ("Data Cache") says that the data cache does not exist as
440 * a separate cache and is just the sampler cache.
442 drm_intel_bo_emit_reloc(brw
->intel
.batch
.bo
,
444 offsetof(struct gen7_surface_state
, ss1
)),
446 I915_GEM_DOMAIN_SAMPLER
, 0);
448 gen7_check_surface_setup(surf
, false /* is_render_target */);
452 gen7_update_null_renderbuffer_surface(struct brw_context
*brw
, unsigned unit
)
454 /* From the Ivy bridge PRM, Vol4 Part1 p62 (Surface Type: Programming
457 * A null surface is used in instances where an actual surface is not
458 * bound. When a write message is generated to a null surface, no
459 * actual surface is written to. When a read message (including any
460 * sampling engine message) is generated to a null surface, the result
461 * is all zeros. Note that a null surface type is allowed to be used
462 * with all messages, even if it is not specificially indicated as
463 * supported. All of the remaining fields in surface state are ignored
464 * for null surfaces, with the following exceptions: Width, Height,
465 * Depth, LOD, and Render Target View Extent fields must match the
466 * depth buffer’s corresponding state for all render target surfaces,
469 struct intel_context
*intel
= &brw
->intel
;
470 struct gl_context
*ctx
= &intel
->ctx
;
471 struct gen7_surface_state
*surf
;
474 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
476 surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
477 sizeof(*surf
), 32, &brw
->wm
.surf_offset
[unit
]);
478 memset(surf
, 0, sizeof(*surf
));
480 surf
->ss0
.surface_type
= BRW_SURFACE_NULL
;
481 surf
->ss0
.surface_format
= BRW_SURFACEFORMAT_B8G8R8A8_UNORM
;
483 surf
->ss2
.width
= fb
->Width
- 1;
484 surf
->ss2
.height
= fb
->Height
- 1;
486 /* From the Ivy bridge PRM, Vol4 Part1 p65 (Tiled Surface: Programming Notes):
488 * If Surface Type is SURFTYPE_NULL, this field must be TRUE.
490 gen7_set_surface_tiling(surf
, I915_TILING_Y
);
492 gen7_check_surface_setup(surf
, true /* is_render_target */);
496 * Sets up a surface state structure to point at the given region.
497 * While it is only used for the front/back buffer currently, it should be
498 * usable for further buffers when doing ARB_draw_buffer support.
501 gen7_update_renderbuffer_surface(struct brw_context
*brw
,
502 struct gl_renderbuffer
*rb
,
505 struct intel_context
*intel
= &brw
->intel
;
506 struct gl_context
*ctx
= &intel
->ctx
;
507 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
508 struct intel_region
*region
= irb
->mt
->region
;
509 struct gen7_surface_state
*surf
;
510 uint32_t tile_x
, tile_y
;
511 gl_format rb_format
= intel_rb_format(irb
);
513 surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
514 sizeof(*surf
), 32, &brw
->wm
.surf_offset
[unit
]);
515 memset(surf
, 0, sizeof(*surf
));
517 /* Render targets can't use IMS layout */
518 assert(irb
->mt
->msaa_layout
!= INTEL_MSAA_LAYOUT_IMS
);
520 if (irb
->mt
->align_h
== 4)
521 surf
->ss0
.vertical_alignment
= 1;
522 if (irb
->mt
->align_w
== 8)
523 surf
->ss0
.horizontal_alignment
= 1;
526 case MESA_FORMAT_SARGB8
:
529 * Without GL_EXT_framebuffer_sRGB we shouldn't bind sRGB surfaces to the
530 * blend/update as sRGB.
532 if (ctx
->Color
.sRGBEnabled
)
533 surf
->ss0
.surface_format
= brw_format_for_mesa_format(rb_format
);
535 surf
->ss0
.surface_format
= BRW_SURFACEFORMAT_B8G8R8A8_UNORM
;
538 assert(brw_render_target_supported(intel
, rb
));
539 surf
->ss0
.surface_format
= brw
->render_target_format
[rb_format
];
540 if (unlikely(!brw
->format_supported_as_render_target
[rb_format
])) {
541 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
542 __FUNCTION__
, _mesa_get_format_name(rb_format
));
547 surf
->ss0
.surface_type
= BRW_SURFACE_2D
;
548 surf
->ss0
.surface_array_spacing
= irb
->mt
->array_spacing_lod0
?
549 GEN7_SURFACE_ARYSPC_LOD0
: GEN7_SURFACE_ARYSPC_FULL
;
552 surf
->ss1
.base_addr
= intel_renderbuffer_tile_offsets(irb
, &tile_x
, &tile_y
);
553 surf
->ss1
.base_addr
+= region
->bo
->offset
; /* reloc */
555 assert(brw
->has_surface_tile_offset
);
556 /* Note that the low bits of these fields are missing, so
557 * there's the possibility of getting in trouble.
559 assert(tile_x
% 4 == 0);
560 assert(tile_y
% 2 == 0);
561 surf
->ss5
.x_offset
= tile_x
/ 4;
562 surf
->ss5
.y_offset
= tile_y
/ 2;
564 surf
->ss2
.width
= rb
->Width
- 1;
565 surf
->ss2
.height
= rb
->Height
- 1;
566 gen7_set_surface_tiling(surf
, region
->tiling
);
567 surf
->ss3
.pitch
= (region
->pitch
* region
->cpp
) - 1;
569 gen7_set_surface_msaa(surf
, irb
->mt
->num_samples
, irb
->mt
->msaa_layout
);
571 if (irb
->mt
->msaa_layout
== INTEL_MSAA_LAYOUT_CMS
) {
572 gen7_set_surface_mcs_info(brw
, surf
, brw
->wm
.surf_offset
[unit
],
573 irb
->mt
->mcs_mt
, true /* is_render_target */);
576 if (intel
->is_haswell
) {
577 surf
->ss7
.shader_channel_select_r
= HSW_SCS_RED
;
578 surf
->ss7
.shader_channel_select_g
= HSW_SCS_GREEN
;
579 surf
->ss7
.shader_channel_select_b
= HSW_SCS_BLUE
;
580 surf
->ss7
.shader_channel_select_a
= HSW_SCS_ALPHA
;
583 drm_intel_bo_emit_reloc(brw
->intel
.batch
.bo
,
584 brw
->wm
.surf_offset
[unit
] +
585 offsetof(struct gen7_surface_state
, ss1
),
587 surf
->ss1
.base_addr
- region
->bo
->offset
,
588 I915_GEM_DOMAIN_RENDER
,
589 I915_GEM_DOMAIN_RENDER
);
591 gen7_check_surface_setup(surf
, true /* is_render_target */);
595 gen7_init_vtable_surface_functions(struct brw_context
*brw
)
597 struct intel_context
*intel
= &brw
->intel
;
599 intel
->vtbl
.update_texture_surface
= gen7_update_texture_surface
;
600 intel
->vtbl
.update_renderbuffer_surface
= gen7_update_renderbuffer_surface
;
601 intel
->vtbl
.update_null_renderbuffer_surface
=
602 gen7_update_null_renderbuffer_surface
;
603 intel
->vtbl
.create_constant_surface
= gen7_create_constant_surface
;