i965: Move pre-draw resolve buffers to dd::UpdateState
src/mesa/drivers/dri/i965/gen7_wm_surface_state.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/mtypes.h"
#include "main/blend.h"
#include "main/samplerobj.h"
#include "main/texformat.h"
#include "program/prog_parameter.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/**
 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED).
 */
unsigned
brw_swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   switch (swizzle) {
   case SWIZZLE_X:
      return HSW_SCS_RED;
   case SWIZZLE_Y:
      return need_green_to_blue ? HSW_SCS_BLUE : HSW_SCS_GREEN;
   case SWIZZLE_Z:
      return HSW_SCS_BLUE;
   case SWIZZLE_W:
      return HSW_SCS_ALPHA;
   case SWIZZLE_ZERO:
      return HSW_SCS_ZERO;
   case SWIZZLE_ONE:
      return HSW_SCS_ONE;
   }

   unreachable("Should not get here: invalid swizzle mode");
}

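/**
 * Translate an I915_TILING_* mode to the corresponding Gen7
 * RENDER_SURFACE_STATE "Tiled Surface" / "Tile Walk" bits.
 */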
uint32_t
gen7_surface_tiling_mode(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return GEN7_SURFACE_TILING_X;
   case I915_TILING_Y:
      return GEN7_SURFACE_TILING_Y;
   default:
      return GEN7_SURFACE_TILING_NONE;
   }
}


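/**
 * Compute the multisample fields of SURFACE_STATE DWORD 4: "Number of
 * Multisamples" and "Multisampled Surface Storage Format".
 */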
uint32_t
gen7_surface_msaa_bits(unsigned num_samples, enum intel_msaa_layout layout)
{
   uint32_t ss4 = 0;

   assert(num_samples <= 8);

   /* The SURFACE_MULTISAMPLECOUNT_X enums are simply log2(num_samples) << 3. */
   ss4 |= (ffs(MAX2(num_samples, 1)) - 1) << 3;

   if (layout == INTEL_MSAA_LAYOUT_IMS)
      ss4 |= GEN7_SURFACE_MSFMT_DEPTH_STENCIL;
   else
      ss4 |= GEN7_SURFACE_MSFMT_MSS;

   return ss4;
}


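/**
 * Point SURFACE_STATE DWORD 6 at the given MCS (multisample control surface /
 * fast clear auxiliary) miptree and emit the relocation for its address.
 */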
void
gen7_set_surface_mcs_info(struct brw_context *brw,
                          uint32_t *surf,
                          uint32_t surf_offset,
                          const struct intel_mipmap_tree *mcs_mt,
                          bool is_render_target)
{
   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *   "The MCS surface must be stored as Tile Y."
    */
   assert(mcs_mt->tiling == I915_TILING_Y);

   /* Compute the pitch in units of tiles. To do this we need to divide the
    * pitch in bytes by 128, since a single Y-tile is 128 bytes wide.
    */
   unsigned pitch_tiles = mcs_mt->pitch / 128;

   /* The upper 20 bits of surface state DWORD 6 are the upper 20 bits of the
    * GPU address of the MCS buffer; the lower 12 bits contain other control
    * information. Since buffer addresses are always on 4k boundaries (and
    * thus have their lower 12 bits zero), we can use an ordinary reloc to do
    * the necessary address translation.
    */
   assert((mcs_mt->bo->offset64 & 0xfff) == 0);

   surf[6] = GEN7_SURFACE_MCS_ENABLE |
             SET_FIELD(pitch_tiles - 1, GEN7_SURFACE_MCS_PITCH) |
             mcs_mt->bo->offset64;

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           surf_offset + 6 * 4,
                           mcs_mt->bo,
                           surf[6] & 0xfff,
                           is_render_target ? I915_GEM_DOMAIN_RENDER
                                            : I915_GEM_DOMAIN_SAMPLER,
                           is_render_target ? I915_GEM_DOMAIN_RENDER : 0);
}


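/**
 * Sanity check a completed Gen7 SURFACE_STATE against the multisampling
 * restrictions listed in the Ivybridge PRM. All checks are assertions, so
 * they compile away in release builds.
 */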
void
gen7_check_surface_setup(uint32_t *surf, bool is_render_target)
{
   unsigned num_multisamples = surf[4] & INTEL_MASK(5, 3);
   unsigned multisampled_surface_storage_format = surf[4] & (1 << 6);
   unsigned surface_array_spacing = surf[0] & (1 << 10);
   bool is_multisampled = num_multisamples != GEN7_SURFACE_MULTISAMPLECOUNT_1;

   (void) surface_array_spacing;

   /* From the Ivybridge PRM, Volume 4 Part 1, page 66 (RENDER_SURFACE_STATE
    * dword 0 bit 10 "Surface Array Spacing" Programming Notes):
    *
    *   If Multisampled Surface Storage Format is MSFMT_MSS and Number of
    *   Multisamples is not MULTISAMPLECOUNT_1, this field must be set to
    *   ARYSPC_LOD0.
    */
   if (multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS
       && is_multisampled)
      assert(surface_array_spacing == GEN7_SURFACE_ARYSPC_LOD0);

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage Format" Programming Notes):
    *
    *   All multisampled render target surfaces must have this field set to
    *   MSFMT_MSS.
    *
    * But also:
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   if (is_render_target && is_multisampled) {
      assert(multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage Format" Errata):
    *
    *   If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8, Width
    *   is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
    *   field must be set to MSFMT_MSS.
    */
   uint32_t width = GET_FIELD(surf[2], GEN7_SURFACE_WIDTH) + 1;
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 && width >= 8193) {
      assert(multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage Format" Errata):
    *
    *   If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8,
    *   ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface’s Number of
    *   Multisamples is MULTISAMPLECOUNT_4, ((Depth+1) * (Height+1)) is >
    *   8,388,608, this field must be set to MSFMT_DEPTH_STENCIL. This field
    *   must be set to MSFMT_DEPTH_STENCIL if Surface Format is one of the
    *   following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
    *   R24_UNORM_X8_TYPELESS.
    *
    * But also (from the Programming Notes):
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   uint32_t depth = GET_FIELD(surf[3], BRW_SURFACE_DEPTH) + 1;
   uint32_t height = GET_FIELD(surf[2], GEN7_SURFACE_HEIGHT) + 1;
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 &&
       depth * height > 4194304) {
      assert(multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_4 &&
       depth * height > 8388608) {
      assert(multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (is_multisampled) {
      switch (GET_FIELD(surf[0], BRW_SURFACE_FORMAT)) {
      case BRW_SURFACEFORMAT_I24X8_UNORM:
      case BRW_SURFACEFORMAT_L24X8_UNORM:
      case BRW_SURFACEFORMAT_A24X8_UNORM:
      case BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS:
         assert(multisampled_surface_storage_format ==
                GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
      }
   }
}

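/**
 * Emit a SURFACE_STATE describing a buffer (SURFTYPE_BUFFER) surface, such
 * as a texture buffer object or an untyped raw buffer.
 */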
static void
gen7_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               unsigned mocs,
                               bool rw)
{
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    8 * 4, 32, out_offset);
   memset(surf, 0, 8 * 4);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
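   /* The buffer size is programmed as (size - 1): its low 7 bits go in the
    * Width field, the next 14 bits in Height, and the next 6 bits in Depth.
    */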
   surf[2] = SET_FIELD((buffer_size - 1) & 0x7f, GEN7_SURFACE_WIDTH) |
             SET_FIELD(((buffer_size - 1) >> 7) & 0x3fff, GEN7_SURFACE_HEIGHT);
   surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3f, BRW_SURFACE_DEPTH) |
             (pitch - 1);

   surf[5] = SET_FIELD(mocs, GEN7_SURFACE_MOCS);

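   /* Gen7.5+ surface state has Shader Channel Select fields; program the
    * identity swizzle so all four channels are returned unmodified.
    */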
   if (brw->is_haswell) {
      surf[7] |= (SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
                  SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
                  SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
                  SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A));
   }

   /* Emit relocation to surface contents */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
                              bo, buffer_offset, I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

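/**
 * Construct the SURFACE_STATE for the texture bound to the given texture
 * unit, based on its miptree layout and the texture/sampler object state.
 */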
static void
gen7_update_texture_surface(struct gl_context *ctx,
                            unsigned unit,
                            uint32_t *surf_offset,
                            bool for_gather)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct intel_mipmap_tree *mt = intelObj->mt;
   struct gl_texture_image *firstImage = tObj->Image[0][tObj->BaseLevel];
   struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);

   if (tObj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);
      return;
   }

   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    8 * 4, 32, surf_offset);
   memset(surf, 0, 8 * 4);

   uint32_t tex_format = translate_tex_format(brw,
                                              intelObj->_Format,
                                              sampler->sRGBDecode);

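   /* textureGather workaround: sample R32G32_FLOAT surfaces using the
    * R32G32_FLOAT_LD format instead; on Haswell the channel selects below
    * are adjusted to match.
    */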
   if (for_gather && tex_format == BRW_SURFACEFORMAT_R32G32_FLOAT)
      tex_format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;

   surf[0] = translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
             tex_format << BRW_SURFACE_FORMAT_SHIFT |
             gen7_surface_tiling_mode(mt->tiling);

   /* mask of faces present in cube map; for other surfaces MBZ. */
   if (tObj->Target == GL_TEXTURE_CUBE_MAP || tObj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
      surf[0] |= BRW_SURFACE_CUBEFACE_ENABLES;

   if (mt->align_h == 4)
      surf[0] |= GEN7_SURFACE_VALIGN_4;
   if (mt->align_w == 8)
      surf[0] |= GEN7_SURFACE_HALIGN_8;

   if (mt->logical_depth0 > 1 && tObj->Target != GL_TEXTURE_3D)
      surf[0] |= GEN7_SURFACE_IS_ARRAY;

   /* if this is a view with restricted NumLayers, then
    * our effective depth is not just the miptree depth.
    */
   uint32_t effective_depth = (tObj->Immutable && tObj->Target != GL_TEXTURE_3D)
                              ? tObj->NumLayers : mt->logical_depth0;

   if (mt->array_spacing_lod0)
      surf[0] |= GEN7_SURFACE_ARYSPC_LOD0;

   surf[1] = mt->bo->offset64 + mt->offset; /* reloc */

   surf[2] = SET_FIELD(mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = SET_FIELD(effective_depth - 1, BRW_SURFACE_DEPTH) |
             (mt->pitch - 1);

   surf[4] = gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout) |
             SET_FIELD(tObj->MinLayer, GEN7_SURFACE_MIN_ARRAY_ELEMENT) |
             SET_FIELD((effective_depth - 1),
                       GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT);

   surf[5] = (SET_FIELD(GEN7_MOCS_L3, GEN7_SURFACE_MOCS) |
              SET_FIELD(tObj->MinLevel + tObj->BaseLevel - mt->first_level, GEN7_SURFACE_MIN_LOD) |
              /* mip count */
              (intelObj->_MaxLevel - tObj->BaseLevel));

   surf[7] = mt->fast_clear_color_value;

   if (brw->is_haswell) {
      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      const bool alpha_depth = tObj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);

      const int swizzle = unlikely(alpha_depth)
         ? SWIZZLE_XYZW : brw_get_texture_swizzle(ctx, tObj);

      const bool need_scs_green_to_blue = for_gather && tex_format == BRW_SURFACEFORMAT_R32G32_FLOAT_LD;

      surf[7] |=
         SET_FIELD(brw_swizzle_to_scs(GET_SWZ(swizzle, 0), need_scs_green_to_blue), GEN7_SURFACE_SCS_R) |
         SET_FIELD(brw_swizzle_to_scs(GET_SWZ(swizzle, 1), need_scs_green_to_blue), GEN7_SURFACE_SCS_G) |
         SET_FIELD(brw_swizzle_to_scs(GET_SWZ(swizzle, 2), need_scs_green_to_blue), GEN7_SURFACE_SCS_B) |
         SET_FIELD(brw_swizzle_to_scs(GET_SWZ(swizzle, 3), need_scs_green_to_blue), GEN7_SURFACE_SCS_A);
   }

   if (mt->mcs_mt) {
      gen7_set_surface_mcs_info(brw, surf, *surf_offset,
                                mt->mcs_mt, false /* is RT */);
   }

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

/**
 * Create a raw surface for untyped R/W access.
 */
static void
gen7_create_raw_surface(struct brw_context *brw, drm_intel_bo *bo,
                        uint32_t offset, uint32_t size,
                        uint32_t *out_offset, bool rw)
{
   gen7_emit_buffer_surface_state(brw,
                                  out_offset,
                                  bo,
                                  offset,
                                  BRW_SURFACEFORMAT_RAW,
                                  size,
                                  1,
                                  0 /* mocs */,
                                  true /* rw */);
}

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output. An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
gen7_update_null_renderbuffer_surface(struct brw_context *brw, unsigned unit)
{
   /* From the Ivy Bridge PRM, Vol4 Part1 p62 (Surface Type: Programming
    * Notes):
    *
    *   A null surface is used in instances where an actual surface is not
    *   bound. When a write message is generated to a null surface, no
    *   actual surface is written to. When a read message (including any
    *   sampling engine message) is generated to a null surface, the result
    *   is all zeros. Note that a null surface type is allowed to be used
    *   with all messages, even if it is not specifically indicated as
    *   supported. All of the remaining fields in surface state are ignored
    *   for null surfaces, with the following exceptions: Width, Height,
    *   Depth, LOD, and Render Target View Extent fields must match the
    *   depth buffer’s corresponding state for all render target surfaces,
    *   including null.
    */
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   uint32_t surf_index =
      brw->wm.prog_data->binding_table.render_target_start + unit;

   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
                                    &brw->wm.base.surf_offset[surf_index]);
   memset(surf, 0, 8 * 4);

   /* From the Ivybridge PRM, Volume 4, Part 1, page 65,
    * Tiled Surface: Programming Notes:
    * "If Surface Type is SURFTYPE_NULL, this field must be TRUE."
    */
   surf[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT |
             GEN7_SURFACE_TILING_Y;

   surf[2] = SET_FIELD(fb->Width - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(fb->Height - 1, GEN7_SURFACE_HEIGHT);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

/**
 * Sets up a surface state structure to point at the given renderbuffer's
 * miptree, for use as a color render target.
 */
static void
gen7_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 bool layered,
                                 unsigned int unit)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t format;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   uint32_t surftype;
   bool is_array = false;
   int depth = MAX2(irb->layer_count, 1);
   const uint8_t mocs = GEN7_MOCS_L3;

   int min_array_element = irb->mt_layer / MAX2(mt->num_samples, 1);

   GLenum gl_target = rb->TexImage ?
                         rb->TexImage->TexObject->Target : GL_TEXTURE_2D;

   uint32_t surf_index =
      brw->wm.prog_data->binding_table.render_target_start + unit;

   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
                                    &brw->wm.base.surf_offset[surf_index]);
   memset(surf, 0, 8 * 4);

   intel_miptree_used_for_rendering(irb->mt);

   /* Render targets can't use IMS layout */
   assert(irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);

   assert(brw_render_target_supported(brw, rb));
   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __FUNCTION__, _mesa_get_format_name(rb_format));
   }

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      surftype = BRW_SURFACE_2D;
      is_array = true;
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      depth = MAX2(irb->mt->logical_depth0, 1);
      /* fallthrough */
   default:
      surftype = translate_tex_target(gl_target);
      is_array = _mesa_tex_target_is_array(gl_target);
      break;
   }

   surf[0] = surftype << BRW_SURFACE_TYPE_SHIFT |
             format << BRW_SURFACE_FORMAT_SHIFT |
             (irb->mt->array_spacing_lod0 ? GEN7_SURFACE_ARYSPC_LOD0
                                          : GEN7_SURFACE_ARYSPC_FULL) |
             gen7_surface_tiling_mode(mt->tiling);

   if (irb->mt->align_h == 4)
      surf[0] |= GEN7_SURFACE_VALIGN_4;
   if (irb->mt->align_w == 8)
      surf[0] |= GEN7_SURFACE_HALIGN_8;

   if (is_array) {
      surf[0] |= GEN7_SURFACE_IS_ARRAY;
   }

   surf[1] = mt->bo->offset64;

   assert(brw->has_surface_tile_offset);

   surf[5] = SET_FIELD(mocs, GEN7_SURFACE_MOCS) |
             (irb->mt_level - irb->mt->first_level);

   surf[2] = SET_FIELD(irb->mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(irb->mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = ((depth - 1) << BRW_SURFACE_DEPTH_SHIFT) |
             (mt->pitch - 1);

   surf[4] = gen7_surface_msaa_bits(irb->mt->num_samples, irb->mt->msaa_layout) |
             min_array_element << GEN7_SURFACE_MIN_ARRAY_ELEMENT_SHIFT |
             (depth - 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT;

   if (irb->mt->mcs_mt) {
      gen7_set_surface_mcs_info(brw, surf, brw->wm.base.surf_offset[surf_index],
                                irb->mt->mcs_mt, true /* is RT */);
   }

   surf[7] = irb->mt->fast_clear_color_value;

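   /* Gen7.5+ surface state has Shader Channel Select fields; program the
    * identity swizzle so the render target channels map straight through.
    */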
   if (brw->is_haswell) {
      surf[7] |= (SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
                  SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
                  SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
                  SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A));
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           brw->wm.base.surf_offset[surf_index] + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

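/**
 * Plug the Gen7 surface-state emitters into the brw_context vtable.
 */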
void
gen7_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_texture_surface = gen7_update_texture_surface;
   brw->vtbl.update_renderbuffer_surface = gen7_update_renderbuffer_surface;
   brw->vtbl.update_null_renderbuffer_surface =
      gen7_update_null_renderbuffer_surface;
   brw->vtbl.create_raw_surface = gen7_create_raw_surface;
   brw->vtbl.emit_buffer_surface_state = gen7_emit_buffer_surface_state;
}