i965/skl: skip fast clears for certain surface formats
src/mesa/drivers/dri/i965/gen8_surface_state.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/texformat.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
 *
 *    SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *            0          1          2          3             4            5
 *            4          5          6          7             0            1
 *      SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,    SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 */
static unsigned
swizzle_to_scs(unsigned swizzle)
{
   return (swizzle + 4) & 7;
}

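/**
 * Translate a miptree's tiled-resource mode (TRMODE_YF/TRMODE_YS) into the
 * corresponding GEN9_SURFACE_TRMODE_* encoding for SURFACE_STATE, or
 * TRMODE_NONE when tiled resources are not in use.
 */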
static uint32_t
surface_tiling_resource_mode(uint32_t tr_mode)
{
   switch (tr_mode) {
   case INTEL_MIPTREE_TRMODE_YF:
      return GEN9_SURFACE_TRMODE_TILEYF;
   case INTEL_MIPTREE_TRMODE_YS:
      return GEN9_SURFACE_TRMODE_TILEYS;
   default:
      return GEN9_SURFACE_TRMODE_NONE;
   }
}

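/**
 * Translate an I915_TILING_* value into the GEN8_SURFACE_TILING_* encoding
 * that goes into SURFACE_STATE dword 0.
 */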
static uint32_t
surface_tiling_mode(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return GEN8_SURFACE_TILING_X;
   case I915_TILING_Y:
      return GEN8_SURFACE_TILING_Y;
   default:
      return GEN8_SURFACE_TILING_NONE;
   }
}

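/**
 * Pick the GEN8_SURFACE_VALIGN_* bits for a miptree's vertical alignment.
 * On Gen9+ the field is ignored for 1D surfaces and for tiled resources, so
 * an arbitrary non-reserved value is returned in those cases.
 */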
static unsigned
vertical_alignment(const struct brw_context *brw,
                   const struct intel_mipmap_tree *mt,
                   uint32_t surf_type)
{
   /* On Gen9+ vertical alignment is ignored for 1D surfaces and when
    * tr_mode is not TRMODE_NONE. Set to an arbitrary non-reserved value.
    */
   if (brw->gen > 8 &&
       (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE ||
        surf_type == BRW_SURFACE_1D))
      return GEN8_SURFACE_VALIGN_4;

   switch (mt->valign) {
   case 4:
      return GEN8_SURFACE_VALIGN_4;
   case 8:
      return GEN8_SURFACE_VALIGN_8;
   case 16:
      return GEN8_SURFACE_VALIGN_16;
   default:
      unreachable("Unsupported vertical surface alignment.");
   }
}

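/**
 * Pick the GEN8_SURFACE_HALIGN_* bits for a miptree's horizontal alignment.
 * On Gen9+ the field is ignored when a tiled-resource mode is in use (and
 * for linear 1D layouts), so an arbitrary non-reserved value is returned in
 * those cases.
 */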
static unsigned
horizontal_alignment(const struct brw_context *brw,
                     const struct intel_mipmap_tree *mt,
                     uint32_t surf_type)
{
   /* On Gen9+ horizontal alignment is ignored when tr_mode is not
    * TRMODE_NONE. Set to an arbitrary non-reserved value.
    */
   if (brw->gen > 8 &&
       (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE ||
        gen9_use_linear_1d_layout(brw, mt)))
      return GEN8_SURFACE_HALIGN_4;

   switch (mt->halign) {
   case 4:
      return GEN8_SURFACE_HALIGN_4;
   case 8:
      return GEN8_SURFACE_HALIGN_8;
   case 16:
      return GEN8_SURFACE_HALIGN_16;
   default:
      unreachable("Unsupported horizontal surface alignment.");
   }
}

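/**
 * Allocate a zeroed SURFACE_STATE entry in the batch's state space:
 * 16 dwords on Gen9+, 13 dwords on Gen8, with 64-byte alignment.
 */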
static uint32_t *
allocate_surface_state(struct brw_context *brw, uint32_t *out_offset, int index)
{
   int dwords = brw->gen >= 9 ? 16 : 13;
   uint32_t *surf = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                      dwords * 4, 64, index, out_offset);
   memset(surf, 0, dwords * 4);
   return surf;
}

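/**
 * Emit SURFACE_STATE for a buffer surface (BRW_SURFACE_BUFFER).  The buffer
 * size is split across the width/height/depth fields, and a relocation to
 * the backing BO's contents is emitted when one is present.
 */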
static void
gen8_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   const unsigned mocs = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
   uint32_t *surf = allocate_surface_state(brw, out_offset, -1);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = SET_FIELD(mocs, GEN8_SURFACE_MOCS);

   surf[2] = SET_FIELD((buffer_size - 1) & 0x7f, GEN7_SURFACE_WIDTH) |
             SET_FIELD(((buffer_size - 1) >> 7) & 0x3fff, GEN7_SURFACE_HEIGHT);
   if (surface_format == BRW_SURFACEFORMAT_RAW)
      surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3ff, BRW_SURFACE_DEPTH);
   else
      surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3f, BRW_SURFACE_DEPTH);
   surf[3] |= (pitch - 1);
   surf[7] = SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
             SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
             SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
             SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A);
   /* reloc */
   *((uint64_t *) &surf[8]) = (bo ? bo->offset64 : 0) + buffer_offset;

   /* Emit relocation to surface contents. */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 8 * 4,
                              bo, buffer_offset, I915_GEM_DOMAIN_SAMPLER,
                              rw ? I915_GEM_DOMAIN_SAMPLER : 0);
   }
}

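/**
 * Store the miptree's fast clear color in the surface state.  Gen9+ takes a
 * per-channel clear color in dwords 12-15 (asserted here to be 0.0 or 1.0
 * per channel); Gen8 ORs the packed clear color bits into dword 7.
 */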
static void
gen8_emit_fast_clear_color(struct brw_context *brw,
                           struct intel_mipmap_tree *mt,
                           uint32_t *surf)
{
   if (brw->gen >= 9) {
#define check_fast_clear_val(x) \
      assert(mt->gen9_fast_clear_color.f[x] == 0.0 || \
             mt->gen9_fast_clear_color.f[x] == 1.0)
      check_fast_clear_val(0);
      check_fast_clear_val(1);
      check_fast_clear_val(2);
      check_fast_clear_val(3);
#undef check_fast_clear_val
      surf[12] = mt->gen9_fast_clear_color.ui[0];
      surf[13] = mt->gen9_fast_clear_color.ui[1];
      surf[14] = mt->gen9_fast_clear_color.ui[2];
      surf[15] = mt->gen9_fast_clear_color.ui[3];
   } else
      surf[7] |= mt->fast_clear_color_value;
}

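/**
 * Emit SURFACE_STATE for sampling from a miptree, covering W-tiled stencil,
 * the optional MCS auxiliary surface, channel swizzles, and the fast clear
 * color.
 */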
static void
gen8_emit_texture_surface_state(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                GLenum target,
                                unsigned min_layer, unsigned max_layer,
                                unsigned min_level, unsigned max_level,
                                unsigned format,
                                unsigned swizzle,
                                uint32_t *surf_offset,
                                bool rw, bool for_gather)
{
   const unsigned depth = max_layer - min_layer;
   struct intel_mipmap_tree *aux_mt = NULL;
   uint32_t aux_mode = 0;
   uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
   int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
   unsigned tiling_mode, pitch;
   const unsigned tr_mode = surface_tiling_resource_mode(mt->tr_mode);
   const uint32_t surf_type = translate_tex_target(target);

   if (mt->format == MESA_FORMAT_S_UINT8) {
      tiling_mode = GEN8_SURFACE_TILING_W;
      pitch = 2 * mt->pitch;
   } else {
      tiling_mode = surface_tiling_mode(mt->tiling);
      pitch = mt->pitch;
   }

   if (mt->mcs_mt) {
      aux_mt = mt->mcs_mt;
      aux_mode = GEN8_SURFACE_AUX_MODE_MCS;

      /*
       * From the BDW PRM, Volume 2d, page 260 (RENDER_SURFACE_STATE):
       * "When MCS is enabled for non-MSRT, HALIGN_16 must be used"
       *
       * From the hardware spec for GEN9:
       * "When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E, HALIGN
       * 16 must be used."
       */
      if (brw->gen >= 9 || mt->num_samples == 1)
         assert(mt->halign == 16);

      if (brw->gen >= 9) {
         assert(mt->num_samples > 1 ||
                brw_losslessly_compressible_format(brw, surf_type));
      }
   }

   uint32_t *surf = allocate_surface_state(brw, surf_offset, surf_index);

   surf[0] = SET_FIELD(surf_type, BRW_SURFACE_TYPE) |
             format << BRW_SURFACE_FORMAT_SHIFT |
             vertical_alignment(brw, mt, surf_type) |
             horizontal_alignment(brw, mt, surf_type) |
             tiling_mode;

   if (surf_type == BRW_SURFACE_CUBE) {
      surf[0] |= BRW_SURFACE_CUBEFACE_ENABLES;
   }

   /* From the CHV PRM, Volume 2d, page 321 (RENDER_SURFACE_STATE dword 0
    * bit 9 "Sampler L2 Bypass Mode Disable" Programming Notes):
    *
    *    This bit must be set for the following surface types: BC2_UNORM
    *    BC3_UNORM BC5_UNORM BC5_SNORM BC7_UNORM
    */
   if ((brw->gen >= 9 || brw->is_cherryview) &&
       (format == BRW_SURFACEFORMAT_BC2_UNORM ||
        format == BRW_SURFACEFORMAT_BC3_UNORM ||
        format == BRW_SURFACEFORMAT_BC5_UNORM ||
        format == BRW_SURFACEFORMAT_BC5_SNORM ||
        format == BRW_SURFACEFORMAT_BC7_UNORM))
      surf[0] |= GEN8_SURFACE_SAMPLER_L2_BYPASS_DISABLE;

   if (_mesa_is_array_texture(target) || target == GL_TEXTURE_CUBE_MAP)
      surf[0] |= GEN8_SURFACE_IS_ARRAY;

   surf[1] = SET_FIELD(mocs_wb, GEN8_SURFACE_MOCS) | mt->qpitch >> 2;

   surf[2] = SET_FIELD(mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = SET_FIELD(depth - 1, BRW_SURFACE_DEPTH) | (pitch - 1);

   surf[4] = gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout) |
             SET_FIELD(min_layer, GEN7_SURFACE_MIN_ARRAY_ELEMENT) |
             SET_FIELD(depth - 1, GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT);

   surf[5] = SET_FIELD(min_level - mt->first_level, GEN7_SURFACE_MIN_LOD) |
             (max_level - min_level - 1); /* mip count */

   if (brw->gen >= 9) {
      surf[5] |= SET_FIELD(tr_mode, GEN9_SURFACE_TRMODE);
      /* Disable Mip Tail by setting a large value. */
      surf[5] |= SET_FIELD(15, GEN9_SURFACE_MIP_TAIL_START_LOD);
   }

   if (aux_mt) {
      uint32_t tile_w, tile_h;
      assert(aux_mt->tiling == I915_TILING_Y);
      intel_get_tile_dims(aux_mt->tiling, aux_mt->tr_mode,
                          aux_mt->cpp, &tile_w, &tile_h);
      surf[6] = SET_FIELD(mt->qpitch / 4, GEN8_SURFACE_AUX_QPITCH) |
                SET_FIELD((aux_mt->pitch / tile_w) - 1,
                          GEN8_SURFACE_AUX_PITCH) |
                aux_mode;
   }

   gen8_emit_fast_clear_color(brw, mt, surf);
   surf[7] |=
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 0)), GEN7_SURFACE_SCS_R) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 1)), GEN7_SURFACE_SCS_G) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 2)), GEN7_SURFACE_SCS_B) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 3)), GEN7_SURFACE_SCS_A);

   *((uint64_t *) &surf[8]) = mt->bo->offset64 + mt->offset; /* reloc */

   if (aux_mt) {
      *((uint64_t *) &surf[10]) = aux_mt->bo->offset64;
      drm_intel_bo_emit_reloc(brw->batch.bo, *surf_offset + 10 * 4,
                              aux_mt->bo, 0,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 8 * 4,
                           mt->bo,
                           mt->offset,
                           I915_GEM_DOMAIN_SAMPLER,
                           (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
}

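/**
 * Set up the SURFACE_STATE for a GL texture unit, handling buffer textures,
 * texture views with restricted layers/levels, stencil texturing, and the
 * GL_ALPHA depth-mode swizzle workaround.
 */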
static void
gen8_update_texture_surface(struct gl_context *ctx,
                            unsigned unit,
                            uint32_t *surf_offset,
                            bool for_gather)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;
      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      const unsigned depth = (obj->Immutable && obj->Target != GL_TEXTURE_3D ?
                              obj->NumLayers : mt->logical_depth0);

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      unsigned format = translate_tex_format(brw, intel_obj->_Format,
                                             sampler->sRGBDecode);
      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         mt = mt->stencil_mt;
         format = BRW_SURFACEFORMAT_R8_UINT;
      }

      gen8_emit_texture_surface_state(brw, mt, obj->Target,
                                      obj->MinLayer, obj->MinLayer + depth,
                                      obj->MinLevel + obj->BaseLevel,
                                      obj->MinLevel + intel_obj->_MaxLevel + 1,
                                      format, swizzle, surf_offset,
                                      false, for_gather);
   }
}

/**
 * Creates a null surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
gen8_emit_null_surface_state(struct brw_context *brw,
                             unsigned width,
                             unsigned height,
                             unsigned samples,
                             uint32_t *out_offset)
{
   uint32_t *surf = allocate_surface_state(brw, out_offset, -1);

   surf[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT |
             GEN8_SURFACE_TILING_Y;
   surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen8_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 bool layered, unsigned unit /* unused */,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   struct intel_mipmap_tree *aux_mt = NULL;
   uint32_t aux_mode = 0;
   unsigned width = mt->logical_width0;
   unsigned height = mt->logical_height0;
   unsigned pitch = mt->pitch;
   uint32_t tiling = mt->tiling;
   unsigned tr_mode = surface_tiling_resource_mode(mt->tr_mode);
   uint32_t format = 0;
   uint32_t surf_type;
   uint32_t offset;
   bool is_array = false;
   int depth = MAX2(irb->layer_count, 1);
   const int min_array_element = (mt->format == MESA_FORMAT_S_UINT8) ?
      irb->mt_layer : (irb->mt_layer / MAX2(mt->num_samples, 1));
   GLenum gl_target =
      rb->TexImage ? rb->TexImage->TexObject->Target : GL_TEXTURE_2D;
   const uint32_t mocs = brw->gen >= 9 ? SKL_MOCS_PTE : BDW_MOCS_PTE;

   intel_miptree_used_for_rendering(mt);

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      surf_type = BRW_SURFACE_2D;
      is_array = true;
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      depth = MAX2(irb->mt->logical_depth0, 1);
      /* fallthrough */
   default:
      surf_type = translate_tex_target(gl_target);
      is_array = _mesa_tex_target_is_array(gl_target);
      break;
   }

   /* _NEW_BUFFERS */
   /* Render targets can't use IMS layout. Stencil in turn gets configured as
    * single sampled and indexed manually by the program.
    */
   if (mt->format == MESA_FORMAT_S_UINT8) {
      brw_configure_w_tiled(mt, true, &width, &height, &pitch,
                            &tiling, &format);
   } else {
      assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);
      assert(brw_render_target_supported(brw, rb));
      mesa_format rb_format = _mesa_get_render_format(ctx,
                                                      intel_rb_format(irb));
      format = brw->render_target_format[rb_format];
      if (unlikely(!brw->format_supported_as_render_target[rb_format]))
         _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                       __func__, _mesa_get_format_name(rb_format));
   }

   if (mt->mcs_mt) {
      aux_mt = mt->mcs_mt;
      aux_mode = GEN8_SURFACE_AUX_MODE_MCS;

      /*
       * From the BDW PRM, Volume 2d, page 260 (RENDER_SURFACE_STATE):
       * "When MCS is enabled for non-MSRT, HALIGN_16 must be used"
       *
       * From the hardware spec for GEN9:
       * "When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E, HALIGN
       * 16 must be used."
       */
      if (brw->gen >= 9 || mt->num_samples == 1)
         assert(mt->halign == 16);
   }

   uint32_t *surf = allocate_surface_state(brw, &offset, surf_index);

   surf[0] = (surf_type << BRW_SURFACE_TYPE_SHIFT) |
             (is_array ? GEN7_SURFACE_IS_ARRAY : 0) |
             (format << BRW_SURFACE_FORMAT_SHIFT) |
             vertical_alignment(brw, mt, surf_type) |
             horizontal_alignment(brw, mt, surf_type) |
             surface_tiling_mode(tiling);

   surf[1] = SET_FIELD(mocs, GEN8_SURFACE_MOCS) | mt->qpitch >> 2;

   surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = (depth - 1) << BRW_SURFACE_DEPTH_SHIFT |
             (pitch - 1); /* Surface Pitch */

   surf[4] = min_array_element << GEN7_SURFACE_MIN_ARRAY_ELEMENT_SHIFT |
             (depth - 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT;

   if (mt->format != MESA_FORMAT_S_UINT8)
      surf[4] |= gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout);

   surf[5] = irb->mt_level - irb->mt->first_level;

   if (brw->gen >= 9) {
      surf[5] |= SET_FIELD(tr_mode, GEN9_SURFACE_TRMODE);
      /* Disable Mip Tail by setting a large value. */
      surf[5] |= SET_FIELD(15, GEN9_SURFACE_MIP_TAIL_START_LOD);
   }

   if (aux_mt) {
      uint32_t tile_w, tile_h;
      assert(aux_mt->tiling == I915_TILING_Y);
      intel_get_tile_dims(aux_mt->tiling, aux_mt->tr_mode,
                          aux_mt->cpp, &tile_w, &tile_h);
      surf[6] = SET_FIELD(mt->qpitch / 4, GEN8_SURFACE_AUX_QPITCH) |
                SET_FIELD((aux_mt->pitch / tile_w) - 1,
                          GEN8_SURFACE_AUX_PITCH) |
                aux_mode;
   }

   gen8_emit_fast_clear_color(brw, mt, surf);
   surf[7] |= SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
              SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
              SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
              SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A);

   assert(mt->offset % mt->cpp == 0);
   *((uint64_t *) &surf[8]) = mt->bo->offset64 + mt->offset; /* reloc */

   if (aux_mt) {
      *((uint64_t *) &surf[10]) = aux_mt->bo->offset64;
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              offset + 10 * 4,
                              aux_mt->bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 8 * 4,
                           mt->bo,
                           mt->offset,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}

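/**
 * Plug the Gen8+ surface-state emitters into the context's vtable.
 */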
void
gen8_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_texture_surface = gen8_update_texture_surface;
   brw->vtbl.update_renderbuffer_surface = gen8_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = gen8_emit_null_surface_state;
   brw->vtbl.emit_texture_surface_state = gen8_emit_texture_surface_state;
   brw->vtbl.emit_buffer_surface_state = gen8_emit_buffer_surface_state;
}