i965: Skip update_texture_surface when the plane doesn't exist
[mesa.git] / src / mesa / drivers / dri / i965 / gen8_surface_state.c
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "main/blend.h"
25 #include "main/mtypes.h"
26 #include "main/samplerobj.h"
27 #include "main/texformat.h"
28 #include "main/teximage.h"
29 #include "program/prog_parameter.h"
30 #include "program/prog_instruction.h"
31
32 #include "intel_mipmap_tree.h"
33 #include "intel_batchbuffer.h"
34 #include "intel_tex.h"
35 #include "intel_fbo.h"
36 #include "intel_buffer_objects.h"
37 #include "intel_image.h"
38
39 #include "brw_context.h"
40 #include "brw_state.h"
41 #include "brw_defines.h"
42 #include "brw_wm.h"
43 #include "isl/isl.h"
44
/**
 * Map a Mesa swizzle component (SWIZZLE_X .. SWIZZLE_ONE) to the Gen7.5+
 * "Shader Channel Select" encoding (i.e. HSW_SCS_RED).  The two
 * enumerations are rotations of one another:
 *
 *    SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *        0          1          2          3          4            5
 *        4          5          6          7          0            1
 *     SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,   SCS_ZERO,    SCS_ONE
 *
 * so the conversion is simply add-4 modulo 8.
 */
static unsigned
swizzle_to_scs(unsigned swizzle)
{
   return (swizzle + 4) % 8;
}
61
62 static uint32_t
63 surface_tiling_resource_mode(uint32_t tr_mode)
64 {
65 switch (tr_mode) {
66 case INTEL_MIPTREE_TRMODE_YF:
67 return GEN9_SURFACE_TRMODE_TILEYF;
68 case INTEL_MIPTREE_TRMODE_YS:
69 return GEN9_SURFACE_TRMODE_TILEYS;
70 default:
71 return GEN9_SURFACE_TRMODE_NONE;
72 }
73 }
74
75 uint32_t
76 gen8_surface_tiling_mode(uint32_t tiling)
77 {
78 switch (tiling) {
79 case I915_TILING_X:
80 return GEN8_SURFACE_TILING_X;
81 case I915_TILING_Y:
82 return GEN8_SURFACE_TILING_Y;
83 default:
84 return GEN8_SURFACE_TILING_NONE;
85 }
86 }
87
88 unsigned
89 gen8_vertical_alignment(const struct brw_context *brw,
90 const struct intel_mipmap_tree *mt,
91 uint32_t surf_type)
92 {
93 /* On Gen9+ vertical alignment is ignored for 1D surfaces and when
94 * tr_mode is not TRMODE_NONE. Set to an arbitrary non-reserved value.
95 */
96 if (brw->gen > 8 &&
97 (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE ||
98 surf_type == BRW_SURFACE_1D))
99 return GEN8_SURFACE_VALIGN_4;
100
101 switch (mt->valign) {
102 case 4:
103 return GEN8_SURFACE_VALIGN_4;
104 case 8:
105 return GEN8_SURFACE_VALIGN_8;
106 case 16:
107 return GEN8_SURFACE_VALIGN_16;
108 default:
109 unreachable("Unsupported vertical surface alignment.");
110 }
111 }
112
113 unsigned
114 gen8_horizontal_alignment(const struct brw_context *brw,
115 const struct intel_mipmap_tree *mt,
116 uint32_t surf_type)
117 {
118 /* On Gen9+ horizontal alignment is ignored when tr_mode is not
119 * TRMODE_NONE. Set to an arbitrary non-reserved value.
120 */
121 if (brw->gen > 8 &&
122 (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE ||
123 gen9_use_linear_1d_layout(brw, mt)))
124 return GEN8_SURFACE_HALIGN_4;
125
126 switch (mt->halign) {
127 case 4:
128 return GEN8_SURFACE_HALIGN_4;
129 case 8:
130 return GEN8_SURFACE_HALIGN_8;
131 case 16:
132 return GEN8_SURFACE_HALIGN_16;
133 default:
134 unreachable("Unsupported horizontal surface alignment.");
135 }
136 }
137
138 uint32_t *
139 gen8_allocate_surface_state(struct brw_context *brw,
140 uint32_t *out_offset, int index)
141 {
142 int dwords = brw->gen >= 9 ? 16 : 13;
143 uint32_t *surf = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
144 dwords * 4, 64, index, out_offset);
145 memset(surf, 0, dwords * 4);
146 return surf;
147 }
148
/**
 * Emit Gen8+ SURFACE_STATE for a buffer surface.
 *
 * \param out_offset     receives the state-space offset of the new packet.
 * \param bo             backing buffer object, or NULL (the address field
 *                       is then left as just buffer_offset and no
 *                       relocation is emitted).
 * \param buffer_offset  byte offset of the surface data within \p bo.
 * \param surface_format BRW_SURFACEFORMAT_* encoding of the elements.
 * \param buffer_size    number of entries; encoded below as size - 1.
 * \param pitch          stride between entries; encoded as pitch - 1.
 * \param rw             true if shaders may write the surface — selects
 *                       the write domain of the relocation.
 */
static void
gen8_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   const unsigned mocs = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
   uint32_t *surf = gen8_allocate_surface_state(brw, out_offset, -1);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = SET_FIELD(mocs, GEN8_SURFACE_MOCS);

   /* The entry count (minus one) is split across the width (low 7 bits),
    * height (next 14 bits) and depth fields.
    */
   surf[2] = SET_FIELD((buffer_size - 1) & 0x7f, GEN7_SURFACE_WIDTH) |
             SET_FIELD(((buffer_size - 1) >> 7) & 0x3fff, GEN7_SURFACE_HEIGHT);
   /* RAW buffers get a wider depth field (0x3ff vs. 0x3f mask). */
   if (surface_format == BRW_SURFACEFORMAT_RAW)
      surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3ff, BRW_SURFACE_DEPTH);
   else
      surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3f, BRW_SURFACE_DEPTH);
   surf[3] |= (pitch - 1);
   /* Identity shader channel selects — no swizzling for buffers. */
   surf[7] = SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
             SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
             SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
             SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A);
   /* reloc: 64-bit presumed address in dwords 8-9, patched below. */
   *((uint64_t *) &surf[8]) = (bo ? bo->offset64 : 0) + buffer_offset;

   /* Emit relocation to surface contents. */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 8 * 4,
                              bo, buffer_offset, I915_GEM_DOMAIN_SAMPLER,
                              rw ? I915_GEM_DOMAIN_SAMPLER : 0);
   }
}
188
189 void
190 gen8_emit_fast_clear_color(const struct brw_context *brw,
191 const struct intel_mipmap_tree *mt,
192 uint32_t *surf)
193 {
194 if (brw->gen >= 9) {
195 surf[12] = mt->gen9_fast_clear_color.ui[0];
196 surf[13] = mt->gen9_fast_clear_color.ui[1];
197 surf[14] = mt->gen9_fast_clear_color.ui[2];
198 surf[15] = mt->gen9_fast_clear_color.ui[3];
199 } else
200 surf[7] |= mt->fast_clear_color_value;
201 }
202
203 uint32_t
204 gen8_get_aux_mode(const struct brw_context *brw,
205 const struct intel_mipmap_tree *mt)
206 {
207 if (mt->mcs_mt == NULL)
208 return GEN8_SURFACE_AUX_MODE_NONE;
209
210 /*
211 * From the BDW PRM, Volume 2d, page 260 (RENDER_SURFACE_STATE):
212 * "When MCS is enabled for non-MSRT, HALIGN_16 must be used"
213 *
214 * From the hardware spec for GEN9:
215 * "When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E, HALIGN
216 * 16 must be used."
217 */
218 if (brw->gen >= 9 || mt->num_samples == 1)
219 assert(mt->halign == 16);
220
221 if (intel_miptree_is_lossless_compressed(brw, mt))
222 return GEN9_SURFACE_AUX_MODE_CCS_E;
223
224 return GEN8_SURFACE_AUX_MODE_MCS;
225 }
226
/**
 * Emit Gen8+ RENDER_SURFACE_STATE for sampling from a miptree.
 *
 * \param target               GL texture target; translated to the hardware
 *                             surface type.
 * \param min_layer/max_layer  half-open layer range; depth is the count.
 * \param min_level/max_level  half-open mip level range.
 * \param format               BRW_SURFACEFORMAT_* to sample as.
 * \param swizzle              Mesa packed swizzle applied via the shader
 *                             channel select fields.
 * \param surf_offset          receives the state offset; also used to
 *                             compute the relocation targets.
 * \param rw                   whether the surface may be written (affects
 *                             the relocation write domain).
 */
static void
gen8_emit_texture_surface_state(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                GLenum target,
                                unsigned min_layer, unsigned max_layer,
                                unsigned min_level, unsigned max_level,
                                unsigned format,
                                unsigned swizzle,
                                uint32_t *surf_offset, int surf_index,
                                bool rw, bool for_gather)
{
   const unsigned depth = max_layer - min_layer;
   struct intel_mipmap_tree *aux_mt = mt->mcs_mt;
   uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
   unsigned tiling_mode, pitch;
   const unsigned tr_mode = surface_tiling_resource_mode(mt->tr_mode);
   const uint32_t surf_type = translate_tex_target(target);
   uint32_t aux_mode = gen8_get_aux_mode(brw, mt);

   /* Stencil is programmed as W-tiled with doubled pitch; everything else
    * uses the miptree's own tiling and pitch.
    * NOTE(review): the 2x pitch presumably compensates for W-tile pitch
    * units — confirm against the PRM.
    */
   if (mt->format == MESA_FORMAT_S_UINT8) {
      tiling_mode = GEN8_SURFACE_TILING_W;
      pitch = 2 * mt->pitch;
   } else {
      tiling_mode = gen8_surface_tiling_mode(mt->tiling);
      pitch = mt->pitch;
   }

   /* Prior to Gen9, MCS is not uploaded for single-sampled surfaces because
    * the color buffer should always have been resolved before it is used as
    * a texture so there is no need for it. On Gen9 it will be uploaded when
    * the surface is losslessly compressed (CCS_E).
    * However, sampling engine is not capable of re-interpreting the
    * underlying color buffer in non-compressible formats when the surface
    * is configured as compressed. Therefore state upload has made sure the
    * buffer is in resolved state allowing the surface to be configured as
    * non-compressed.
    */
   if (mt->num_samples <= 1 &&
       (aux_mode != GEN9_SURFACE_AUX_MODE_CCS_E ||
        !isl_format_supports_lossless_compression(
            brw->intelScreen->devinfo, format))) {
      assert(!mt->mcs_mt ||
             mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED);
      aux_mt = NULL;
      aux_mode = GEN8_SURFACE_AUX_MODE_NONE;
   }

   uint32_t *surf = gen8_allocate_surface_state(brw, surf_offset, surf_index);

   surf[0] = SET_FIELD(surf_type, BRW_SURFACE_TYPE) |
             format << BRW_SURFACE_FORMAT_SHIFT |
             gen8_vertical_alignment(brw, mt, surf_type) |
             gen8_horizontal_alignment(brw, mt, surf_type) |
             tiling_mode;

   if (surf_type == BRW_SURFACE_CUBE) {
      surf[0] |= BRW_SURFACE_CUBEFACE_ENABLES;
   }

   /* From the CHV PRM, Volume 2d, page 321 (RENDER_SURFACE_STATE dword 0
    * bit 9 "Sampler L2 Bypass Mode Disable" Programming Notes):
    *
    *    This bit must be set for the following surface types: BC2_UNORM
    *    BC3_UNORM BC5_UNORM BC5_SNORM BC7_UNORM
    */
   if ((brw->gen >= 9 || brw->is_cherryview) &&
       (format == BRW_SURFACEFORMAT_BC2_UNORM ||
        format == BRW_SURFACEFORMAT_BC3_UNORM ||
        format == BRW_SURFACEFORMAT_BC5_UNORM ||
        format == BRW_SURFACEFORMAT_BC5_SNORM ||
        format == BRW_SURFACEFORMAT_BC7_UNORM))
      surf[0] |= GEN8_SURFACE_SAMPLER_L2_BYPASS_DISABLE;

   /* Everything but 3D textures is programmed as an array surface. */
   if (mt->target != GL_TEXTURE_3D)
      surf[0] |= GEN8_SURFACE_IS_ARRAY;

   surf[1] = SET_FIELD(mocs_wb, GEN8_SURFACE_MOCS) | mt->qpitch >> 2;

   surf[2] = SET_FIELD(mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = SET_FIELD(depth - 1, BRW_SURFACE_DEPTH) | (pitch - 1);

   surf[4] = gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout) |
             SET_FIELD(min_layer, GEN7_SURFACE_MIN_ARRAY_ELEMENT) |
             SET_FIELD(depth - 1, GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT);

   /* Min LOD is relative to the miptree's base level. */
   surf[5] = SET_FIELD(min_level - mt->first_level, GEN7_SURFACE_MIN_LOD) |
             (max_level - min_level - 1); /* mip count */

   if (brw->gen >= 9) {
      surf[5] |= SET_FIELD(tr_mode, GEN9_SURFACE_TRMODE);
      /* Disable Mip Tail by setting a large value. */
      surf[5] |= SET_FIELD(15, GEN9_SURFACE_MIP_TAIL_START_LOD);
   }

   /* Program the auxiliary (MCS/CCS) surface's qpitch and pitch-in-tiles. */
   if (aux_mt) {
      uint32_t tile_w, tile_h;
      assert(aux_mt->tiling == I915_TILING_Y);
      intel_get_tile_dims(aux_mt->tiling, aux_mt->tr_mode,
                          aux_mt->cpp, &tile_w, &tile_h);
      surf[6] = SET_FIELD(aux_mt->qpitch / 4, GEN8_SURFACE_AUX_QPITCH) |
                SET_FIELD((aux_mt->pitch / tile_w) - 1,
                          GEN8_SURFACE_AUX_PITCH) |
                aux_mode;
   }

   /* Fast clear color first — it ORs into surf[7] on Gen8. */
   gen8_emit_fast_clear_color(brw, mt, surf);
   surf[7] |=
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 0)), GEN7_SURFACE_SCS_R) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 1)), GEN7_SURFACE_SCS_G) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 2)), GEN7_SURFACE_SCS_B) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 3)), GEN7_SURFACE_SCS_A);

   *((uint64_t *) &surf[8]) = mt->bo->offset64 + mt->offset; /* reloc */

   /* Relocation for the auxiliary surface address in dwords 10-11. */
   if (aux_mt) {
      *((uint64_t *) &surf[10]) = aux_mt->bo->offset64;
      drm_intel_bo_emit_reloc(brw->batch.bo, *surf_offset + 10 * 4,
                              aux_mt->bo, 0,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 8 * 4,
                           mt->bo,
                           mt->offset,
                           I915_GEM_DOMAIN_SAMPLER,
                           (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
}
359
/**
 * Emit the SURFACE_STATE for one bound texture unit.
 *
 * \param unit        texture unit index into ctx->Texture.Unit.
 * \param surf_offset receives the state offset; its position within
 *                    brw->wm.base.surf_offset determines surf_index.
 * \param for_gather  true when emitting the textureGather() variant.
 * \param plane       0 selects the texture's own miptree/format; >0 selects
 *                    plane N stored in mt->plane[plane - 1] of a
 *                    multi-planar miptree.
 */
static void
gen8_update_texture_surface(struct gl_context *ctx,
                            unsigned unit,
                            uint32_t *surf_offset,
                            bool for_gather,
                            uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      /* Buffer textures use a dedicated buffer-surface path. */
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      /* Skip emitting surface state entirely when the requested plane
       * doesn't exist for this miptree.
       */
      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      const unsigned depth = (obj->Immutable && obj->Target != GL_TEXTURE_3D ?
                              obj->NumLayers : mt->logical_depth0);

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      /* Plane 0 uses the object's overall format; other planes use the
       * plane miptree's own format.
       */
      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      unsigned format = translate_tex_format(brw, mesa_fmt,
                                             sampler->sRGBDecode);

      /* Stencil texturing samples the separate stencil miptree as R8_UINT. */
      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         mt = mt->stencil_mt;
         format = BRW_SURFACEFORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      gen8_emit_texture_surface_state(brw, mt, obj->Target,
                                      obj->MinLayer, obj->MinLayer + depth,
                                      obj->MinLevel + obj->BaseLevel,
                                      obj->MinLevel + intel_obj->_MaxLevel + 1,
                                      format, swizzle, surf_offset,
                                      surf_index, false, for_gather);
   }
}
420
421 /**
422 * Creates a null surface.
423 *
424 * This is used when the shader doesn't write to any color output. An FB
425 * write to target 0 will still be emitted, because that's how the thread is
426 * terminated (and computed depth is returned), so we need to have the
427 * hardware discard the target 0 color output..
428 */
429 static void
430 gen8_emit_null_surface_state(struct brw_context *brw,
431 unsigned width,
432 unsigned height,
433 unsigned samples,
434 uint32_t *out_offset)
435 {
436 uint32_t *surf = gen8_allocate_surface_state(brw, out_offset, -1);
437
438 surf[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
439 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT |
440 GEN8_SURFACE_TILING_Y;
441 surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
442 SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);
443 }
444
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 *
 * \param layered  unused here; layering is derived from irb->layer_count.
 * \param unit     unused.
 * \return the state-space offset of the emitted SURFACE_STATE.
 */
static uint32_t
gen8_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 bool layered, unsigned unit /* unused */,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   unsigned width = mt->logical_width0;
   unsigned height = mt->logical_height0;
   unsigned pitch = mt->pitch;
   uint32_t tiling = mt->tiling;
   unsigned tr_mode = surface_tiling_resource_mode(mt->tr_mode);
   uint32_t format = 0;
   uint32_t surf_type;
   uint32_t offset;
   bool is_array = false;
   int depth = MAX2(irb->layer_count, 1);
   /* Stencil layers aren't scaled by the sample count the way color
    * layers are.
    */
   const int min_array_element = (mt->format == MESA_FORMAT_S_UINT8) ?
      irb->mt_layer : (irb->mt_layer / MAX2(mt->num_samples, 1));
   GLenum gl_target =
      rb->TexImage ? rb->TexImage->TexObject->Target : GL_TEXTURE_2D;
   const uint32_t mocs = brw->gen >= 9 ? SKL_MOCS_PTE : BDW_MOCS_PTE;

   intel_miptree_used_for_rendering(mt);

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      /* Cube maps render as 2D arrays with six faces per cube. */
      surf_type = BRW_SURFACE_2D;
      is_array = true;
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      depth = MAX2(irb->mt->logical_depth0, 1);
      /* fallthrough */
   default:
      surf_type = translate_tex_target(gl_target);
      is_array = _mesa_is_array_texture(mt->target);
      break;
   }

   /* _NEW_BUFFERS */
   /* Render targets can't use IMS layout. */
   assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);
   assert(brw_render_target_supported(brw, rb));
   mesa_format rb_format = _mesa_get_render_format(ctx,
                                                   intel_rb_format(irb));
   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format]))
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));

   struct intel_mipmap_tree *aux_mt = mt->mcs_mt;
   const uint32_t aux_mode = gen8_get_aux_mode(brw, mt);

   uint32_t *surf = gen8_allocate_surface_state(brw, &offset, surf_index);

   surf[0] = (surf_type << BRW_SURFACE_TYPE_SHIFT) |
             (is_array ? GEN7_SURFACE_IS_ARRAY : 0) |
             (format << BRW_SURFACE_FORMAT_SHIFT) |
             gen8_vertical_alignment(brw, mt, surf_type) |
             gen8_horizontal_alignment(brw, mt, surf_type) |
             gen8_surface_tiling_mode(tiling);

   surf[1] = SET_FIELD(mocs, GEN8_SURFACE_MOCS) | mt->qpitch >> 2;

   surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = (depth - 1) << BRW_SURFACE_DEPTH_SHIFT |
             (pitch - 1); /* Surface Pitch */

   surf[4] = min_array_element << GEN7_SURFACE_MIN_ARRAY_ELEMENT_SHIFT |
             (depth - 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT;

   if (mt->format != MESA_FORMAT_S_UINT8)
      surf[4] |= gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout);

   surf[5] = irb->mt_level - irb->mt->first_level;

   if (brw->gen >= 9) {
      surf[5] |= SET_FIELD(tr_mode, GEN9_SURFACE_TRMODE);
      /* Disable Mip Tail by setting a large value. */
      surf[5] |= SET_FIELD(15, GEN9_SURFACE_MIP_TAIL_START_LOD);
   }

   /* Program the auxiliary (MCS/CCS) surface's qpitch and pitch-in-tiles. */
   if (aux_mt) {
      uint32_t tile_w, tile_h;
      assert(aux_mt->tiling == I915_TILING_Y);
      intel_get_tile_dims(aux_mt->tiling, aux_mt->tr_mode,
                          aux_mt->cpp, &tile_w, &tile_h);
      surf[6] = SET_FIELD(aux_mt->qpitch / 4, GEN8_SURFACE_AUX_QPITCH) |
                SET_FIELD((aux_mt->pitch / tile_w) - 1,
                          GEN8_SURFACE_AUX_PITCH) |
                aux_mode;
   }

   /* Fast clear color first — it ORs into surf[7] on Gen8. */
   gen8_emit_fast_clear_color(brw, mt, surf);
   /* Identity shader channel selects for render targets. */
   surf[7] |= SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
              SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
              SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
              SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A);

   assert(mt->offset % mt->cpp == 0);
   *((uint64_t *) &surf[8]) = mt->bo->offset64 + mt->offset; /* reloc */

   /* Relocation for the auxiliary surface address in dwords 10-11. */
   if (aux_mt) {
      *((uint64_t *) &surf[10]) = aux_mt->bo->offset64;
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              offset + 10 * 4,
                              aux_mt->bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }

   /* Relocation for the main surface address in dwords 8-9. */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 8 * 4,
                           mt->bo,
                           mt->offset,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}
575
576 void
577 gen8_init_vtable_surface_functions(struct brw_context *brw)
578 {
579 brw->vtbl.update_texture_surface = gen8_update_texture_surface;
580 brw->vtbl.update_renderbuffer_surface = gen8_update_renderbuffer_surface;
581 brw->vtbl.emit_null_surface_state = gen8_emit_null_surface_state;
582 brw->vtbl.emit_texture_surface_state = gen8_emit_texture_surface_state;
583 brw->vtbl.emit_buffer_surface_state = gen8_emit_buffer_surface_state;
584 }