i965: Support textures with multiple planes
[mesa.git] / src / mesa / drivers / dri / i965 / gen8_surface_state.c
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "main/blend.h"
25 #include "main/mtypes.h"
26 #include "main/samplerobj.h"
27 #include "main/texformat.h"
28 #include "main/teximage.h"
29 #include "program/prog_parameter.h"
30 #include "program/prog_instruction.h"
31
32 #include "intel_mipmap_tree.h"
33 #include "intel_batchbuffer.h"
34 #include "intel_tex.h"
35 #include "intel_fbo.h"
36 #include "intel_buffer_objects.h"
37 #include "intel_image.h"
38
39 #include "brw_context.h"
40 #include "brw_state.h"
41 #include "brw_defines.h"
42 #include "brw_wm.h"
43
/**
 * Convert a Mesa swizzle enumeration (e.g. SWIZZLE_X) to the Gen7.5+
 * "Shader Channel Select" encoding (e.g. HSW_SCS_RED).  The two
 * enumerations are rotations of one another:
 *
 *   SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *        0          1          2          3           4            5
 *        4          5          6          7           0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE,  SCS_ALPHA,   SCS_ZERO,    SCS_ONE
 *
 * so converting is simply an add of 4, modulo 8.
 */
static unsigned
swizzle_to_scs(unsigned swizzle)
{
   return (swizzle + 4) % 8;
}
60
61 static uint32_t
62 surface_tiling_resource_mode(uint32_t tr_mode)
63 {
64 switch (tr_mode) {
65 case INTEL_MIPTREE_TRMODE_YF:
66 return GEN9_SURFACE_TRMODE_TILEYF;
67 case INTEL_MIPTREE_TRMODE_YS:
68 return GEN9_SURFACE_TRMODE_TILEYS;
69 default:
70 return GEN9_SURFACE_TRMODE_NONE;
71 }
72 }
73
74 uint32_t
75 gen8_surface_tiling_mode(uint32_t tiling)
76 {
77 switch (tiling) {
78 case I915_TILING_X:
79 return GEN8_SURFACE_TILING_X;
80 case I915_TILING_Y:
81 return GEN8_SURFACE_TILING_Y;
82 default:
83 return GEN8_SURFACE_TILING_NONE;
84 }
85 }
86
87 unsigned
88 gen8_vertical_alignment(const struct brw_context *brw,
89 const struct intel_mipmap_tree *mt,
90 uint32_t surf_type)
91 {
92 /* On Gen9+ vertical alignment is ignored for 1D surfaces and when
93 * tr_mode is not TRMODE_NONE. Set to an arbitrary non-reserved value.
94 */
95 if (brw->gen > 8 &&
96 (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE ||
97 surf_type == BRW_SURFACE_1D))
98 return GEN8_SURFACE_VALIGN_4;
99
100 switch (mt->valign) {
101 case 4:
102 return GEN8_SURFACE_VALIGN_4;
103 case 8:
104 return GEN8_SURFACE_VALIGN_8;
105 case 16:
106 return GEN8_SURFACE_VALIGN_16;
107 default:
108 unreachable("Unsupported vertical surface alignment.");
109 }
110 }
111
112 unsigned
113 gen8_horizontal_alignment(const struct brw_context *brw,
114 const struct intel_mipmap_tree *mt,
115 uint32_t surf_type)
116 {
117 /* On Gen9+ horizontal alignment is ignored when tr_mode is not
118 * TRMODE_NONE. Set to an arbitrary non-reserved value.
119 */
120 if (brw->gen > 8 &&
121 (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE ||
122 gen9_use_linear_1d_layout(brw, mt)))
123 return GEN8_SURFACE_HALIGN_4;
124
125 switch (mt->halign) {
126 case 4:
127 return GEN8_SURFACE_HALIGN_4;
128 case 8:
129 return GEN8_SURFACE_HALIGN_8;
130 case 16:
131 return GEN8_SURFACE_HALIGN_16;
132 default:
133 unreachable("Unsupported horizontal surface alignment.");
134 }
135 }
136
137 uint32_t *
138 gen8_allocate_surface_state(struct brw_context *brw,
139 uint32_t *out_offset, int index)
140 {
141 int dwords = brw->gen >= 9 ? 16 : 13;
142 uint32_t *surf = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
143 dwords * 4, 64, index, out_offset);
144 memset(surf, 0, dwords * 4);
145 return surf;
146 }
147
/**
 * Emit a SURFACE_STATE for a buffer (BRW_SURFACE_BUFFER) object.
 *
 * The buffer size is encoded as (buffer_size - 1) split across the WIDTH
 * (bits 6:0), HEIGHT (bits 20:7) and DEPTH fields.  \p rw selects whether
 * the relocation allows GPU writes through the surface.
 */
static void
gen8_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   /* Write-back cacheable MOCS for the current generation. */
   const unsigned mocs = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
   uint32_t *surf = gen8_allocate_surface_state(brw, out_offset, -1);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = SET_FIELD(mocs, GEN8_SURFACE_MOCS);

   /* Low 21 bits of (size - 1) go into WIDTH/HEIGHT... */
   surf[2] = SET_FIELD((buffer_size - 1) & 0x7f, GEN7_SURFACE_WIDTH) |
             SET_FIELD(((buffer_size - 1) >> 7) & 0x3fff, GEN7_SURFACE_HEIGHT);
   /* ...and the remainder into DEPTH.  RAW surfaces get a 10-bit depth
    * field (total 2^31 range); typed formats only 6 bits (2^27 elements).
    */
   if (surface_format == BRW_SURFACEFORMAT_RAW)
      surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3ff, BRW_SURFACE_DEPTH);
   else
      surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3f, BRW_SURFACE_DEPTH);
   surf[3] |= (pitch - 1);
   /* Identity channel swizzle. */
   surf[7] = SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
             SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
             SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
             SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A);
   /* reloc: 64-bit graphics address in dwords 8-9; a NULL bo leaves just
    * the offset (no relocation is emitted below in that case).
    */
   *((uint64_t *) &surf[8]) = (bo ? bo->offset64 : 0) + buffer_offset;

   /* Emit relocation to surface contents. */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 8 * 4,
                              bo, buffer_offset, I915_GEM_DOMAIN_SAMPLER,
                              rw ? I915_GEM_DOMAIN_SAMPLER : 0);
   }
}
187
188 void
189 gen8_emit_fast_clear_color(const struct brw_context *brw,
190 const struct intel_mipmap_tree *mt,
191 uint32_t *surf)
192 {
193 if (brw->gen >= 9) {
194 surf[12] = mt->gen9_fast_clear_color.ui[0];
195 surf[13] = mt->gen9_fast_clear_color.ui[1];
196 surf[14] = mt->gen9_fast_clear_color.ui[2];
197 surf[15] = mt->gen9_fast_clear_color.ui[3];
198 } else
199 surf[7] |= mt->fast_clear_color_value;
200 }
201
202 uint32_t
203 gen8_get_aux_mode(const struct brw_context *brw,
204 const struct intel_mipmap_tree *mt)
205 {
206 if (mt->mcs_mt == NULL)
207 return GEN8_SURFACE_AUX_MODE_NONE;
208
209 /*
210 * From the BDW PRM, Volume 2d, page 260 (RENDER_SURFACE_STATE):
211 * "When MCS is enabled for non-MSRT, HALIGN_16 must be used"
212 *
213 * From the hardware spec for GEN9:
214 * "When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E, HALIGN
215 * 16 must be used."
216 */
217 if (brw->gen >= 9 || mt->num_samples == 1)
218 assert(mt->halign == 16);
219
220 if (intel_miptree_is_lossless_compressed(brw, mt))
221 return GEN9_SURFACE_AUX_MODE_CCS_E;
222
223 return GEN8_SURFACE_AUX_MODE_MCS;
224 }
225
/**
 * Fill out and emit a SURFACE_STATE for sampling from a miptree.
 *
 * [min_layer, max_layer) and [min_level, max_level) select the slice and
 * mip window the sampler may see; \p format and \p swizzle override what
 * the miptree itself stores.  The state offset is returned in
 * *surf_offset, and relocations are emitted for the main (and, if
 * present, auxiliary) buffer objects.
 */
static void
gen8_emit_texture_surface_state(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                GLenum target,
                                unsigned min_layer, unsigned max_layer,
                                unsigned min_level, unsigned max_level,
                                unsigned format,
                                unsigned swizzle,
                                uint32_t *surf_offset, int surf_index,
                                bool rw, bool for_gather)
{
   const unsigned depth = max_layer - min_layer;
   struct intel_mipmap_tree *aux_mt = mt->mcs_mt;
   uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
   unsigned tiling_mode, pitch;
   const unsigned tr_mode = surface_tiling_resource_mode(mt->tr_mode);
   const uint32_t surf_type = translate_tex_target(target);
   uint32_t aux_mode = gen8_get_aux_mode(brw, mt);

   if (mt->format == MESA_FORMAT_S_UINT8) {
      /* Stencil is W-tiled: program TILING_W and double the pitch.
       * NOTE(review): the 2x factor presumably compensates for the W-tile
       * geometry — confirm against the stencil-buffer setup code.
       */
      tiling_mode = GEN8_SURFACE_TILING_W;
      pitch = 2 * mt->pitch;
   } else {
      tiling_mode = gen8_surface_tiling_mode(mt->tiling);
      pitch = mt->pitch;
   }

   /* Prior to Gen9, MCS is not uploaded for single-sampled surfaces because
    * the color buffer should always have been resolved before it is used as
    * a texture so there is no need for it. On Gen9 it will be uploaded when
    * the surface is losslessly compressed (CCS_E).
    */
   if (mt->num_samples <= 1 && aux_mode != GEN9_SURFACE_AUX_MODE_CCS_E) {
      aux_mt = NULL;
      aux_mode = GEN8_SURFACE_AUX_MODE_NONE;
   }

   uint32_t *surf = gen8_allocate_surface_state(brw, surf_offset, surf_index);

   surf[0] = SET_FIELD(surf_type, BRW_SURFACE_TYPE) |
             format << BRW_SURFACE_FORMAT_SHIFT |
             gen8_vertical_alignment(brw, mt, surf_type) |
             gen8_horizontal_alignment(brw, mt, surf_type) |
             tiling_mode;

   if (surf_type == BRW_SURFACE_CUBE) {
      /* Make all six cube faces visible to the sampler. */
      surf[0] |= BRW_SURFACE_CUBEFACE_ENABLES;
   }

   /* From the CHV PRM, Volume 2d, page 321 (RENDER_SURFACE_STATE dword 0
    * bit 9 "Sampler L2 Bypass Mode Disable" Programming Notes):
    *
    *    This bit must be set for the following surface types: BC2_UNORM
    *    BC3_UNORM BC5_UNORM BC5_SNORM BC7_UNORM
    */
   if ((brw->gen >= 9 || brw->is_cherryview) &&
       (format == BRW_SURFACEFORMAT_BC2_UNORM ||
        format == BRW_SURFACEFORMAT_BC3_UNORM ||
        format == BRW_SURFACEFORMAT_BC5_UNORM ||
        format == BRW_SURFACEFORMAT_BC5_SNORM ||
        format == BRW_SURFACEFORMAT_BC7_UNORM))
      surf[0] |= GEN8_SURFACE_SAMPLER_L2_BYPASS_DISABLE;

   if (_mesa_is_array_texture(mt->target) || mt->target == GL_TEXTURE_CUBE_MAP)
      surf[0] |= GEN8_SURFACE_IS_ARRAY;

   /* QPitch is specified in units of 4 rows. */
   surf[1] = SET_FIELD(mocs_wb, GEN8_SURFACE_MOCS) | mt->qpitch >> 2;

   surf[2] = SET_FIELD(mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = SET_FIELD(depth - 1, BRW_SURFACE_DEPTH) | (pitch - 1);

   surf[4] = gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout) |
             SET_FIELD(min_layer, GEN7_SURFACE_MIN_ARRAY_ELEMENT) |
             SET_FIELD(depth - 1, GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT);

   /* MIN_LOD is relative to the miptree's first level. */
   surf[5] = SET_FIELD(min_level - mt->first_level, GEN7_SURFACE_MIN_LOD) |
             (max_level - min_level - 1); /* mip count */

   if (brw->gen >= 9) {
      surf[5] |= SET_FIELD(tr_mode, GEN9_SURFACE_TRMODE);
      /* Disable Mip Tail by setting a large value. */
      surf[5] |= SET_FIELD(15, GEN9_SURFACE_MIP_TAIL_START_LOD);
   }

   if (aux_mt) {
      uint32_t tile_w, tile_h;
      assert(aux_mt->tiling == I915_TILING_Y);
      intel_get_tile_dims(aux_mt->tiling, aux_mt->tr_mode,
                          aux_mt->cpp, &tile_w, &tile_h);
      /* Aux pitch is programmed in tiles, aux qpitch in units of 4 rows. */
      surf[6] = SET_FIELD(mt->qpitch / 4, GEN8_SURFACE_AUX_QPITCH) |
                SET_FIELD((aux_mt->pitch / tile_w) - 1,
                          GEN8_SURFACE_AUX_PITCH) |
                aux_mode;
   }

   gen8_emit_fast_clear_color(brw, mt, surf);
   surf[7] |=
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 0)), GEN7_SURFACE_SCS_R) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 1)), GEN7_SURFACE_SCS_G) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 2)), GEN7_SURFACE_SCS_B) |
      SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 3)), GEN7_SURFACE_SCS_A);

   /* 64-bit graphics address in dwords 8-9, patched by the reloc below. */
   *((uint64_t *) &surf[8]) = mt->bo->offset64 + mt->offset; /* reloc */

   if (aux_mt) {
      /* Auxiliary surface address in dwords 10-11, plus its relocation. */
      *((uint64_t *) &surf[10]) = aux_mt->bo->offset64;
      drm_intel_bo_emit_reloc(brw->batch.bo, *surf_offset + 10 * 4,
                              aux_mt->bo, 0,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 8 * 4,
                           mt->bo,
                           mt->offset,
                           I915_GEM_DOMAIN_SAMPLER,
                           (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
}
348
/**
 * Emit the SURFACE_STATE for texture unit \p unit.
 *
 * Resolves the bound GL texture object to a miptree (handling buffer
 * textures, stencil texturing, and multi-planar external images) and
 * forwards to gen8_emit_texture_surface_state().  \p plane selects which
 * plane of a GL_TEXTURE_EXTERNAL_OES image is sampled (0 = base miptree).
 */
static void
gen8_update_texture_surface(struct gl_context *ctx,
                            unsigned unit,
                            uint32_t *surf_offset,
                            bool for_gather,
                            uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      /* Buffer textures have their own, simpler path. */
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;
      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      const unsigned depth = (obj->Immutable && obj->Target != GL_TEXTURE_3D ?
                              obj->NumLayers : mt->logical_depth0);

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      unsigned format = translate_tex_format(brw, intel_obj->_Format,
                                             sampler->sRGBDecode);
      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         /* Stencil texturing samples from the separate stencil miptree. */
         mt = mt->stencil_mt;
         format = BRW_SURFACEFORMAT_R8_UINT;
      } else if (obj->Target == GL_TEXTURE_EXTERNAL_OES) {
         /* Multi-planar external image: plane 0 is the base miptree, the
          * remaining planes hang off mt->plane[].  A missing plane means
          * there is nothing to emit for this surface.
          */
         if (plane > 0)
            mt = mt->plane[plane - 1];
         if (mt == NULL)
            return;

         format = translate_tex_format(brw, mt->format, sampler->sRGBDecode);

      }

      /* Recover the binding-table index from the offset pointer. */
      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      gen8_emit_texture_surface_state(brw, mt, obj->Target,
                                      obj->MinLayer, obj->MinLayer + depth,
                                      obj->MinLevel + obj->BaseLevel,
                                      obj->MinLevel + intel_obj->_MaxLevel + 1,
                                      format, swizzle, surf_offset,
                                      surf_index, false, for_gather);
   }
}
408
409 /**
410 * Creates a null surface.
411 *
412 * This is used when the shader doesn't write to any color output. An FB
413 * write to target 0 will still be emitted, because that's how the thread is
414 * terminated (and computed depth is returned), so we need to have the
415 * hardware discard the target 0 color output..
416 */
417 static void
418 gen8_emit_null_surface_state(struct brw_context *brw,
419 unsigned width,
420 unsigned height,
421 unsigned samples,
422 uint32_t *out_offset)
423 {
424 uint32_t *surf = gen8_allocate_surface_state(brw, out_offset, -1);
425
426 surf[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
427 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT |
428 GEN8_SURFACE_TILING_Y;
429 surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
430 SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);
431 }
432
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 *
 * Returns the batch offset of the emitted SURFACE_STATE.  \p unit is
 * unused; \p layered selects layered rendering (the full layer range is
 * always exposed via depth/min_array_element below).
 */
static uint32_t
gen8_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 bool layered, unsigned unit /* unused */,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   unsigned width = mt->logical_width0;
   unsigned height = mt->logical_height0;
   unsigned pitch = mt->pitch;
   uint32_t tiling = mt->tiling;
   unsigned tr_mode = surface_tiling_resource_mode(mt->tr_mode);
   uint32_t format = 0;
   uint32_t surf_type;
   uint32_t offset;
   bool is_array = false;
   int depth = MAX2(irb->layer_count, 1);
   /* Stencil layers are not multiplied by the sample count the way the
    * other attachments' layers are.
    */
   const int min_array_element = (mt->format == MESA_FORMAT_S_UINT8) ?
      irb->mt_layer : (irb->mt_layer / MAX2(mt->num_samples, 1));
   GLenum gl_target =
      rb->TexImage ? rb->TexImage->TexObject->Target : GL_TEXTURE_2D;
   /* Render targets use PTE-based MOCS (inherit caching from the PTE). */
   const uint32_t mocs = brw->gen >= 9 ? SKL_MOCS_PTE : BDW_MOCS_PTE;

   intel_miptree_used_for_rendering(mt);

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      /* Cube maps render as 2D arrays with six faces per cube. */
      surf_type = BRW_SURFACE_2D;
      is_array = true;
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      depth = MAX2(irb->mt->logical_depth0, 1);
      /* fallthrough */
   default:
      surf_type = translate_tex_target(gl_target);
      is_array = _mesa_is_array_texture(mt->target);
      break;
   }

   /* _NEW_BUFFERS */
   /* Render targets can't use IMS layout. Stencil in turn gets configured as
    * single sampled and indexed manually by the program.
    */
   if (mt->format == MESA_FORMAT_S_UINT8) {
      brw_configure_w_tiled(mt, true, &width, &height, &pitch,
                            &tiling, &format);
   } else {
      assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);
      assert(brw_render_target_supported(brw, rb));
      mesa_format rb_format = _mesa_get_render_format(ctx,
                                                      intel_rb_format(irb));
      format = brw->render_target_format[rb_format];
      if (unlikely(!brw->format_supported_as_render_target[rb_format]))
         _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                       __func__, _mesa_get_format_name(rb_format));
   }

   struct intel_mipmap_tree *aux_mt = mt->mcs_mt;
   const uint32_t aux_mode = gen8_get_aux_mode(brw, mt);

   uint32_t *surf = gen8_allocate_surface_state(brw, &offset, surf_index);

   surf[0] = (surf_type << BRW_SURFACE_TYPE_SHIFT) |
             (is_array ? GEN7_SURFACE_IS_ARRAY : 0) |
             (format << BRW_SURFACE_FORMAT_SHIFT) |
             gen8_vertical_alignment(brw, mt, surf_type) |
             gen8_horizontal_alignment(brw, mt, surf_type) |
             gen8_surface_tiling_mode(tiling);

   /* QPitch is specified in units of 4 rows. */
   surf[1] = SET_FIELD(mocs, GEN8_SURFACE_MOCS) | mt->qpitch >> 2;

   surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = (depth - 1) << BRW_SURFACE_DEPTH_SHIFT |
             (pitch - 1); /* Surface Pitch */

   surf[4] = min_array_element << GEN7_SURFACE_MIN_ARRAY_ELEMENT_SHIFT |
             (depth - 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT;

   /* Stencil is programmed single-sampled (see comment above), so only
    * set the MSAA bits for the other formats.
    */
   if (mt->format != MESA_FORMAT_S_UINT8)
      surf[4] |= gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout);

   surf[5] = irb->mt_level - irb->mt->first_level;

   if (brw->gen >= 9) {
      surf[5] |= SET_FIELD(tr_mode, GEN9_SURFACE_TRMODE);
      /* Disable Mip Tail by setting a large value. */
      surf[5] |= SET_FIELD(15, GEN9_SURFACE_MIP_TAIL_START_LOD);
   }

   if (aux_mt) {
      uint32_t tile_w, tile_h;
      assert(aux_mt->tiling == I915_TILING_Y);
      intel_get_tile_dims(aux_mt->tiling, aux_mt->tr_mode,
                          aux_mt->cpp, &tile_w, &tile_h);
      /* Aux pitch is programmed in tiles, aux qpitch in units of 4 rows. */
      surf[6] = SET_FIELD(mt->qpitch / 4, GEN8_SURFACE_AUX_QPITCH) |
                SET_FIELD((aux_mt->pitch / tile_w) - 1,
                          GEN8_SURFACE_AUX_PITCH) |
                aux_mode;
   }

   gen8_emit_fast_clear_color(brw, mt, surf);
   /* Identity channel swizzle. */
   surf[7] |= SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
              SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
              SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
              SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A);

   assert(mt->offset % mt->cpp == 0);
   /* 64-bit graphics address in dwords 8-9, patched by the reloc below. */
   *((uint64_t *) &surf[8]) = mt->bo->offset64 + mt->offset; /* reloc */

   if (aux_mt) {
      /* Auxiliary surface address in dwords 10-11, plus its relocation. */
      *((uint64_t *) &surf[10]) = aux_mt->bo->offset64;
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              offset + 10 * 4,
                              aux_mt->bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 8 * 4,
                           mt->bo,
                           mt->offset,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}
570
571 void
572 gen8_init_vtable_surface_functions(struct brw_context *brw)
573 {
574 brw->vtbl.update_texture_surface = gen8_update_texture_surface;
575 brw->vtbl.update_renderbuffer_surface = gen8_update_renderbuffer_surface;
576 brw->vtbl.emit_null_surface_state = gen8_emit_null_surface_state;
577 brw->vtbl.emit_texture_surface_state = gen8_emit_texture_surface_state;
578 brw->vtbl.emit_buffer_surface_state = gen8_emit_buffer_surface_state;
579 }