i965: Remove the create_raw_surface vtbl hook.
src/mesa/drivers/dri/i965/gen7_wm_surface_state.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/mtypes.h"
#include "main/blend.h"
#include "main/samplerobj.h"
#include "main/texformat.h"
#include "program/prog_parameter.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
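 * For example, SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 = HSW_SCS_RED, and
 * SWIZZLE_ZERO (4) wraps around to (4 + 4) & 7 = 0 = HSW_SCS_ZERO.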
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

uint32_t
gen7_surface_tiling_mode(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return GEN7_SURFACE_TILING_X;
   case I915_TILING_Y:
      return GEN7_SURFACE_TILING_Y;
   default:
      return GEN7_SURFACE_TILING_NONE;
   }
}


uint32_t
gen7_surface_msaa_bits(unsigned num_samples, enum intel_msaa_layout layout)
{
   uint32_t ss4 = 0;

   assert(num_samples <= 8);

   /* The SURFACE_MULTISAMPLECOUNT_X enums are simply log2(num_samples) << 3. */
   ss4 |= (ffs(MAX2(num_samples, 1)) - 1) << 3;
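   /* For example, with num_samples == 4, ffs(4) - 1 == 2 == log2(4), so the
    * field is programmed as 2 << 3 (SURFACE_MULTISAMPLECOUNT_4).
    */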

   if (layout == INTEL_MSAA_LAYOUT_IMS)
      ss4 |= GEN7_SURFACE_MSFMT_DEPTH_STENCIL;
   else
      ss4 |= GEN7_SURFACE_MSFMT_MSS;

   return ss4;
}


void
gen7_set_surface_mcs_info(struct brw_context *brw,
                          uint32_t *surf,
                          uint32_t surf_offset,
                          const struct intel_mipmap_tree *mcs_mt,
                          bool is_render_target)
{
   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   assert(mcs_mt->tiling == I915_TILING_Y);

   /* Compute the pitch in units of tiles.  To do this we need to divide the
    * pitch in bytes by 128, since a single Y-tile is 128 bytes wide.
    */
   unsigned pitch_tiles = mcs_mt->pitch / 128;

   /* The upper 20 bits of surface state DWORD 6 are the upper 20 bits of the
    * GPU address of the MCS buffer; the lower 12 bits contain other control
    * information.  Since buffer addresses are always on 4k boundaries (and
    * thus have their lower 12 bits zero), we can use an ordinary reloc to do
    * the necessary address translation.
    */
   assert((mcs_mt->bo->offset64 & 0xfff) == 0);

   surf[6] = GEN7_SURFACE_MCS_ENABLE |
             SET_FIELD(pitch_tiles - 1, GEN7_SURFACE_MCS_PITCH) |
             mcs_mt->bo->offset64;

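   /* Only the low control bits (surf[6] & 0xfff) are passed as the relocation
    * delta below; the final MCS BO address gets added to that delta, which
    * preserves the enable and pitch bits packed above.
    */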
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           surf_offset + 6 * 4,
                           mcs_mt->bo,
                           surf[6] & 0xfff,
                           is_render_target ? I915_GEM_DOMAIN_RENDER
                           : I915_GEM_DOMAIN_SAMPLER,
                           is_render_target ? I915_GEM_DOMAIN_RENDER : 0);
}


void
gen7_check_surface_setup(uint32_t *surf, bool is_render_target)
{
   unsigned num_multisamples = surf[4] & INTEL_MASK(5, 3);
   unsigned multisampled_surface_storage_format = surf[4] & (1 << 6);
   unsigned surface_array_spacing = surf[0] & (1 << 10);
   bool is_multisampled = num_multisamples != GEN7_SURFACE_MULTISAMPLECOUNT_1;

   (void) surface_array_spacing;

   /* From the Ivybridge PRM, Volume 4 Part 1, page 66 (RENDER_SURFACE_STATE
    * dword 0 bit 10 "Surface Array Spacing" Programming Notes):
    *
    *   If Multisampled Surface Storage Format is MSFMT_MSS and Number of
    *   Multisamples is not MULTISAMPLECOUNT_1, this field must be set to
    *   ARYSPC_LOD0.
    */
   if (multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS
       && is_multisampled)
      assert(surface_array_spacing == GEN7_SURFACE_ARYSPC_LOD0);

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage" Programming Notes):
    *
    *   All multisampled render target surfaces must have this field set to
    *   MSFMT_MSS.
    *
    * But also:
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   if (is_render_target && is_multisampled) {
      assert(multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage Format" Errata):
    *
    *   If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8, Width
    *   is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
    *   field must be set to MSFMT_MSS.
    */
   uint32_t width = GET_FIELD(surf[2], GEN7_SURFACE_WIDTH) + 1;
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 && width >= 8193) {
      assert(multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage Format" Errata):
    *
    *   If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8,
    *   ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface’s Number of
    *   Multisamples is MULTISAMPLECOUNT_4, ((Depth+1) * (Height+1)) is >
    *   8,388,608, this field must be set to MSFMT_DEPTH_STENCIL.  This field
    *   must be set to MSFMT_DEPTH_STENCIL if Surface Format is one of the
    *   following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
    *   R24_UNORM_X8_TYPELESS.
    *
    * But also (from the Programming Notes):
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   uint32_t depth = GET_FIELD(surf[3], BRW_SURFACE_DEPTH) + 1;
   uint32_t height = GET_FIELD(surf[2], GEN7_SURFACE_HEIGHT) + 1;
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 &&
       depth * height > 4194304) {
      assert(multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_4 &&
       depth * height > 8388608) {
      assert(multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (is_multisampled) {
      switch (GET_FIELD(surf[0], BRW_SURFACE_FORMAT)) {
      case BRW_SURFACEFORMAT_I24X8_UNORM:
      case BRW_SURFACEFORMAT_L24X8_UNORM:
      case BRW_SURFACEFORMAT_A24X8_UNORM:
      case BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS:
         assert(multisampled_surface_storage_format ==
                GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
      }
   }
}

static void
gen7_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    8 * 4, 32, out_offset);
   memset(surf, 0, 8 * 4);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
   surf[2] = SET_FIELD((buffer_size - 1) & 0x7f, GEN7_SURFACE_WIDTH) |
             SET_FIELD(((buffer_size - 1) >> 7) & 0x3fff, GEN7_SURFACE_HEIGHT);
   surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3f, BRW_SURFACE_DEPTH) |
             (pitch - 1);
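   /* For SURFTYPE_BUFFER, (buffer_size - 1) is the entry count minus one,
    * split across the Width (bits 6:0), Height (bits 20:7) and Depth
    * (bits 26:21) pieces above, so a buffer surface can describe at most
    * 2^27 entries.
    */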

   surf[5] = SET_FIELD(GEN7_MOCS_L3, GEN7_SURFACE_MOCS);

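   /* The surface state was zeroed above and HSW_SCS_ZERO is 0, so leaving
    * DWORD 7 untouched would make every channel read back zero on Haswell;
    * program an identity channel select instead.
    */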
   if (brw->is_haswell) {
      surf[7] |= (SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
                  SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
                  SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
                  SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A));
   }

   /* Emit relocation to surface contents */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
                              bo, buffer_offset, I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

static void
gen7_update_texture_surface(struct gl_context *ctx,
                            unsigned unit,
                            uint32_t *surf_offset,
                            bool for_gather)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct intel_mipmap_tree *mt = intelObj->mt;
   struct gl_texture_image *firstImage = tObj->Image[0][tObj->BaseLevel];
   struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);

   if (tObj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);
      return;
   }

   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    8 * 4, 32, surf_offset);
   memset(surf, 0, 8 * 4);

   uint32_t tex_format = translate_tex_format(brw,
                                              intelObj->_Format,
                                              sampler->sRGBDecode);

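   /* gather4 from an R32G32_FLOAT surface reportedly returns incorrect data,
    * so texture gathers use the R32G32_FLOAT_LD format instead (with a
    * green-to-blue channel-select fixup on Haswell, applied below).
    */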
   if (for_gather && tex_format == BRW_SURFACEFORMAT_R32G32_FLOAT)
      tex_format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;

   surf[0] = translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
             tex_format << BRW_SURFACE_FORMAT_SHIFT |
             gen7_surface_tiling_mode(mt->tiling);

   /* mask of faces present in cube map; for other surfaces MBZ. */
   if (tObj->Target == GL_TEXTURE_CUBE_MAP || tObj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
      surf[0] |= BRW_SURFACE_CUBEFACE_ENABLES;

   if (mt->align_h == 4)
      surf[0] |= GEN7_SURFACE_VALIGN_4;
   if (mt->align_w == 8)
      surf[0] |= GEN7_SURFACE_HALIGN_8;

   if (mt->logical_depth0 > 1 && tObj->Target != GL_TEXTURE_3D)
      surf[0] |= GEN7_SURFACE_IS_ARRAY;

   /* if this is a view with restricted NumLayers, then
    * our effective depth is not just the miptree depth.
    */
   uint32_t effective_depth = (tObj->Immutable && tObj->Target != GL_TEXTURE_3D)
                              ? tObj->NumLayers : mt->logical_depth0;

   if (mt->array_layout == ALL_SLICES_AT_EACH_LOD)
      surf[0] |= GEN7_SURFACE_ARYSPC_LOD0;

   surf[1] = mt->bo->offset64 + mt->offset; /* reloc */

   surf[2] = SET_FIELD(mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = SET_FIELD(effective_depth - 1, BRW_SURFACE_DEPTH) |
             (mt->pitch - 1);

   if (brw->is_haswell && tObj->_IsIntegerFormat)
      surf[3] |= HSW_SURFACE_IS_INTEGER_FORMAT;

   surf[4] = gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout) |
             SET_FIELD(tObj->MinLayer, GEN7_SURFACE_MIN_ARRAY_ELEMENT) |
             SET_FIELD((effective_depth - 1),
                       GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT);

   surf[5] = (SET_FIELD(GEN7_MOCS_L3, GEN7_SURFACE_MOCS) |
              SET_FIELD(tObj->MinLevel + tObj->BaseLevel - mt->first_level, GEN7_SURFACE_MIN_LOD) |
              /* mip count */
              (intelObj->_MaxLevel - tObj->BaseLevel));

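   /* mt->fast_clear_color_value already holds the clear color channels packed
    * in the DWORD 7 bit layout, so it can be written to the surface state
    * directly.
    */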
   surf[7] = mt->fast_clear_color_value;

   if (brw->is_haswell) {
      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      const bool alpha_depth = tObj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);

      const int swizzle = unlikely(alpha_depth)
         ? SWIZZLE_XYZW : brw_get_texture_swizzle(ctx, tObj);

      const bool need_scs_green_to_blue = for_gather && tex_format == BRW_SURFACEFORMAT_R32G32_FLOAT_LD;

      surf[7] |=
         SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 0), need_scs_green_to_blue), GEN7_SURFACE_SCS_R) |
         SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 1), need_scs_green_to_blue), GEN7_SURFACE_SCS_G) |
         SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 2), need_scs_green_to_blue), GEN7_SURFACE_SCS_B) |
         SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 3), need_scs_green_to_blue), GEN7_SURFACE_SCS_A);
   }

   if (mt->mcs_mt) {
      gen7_set_surface_mcs_info(brw, surf, *surf_offset,
                                mt->mcs_mt, false /* is RT */);
   }

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

/**
 * Creates a null surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
gen7_emit_null_surface_state(struct brw_context *brw,
                             unsigned width,
                             unsigned height,
                             unsigned samples,
                             uint32_t *out_offset)
{
   /* From the Ivy bridge PRM, Vol4 Part1 p62 (Surface Type: Programming
    * Notes):
    *
    *   A null surface is used in instances where an actual surface is not
    *   bound.  When a write message is generated to a null surface, no
    *   actual surface is written to.  When a read message (including any
    *   sampling engine message) is generated to a null surface, the result
    *   is all zeros.  Note that a null surface type is allowed to be used
    *   with all messages, even if it is not specifically indicated as
    *   supported.  All of the remaining fields in surface state are ignored
    *   for null surfaces, with the following exceptions: Width, Height,
    *   Depth, LOD, and Render Target View Extent fields must match the
    *   depth buffer’s corresponding state for all render target surfaces,
    *   including null.
    */
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
                                    out_offset);
   memset(surf, 0, 8 * 4);

   /* From the Ivybridge PRM, Volume 4, Part 1, page 65,
    * Tiled Surface: Programming Notes:
    * "If Surface Type is SURFTYPE_NULL, this field must be TRUE."
    */
   surf[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT |
             GEN7_SURFACE_TILING_Y;

   surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static void
gen7_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 bool layered,
                                 unsigned int unit)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t format;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   uint32_t surftype;
   bool is_array = false;
   int depth = MAX2(irb->layer_count, 1);
   const uint8_t mocs = GEN7_MOCS_L3;

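   /* irb->mt_layer counts physical miptree slices; for non-IMS multisampled
    * miptrees each logical layer occupies num_samples slices, so divide to
    * recover the logical array element.
    */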
   int min_array_element = irb->mt_layer / MAX2(mt->num_samples, 1);

   GLenum gl_target = rb->TexImage ?
      rb->TexImage->TexObject->Target : GL_TEXTURE_2D;

   uint32_t surf_index =
      brw->wm.prog_data->binding_table.render_target_start + unit;

   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
                                    &brw->wm.base.surf_offset[surf_index]);
   memset(surf, 0, 8 * 4);

   intel_miptree_used_for_rendering(irb->mt);

   /* Render targets can't use IMS layout */
   assert(irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);

   assert(brw_render_target_supported(brw, rb));
   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __FUNCTION__, _mesa_get_format_name(rb_format));
   }

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      surftype = BRW_SURFACE_2D;
      is_array = true;
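      /* A cube map is laid out as a 2D array with six faces per logical
       * layer, so scale the layer count accordingly.
       */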
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      depth = MAX2(irb->mt->logical_depth0, 1);
      /* fallthrough */
   default:
      surftype = translate_tex_target(gl_target);
      is_array = _mesa_tex_target_is_array(gl_target);
      break;
   }

   surf[0] = surftype << BRW_SURFACE_TYPE_SHIFT |
             format << BRW_SURFACE_FORMAT_SHIFT |
             (irb->mt->array_layout == ALL_SLICES_AT_EACH_LOD ?
              GEN7_SURFACE_ARYSPC_LOD0 : GEN7_SURFACE_ARYSPC_FULL) |
             gen7_surface_tiling_mode(mt->tiling);

   if (irb->mt->align_h == 4)
      surf[0] |= GEN7_SURFACE_VALIGN_4;
   if (irb->mt->align_w == 8)
      surf[0] |= GEN7_SURFACE_HALIGN_8;

   if (is_array) {
      surf[0] |= GEN7_SURFACE_IS_ARRAY;
   }

   assert(mt->offset % mt->cpp == 0);
   surf[1] = mt->bo->offset64 + mt->offset;

   assert(brw->has_surface_tile_offset);

   surf[5] = SET_FIELD(mocs, GEN7_SURFACE_MOCS) |
             (irb->mt_level - irb->mt->first_level);

   surf[2] = SET_FIELD(irb->mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(irb->mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = ((depth - 1) << BRW_SURFACE_DEPTH_SHIFT) |
             (mt->pitch - 1);

   surf[4] = gen7_surface_msaa_bits(irb->mt->num_samples, irb->mt->msaa_layout) |
             min_array_element << GEN7_SURFACE_MIN_ARRAY_ELEMENT_SHIFT |
             (depth - 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT;

   if (irb->mt->mcs_mt) {
      gen7_set_surface_mcs_info(brw, surf, brw->wm.base.surf_offset[surf_index],
                                irb->mt->mcs_mt, true /* is RT */);
   }

   surf[7] = irb->mt->fast_clear_color_value;

   if (brw->is_haswell) {
      surf[7] |= (SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
                  SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
                  SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
                  SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A));
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           brw->wm.base.surf_offset[surf_index] + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

void
gen7_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_texture_surface = gen7_update_texture_surface;
   brw->vtbl.update_renderbuffer_surface = gen7_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = gen7_emit_null_surface_state;
   brw->vtbl.emit_buffer_surface_state = gen7_emit_buffer_surface_state;
}