/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "program/prog_parameter.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/**
 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED).
 */
static unsigned
swizzle_to_scs(GLenum swizzle)
{
   switch (swizzle) {
   case SWIZZLE_X:
      return HSW_SCS_RED;
   case SWIZZLE_Y:
      return HSW_SCS_GREEN;
   case SWIZZLE_Z:
      return HSW_SCS_BLUE;
   case SWIZZLE_W:
      return HSW_SCS_ALPHA;
   case SWIZZLE_ZERO:
      return HSW_SCS_ZERO;
   case SWIZZLE_ONE:
      return HSW_SCS_ONE;
   }

   assert(!"Should not get here: invalid swizzle mode");
   return HSW_SCS_ZERO;
}
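
/*
 * Usage example (illustrative): callers below unpack a Mesa-packed swizzle
 * one channel at a time with GET_SWZ() and run each component through
 * swizzle_to_scs().  For an identity swizzle:
 *
 *    swizzle_to_scs(GET_SWZ(SWIZZLE_NOOP, 0)) == HSW_SCS_RED
 *    swizzle_to_scs(GET_SWZ(SWIZZLE_NOOP, 3)) == HSW_SCS_ALPHA
 *
 * while a component of SWIZZLE_ZERO or SWIZZLE_ONE selects the corresponding
 * constant instead of a surface channel.
 */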

void
gen7_set_surface_tiling(struct gen7_surface_state *surf, uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_NONE:
      surf->ss0.tiled_surface = 0;
      surf->ss0.tile_walk = 0;
      break;
   case I915_TILING_X:
      surf->ss0.tiled_surface = 1;
      surf->ss0.tile_walk = BRW_TILEWALK_XMAJOR;
      break;
   case I915_TILING_Y:
      surf->ss0.tiled_surface = 1;
      surf->ss0.tile_walk = BRW_TILEWALK_YMAJOR;
      break;
   }
}
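
/*
 * Illustrative mapping: I915_TILING_Y yields tiled_surface = 1 with
 * tile_walk = BRW_TILEWALK_YMAJOR, I915_TILING_X yields XMAJOR, and
 * I915_TILING_NONE leaves the surface linear (both fields zero).
 */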


void
gen7_set_surface_msaa(struct gen7_surface_state *surf, unsigned num_samples,
                      enum intel_msaa_layout layout)
{
   if (num_samples > 4)
      surf->ss4.num_multisamples = GEN7_SURFACE_MULTISAMPLECOUNT_8;
   else if (num_samples > 1)
      surf->ss4.num_multisamples = GEN7_SURFACE_MULTISAMPLECOUNT_4;
   else
      surf->ss4.num_multisamples = GEN7_SURFACE_MULTISAMPLECOUNT_1;

   surf->ss4.multisampled_surface_storage_format =
      layout == INTEL_MSAA_LAYOUT_IMS ?
      GEN7_SURFACE_MSFMT_DEPTH_STENCIL :
      GEN7_SURFACE_MSFMT_MSS;
}
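
/*
 * Illustrative mapping (follows directly from the code above): num_samples
 * of 0 or 1 programs MULTISAMPLECOUNT_1, 2..4 programs MULTISAMPLECOUNT_4,
 * and anything above 4 programs MULTISAMPLECOUNT_8.  An IMS layout (used for
 * depth/stencil miptrees) selects MSFMT_DEPTH_STENCIL storage; every other
 * layout selects MSFMT_MSS.
 */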


void
gen7_set_surface_mcs_info(struct brw_context *brw,
                          struct gen7_surface_state *surf,
                          uint32_t surf_offset,
                          const struct intel_mipmap_tree *mcs_mt,
                          bool is_render_target)
{
   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *   "The MCS surface must be stored as Tile Y."
    */
   assert(mcs_mt->region->tiling == I915_TILING_Y);

   /* Compute the pitch in units of tiles.  To do this we need to divide the
    * pitch in bytes by 128, since a single Y-tile is 128 bytes wide.
    */
   unsigned pitch_bytes = mcs_mt->region->pitch * mcs_mt->cpp;
   unsigned pitch_tiles = pitch_bytes / 128;
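
   /* Worked example (illustrative numbers): an MCS miptree with a pitch of
    * 2048 pixels and cpp == 1 gives pitch_bytes == 2048 and pitch_tiles == 16,
    * so mcs_surface_pitch below is programmed as 15.
    */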

   /* The upper 20 bits of surface state DWORD 6 are the upper 20 bits of the
    * GPU address of the MCS buffer; the lower 12 bits contain other control
    * information.  Since buffer addresses are always on 4k boundaries (and
    * thus have their lower 12 bits zero), we can use an ordinary reloc to do
    * the necessary address translation.
    */
   assert((mcs_mt->region->bo->offset & 0xfff) == 0);
   surf->ss6.mcs_enabled.mcs_enable = 1;
   surf->ss6.mcs_enabled.mcs_surface_pitch = pitch_tiles - 1;
   surf->ss6.mcs_enabled.mcs_base_address = mcs_mt->region->bo->offset >> 12;
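
   /* Illustrative example: a 4k-aligned bo->offset of 0x12345000 yields
    * mcs_base_address == 0x12345.  The reloc emitted below lets the kernel
    * rewrite the whole DWORD if the buffer moves; the low 12 bits of control
    * data are preserved because they are passed as the reloc delta.
    */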
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           surf_offset +
                           offsetof(struct gen7_surface_state, ss6),
                           mcs_mt->region->bo,
                           surf->ss6.raw_data & 0xfff,
                           is_render_target ? I915_GEM_DOMAIN_RENDER
                           : I915_GEM_DOMAIN_SAMPLER,
                           is_render_target ? I915_GEM_DOMAIN_RENDER : 0);
}


void
gen7_check_surface_setup(struct gen7_surface_state *surf,
                         bool is_render_target)
{
   bool is_multisampled =
      surf->ss4.num_multisamples != GEN7_SURFACE_MULTISAMPLECOUNT_1;
   /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
    * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Surface Array
    * Spacing:
    *
    *   If Multisampled Surface Storage Format is MSFMT_MSS and Number of
    *   Multisamples is not MULTISAMPLECOUNT_1, this field must be set to
    *   ARYSPC_LOD0.
    */
   if (surf->ss4.multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS
       && is_multisampled)
      assert(surf->ss0.surface_array_spacing == GEN7_SURFACE_ARYSPC_LOD0);

   /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
    * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
    * Surface Storage Format:
    *
    *   All multisampled render target surfaces must have this field set to
    *   MSFMT_MSS.
    *
    * But also:
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   if (is_render_target && is_multisampled) {
      assert(surf->ss4.multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
    * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
    * Surface Storage Format:
    *
    *   If the surface's Number of Multisamples is MULTISAMPLECOUNT_8, Width
    *   is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
    *   field must be set to MSFMT_MSS.
    */
   if (surf->ss4.num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 &&
       surf->ss2.width >= 8192) {
      assert(surf->ss4.multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
    * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
    * Surface Storage Format:
    *
    *   If the surface's Number of Multisamples is MULTISAMPLECOUNT_8,
    *   ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface's Number of
    *   Multisamples is MULTISAMPLECOUNT_4, ((Depth+1) * (Height+1)) is >
    *   8,388,608, this field must be set to MSFMT_DEPTH_STENCIL.  This field
    *   must be set to MSFMT_DEPTH_STENCIL if Surface Format is one of the
    *   following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
    *   R24_UNORM_X8_TYPELESS.
    *
    * But also:
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   uint32_t depth = surf->ss3.depth + 1;
   uint32_t height = surf->ss2.height + 1;
   if (surf->ss4.num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 &&
       depth * height > 4194304) {
      assert(surf->ss4.multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (surf->ss4.num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_4 &&
       depth * height > 8388608) {
      assert(surf->ss4.multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (is_multisampled) {
      switch (surf->ss0.surface_format) {
      case BRW_SURFACEFORMAT_I24X8_UNORM:
      case BRW_SURFACEFORMAT_L24X8_UNORM:
      case BRW_SURFACEFORMAT_A24X8_UNORM:
      case BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS:
         assert(surf->ss4.multisampled_surface_storage_format ==
                GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
      }
   }
}


static void
gen7_update_buffer_texture_surface(struct gl_context *ctx,
                                   unsigned unit,
                                   uint32_t *binding_table,
                                   unsigned surf_index)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct gen7_surface_state *surf;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   drm_intel_bo *bo = intel_obj ? intel_obj->buffer : NULL;
   gl_format format = tObj->_BufferObjectFormat;
   int texel_size = _mesa_get_format_bytes(format);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, &binding_table[surf_index]);
   memset(surf, 0, sizeof(*surf));

   surf->ss0.surface_type = BRW_SURFACE_BUFFER;
   surf->ss0.surface_format = brw_format_for_mesa_format(format);

   surf->ss0.render_cache_read_write = 1;

   if (surf->ss0.surface_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   if (bo) {
      surf->ss1.base_addr = bo->offset; /* reloc */

      /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
       * bspec ("Data Cache") says that the data cache does not exist as
       * a separate cache and is just the sampler cache.
       */
      drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                              (binding_table[surf_index] +
                               offsetof(struct gen7_surface_state, ss1)),
                              bo, 0,
                              I915_GEM_DOMAIN_SAMPLER, 0);

      int w = intel_obj->Base.Size / texel_size;
      surf->ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
      surf->ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
      surf->ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
      surf->ss3.pitch = texel_size - 1;
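
      /* Worked example (illustrative numbers): a 1000-texel RGBA32F buffer
       * has w == 999 (0x3e7), which packs as width == 0x67, height == 0x7,
       * depth == 0, with pitch == 16 - 1; the hardware reassembles the size
       * as (depth << 20 | height << 7 | width) + 1 elements.
       */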
   } else {
      surf->ss1.base_addr = 0;
      surf->ss2.width = 0;
      surf->ss2.height = 0;
      surf->ss3.depth = 0;
      surf->ss3.pitch = 0;
   }

   gen7_set_surface_tiling(surf, I915_TILING_NONE);

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

static void
gen7_update_texture_surface(struct gl_context *ctx,
                            unsigned unit,
                            uint32_t *binding_table,
                            unsigned surf_index)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct intel_mipmap_tree *mt = intelObj->mt;
   struct gl_texture_image *firstImage = tObj->Image[0][tObj->BaseLevel];
   struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
   struct gen7_surface_state *surf;
   int width, height, depth;

   if (tObj->Target == GL_TEXTURE_BUFFER) {
      gen7_update_buffer_texture_surface(ctx, unit, binding_table, surf_index);
      return;
   }

   /* We don't support MSAA for textures. */
   assert(!mt->array_spacing_lod0);
   assert(mt->num_samples <= 1);

   intel_miptree_get_dimensions_for_image(firstImage, &width, &height, &depth);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, &binding_table[surf_index]);
   memset(surf, 0, sizeof(*surf));

   if (mt->align_h == 4)
      surf->ss0.vertical_alignment = 1;
   if (mt->align_w == 8)
      surf->ss0.horizontal_alignment = 1;

   surf->ss0.surface_type = translate_tex_target(tObj->Target);
   surf->ss0.surface_format = translate_tex_format(mt->format,
                                                   firstImage->InternalFormat,
                                                   tObj->DepthMode,
                                                   sampler->sRGBDecode);
   if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
      surf->ss0.cube_pos_x = 1;
      surf->ss0.cube_pos_y = 1;
      surf->ss0.cube_pos_z = 1;
      surf->ss0.cube_neg_x = 1;
      surf->ss0.cube_neg_y = 1;
      surf->ss0.cube_neg_z = 1;
   }

   surf->ss0.is_array = depth > 1 && tObj->Target != GL_TEXTURE_3D;

   gen7_set_surface_tiling(surf, intelObj->mt->region->tiling);

   /* ss0 remaining fields:
    * - vert_line_stride (exists on gen6 but we ignore it)
    * - vert_line_stride_ofs (exists on gen6 but we ignore it)
    * - surface_array_spacing
    * - render_cache_read_write (exists on gen6 but ignored here)
    */

   surf->ss1.base_addr =
      intelObj->mt->region->bo->offset + intelObj->mt->offset; /* reloc */

   surf->ss2.width = width - 1;
   surf->ss2.height = height - 1;

   surf->ss3.pitch = (intelObj->mt->region->pitch * intelObj->mt->cpp) - 1;
   surf->ss3.depth = depth - 1;
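
   /* Illustrative example: a 1024x1024 RGBA8888 texture whose region pitch
    * is 1024 pixels and cpp is 4 gets ss2.width == ss2.height == 1023 and
    * ss3.pitch == 1024 * 4 - 1 == 4095; all of these fields are programmed
    * "minus one".
    */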

   /* ss4: ignored? */

   surf->ss5.mip_count = intelObj->_MaxLevel - tObj->BaseLevel;
   surf->ss5.min_lod = 0;

   /* ss5 remaining fields:
    * - x_offset (N/A for textures?)
    * - y_offset (ditto)
    * - cache_control
    */

   if (brw->intel.is_haswell) {
      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      const bool alpha_depth = tObj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);

      const int swizzle =
         unlikely(alpha_depth) ? SWIZZLE_XYZW : brw_get_texture_swizzle(tObj);

      surf->ss7.shader_channel_select_r = swizzle_to_scs(GET_SWZ(swizzle, 0));
      surf->ss7.shader_channel_select_g = swizzle_to_scs(GET_SWZ(swizzle, 1));
      surf->ss7.shader_channel_select_b = swizzle_to_scs(GET_SWZ(swizzle, 2));
      surf->ss7.shader_channel_select_a = swizzle_to_scs(GET_SWZ(swizzle, 3));
   }

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           binding_table[surf_index] +
                           offsetof(struct gen7_surface_state, ss1),
                           intelObj->mt->region->bo, intelObj->mt->offset,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 */
void
gen7_create_constant_surface(struct brw_context *brw,
                             drm_intel_bo *bo,
                             uint32_t offset,
                             int width,
                             uint32_t *out_offset)
{
   const GLint w = width - 1;
   struct gen7_surface_state *surf;

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, out_offset);
   memset(surf, 0, sizeof(*surf));

   surf->ss0.surface_type = BRW_SURFACE_BUFFER;
   surf->ss0.surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   surf->ss0.render_cache_read_write = 1;

   assert(bo);
   surf->ss1.base_addr = bo->offset + offset; /* reloc */

   surf->ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
   surf->ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
   surf->ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
   surf->ss3.pitch = (16 - 1);            /* stride between samples */
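   /* Descriptive note: the element stride is 16 bytes because the surface
    * format above is R32G32B32A32_FLOAT (one vec4 of floats per element),
    * and pitch is programmed "minus one" like the other size fields.
    */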
   gen7_set_surface_tiling(surf, I915_TILING_NONE); /* buffer surfaces are linear */

   if (brw->intel.is_haswell) {
      surf->ss7.shader_channel_select_r = HSW_SCS_RED;
      surf->ss7.shader_channel_select_g = HSW_SCS_GREEN;
      surf->ss7.shader_channel_select_b = HSW_SCS_BLUE;
      surf->ss7.shader_channel_select_a = HSW_SCS_ALPHA;
   }

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           (*out_offset +
                            offsetof(struct gen7_surface_state, ss1)),
                           bo, offset,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   gen7_check_surface_setup(surf, false /* is_render_target */);
}
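
/*
 * Usage sketch (illustrative only, not a quote of an actual caller): code
 * that uploads pull constants would reach this function through the vtable
 * set up at the bottom of this file, along the lines of:
 *
 *    intel->vtbl.create_constant_surface(brw, const_bo, 0, num_elements,
 *                                        &brw->wm.surf_offset[surf_index]);
 *
 * where const_bo, num_elements (counted in 16-byte vec4 elements, given the
 * pitch above) and surf_index are hypothetical stand-ins for the caller's
 * buffer object, constant count and binding-table slot.
 */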

static void
gen7_update_null_renderbuffer_surface(struct brw_context *brw, unsigned unit)
{
   /* From the Ivy bridge PRM, Vol4 Part1 p62 (Surface Type: Programming
    * Notes):
    *
    *   A null surface is used in instances where an actual surface is not
    *   bound.  When a write message is generated to a null surface, no
    *   actual surface is written to.  When a read message (including any
    *   sampling engine message) is generated to a null surface, the result
    *   is all zeros.  Note that a null surface type is allowed to be used
    *   with all messages, even if it is not specifically indicated as
    *   supported.  All of the remaining fields in surface state are ignored
    *   for null surfaces, with the following exceptions: Width, Height,
    *   Depth, LOD, and Render Target View Extent fields must match the
    *   depth buffer's corresponding state for all render target surfaces,
    *   including null.
    */
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gen7_surface_state *surf;

   /* _NEW_BUFFERS */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, &brw->wm.surf_offset[unit]);
   memset(surf, 0, sizeof(*surf));

   surf->ss0.surface_type = BRW_SURFACE_NULL;
   surf->ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;

   surf->ss2.width = fb->Width - 1;
   surf->ss2.height = fb->Height - 1;

   /* From the Ivy bridge PRM, Vol4 Part1 p65 (Tiled Surface: Programming
    * Notes):
    *
    *   If Surface Type is SURFTYPE_NULL, this field must be TRUE.
    */
   gen7_set_surface_tiling(surf, I915_TILING_Y);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static void
gen7_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_region *region = irb->mt->region;
   struct gen7_surface_state *surf;
   uint32_t tile_x, tile_y;
   gl_format rb_format = intel_rb_format(irb);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, &brw->wm.surf_offset[unit]);
   memset(surf, 0, sizeof(*surf));

   /* Render targets can't use IMS layout */
   assert(irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);

   if (irb->mt->align_h == 4)
      surf->ss0.vertical_alignment = 1;
   if (irb->mt->align_w == 8)
      surf->ss0.horizontal_alignment = 1;

   switch (rb_format) {
   case MESA_FORMAT_SARGB8:
      /* _NEW_BUFFERS
       *
       * Without GL_EXT_framebuffer_sRGB (i.e. when GL_FRAMEBUFFER_SRGB is
       * disabled) we shouldn't bind sRGB surfaces as sRGB, so that blending
       * and writes to the render target don't apply sRGB conversion.
       */
      if (ctx->Color.sRGBEnabled)
         surf->ss0.surface_format = brw_format_for_mesa_format(rb_format);
      else
         surf->ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   default:
      assert(brw_render_target_supported(intel, rb));
      surf->ss0.surface_format = brw->render_target_format[rb_format];
      if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
         _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                       __FUNCTION__, _mesa_get_format_name(rb_format));
      }
      break;
   }

   surf->ss0.surface_type = BRW_SURFACE_2D;
   surf->ss0.surface_array_spacing = irb->mt->array_spacing_lod0 ?
      GEN7_SURFACE_ARYSPC_LOD0 : GEN7_SURFACE_ARYSPC_FULL;

   /* reloc */
   surf->ss1.base_addr = intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);
   surf->ss1.base_addr += region->bo->offset; /* reloc */

   assert(brw->has_surface_tile_offset);
   /* Note that these fields can't hold the low bits of the tile offsets
    * (x_offset is stored in units of 4 pixels and y_offset in units of 2
    * rows), so there's the possibility of getting in trouble if the offsets
    * aren't suitably aligned; the asserts below check for that.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf->ss5.x_offset = tile_x / 4;
   surf->ss5.y_offset = tile_y / 2;
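
   /* Illustrative example: tile offsets of tile_x == 32 and tile_y == 16 are
    * programmed as x_offset == 8 and y_offset == 8.
    */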

   surf->ss2.width = rb->Width - 1;
   surf->ss2.height = rb->Height - 1;
   gen7_set_surface_tiling(surf, region->tiling);
   surf->ss3.pitch = (region->pitch * region->cpp) - 1;

   gen7_set_surface_msaa(surf, irb->mt->num_samples, irb->mt->msaa_layout);

   if (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      gen7_set_surface_mcs_info(brw, surf, brw->wm.surf_offset[unit],
                                irb->mt->mcs_mt, true /* is_render_target */);
   }

   if (intel->is_haswell) {
      surf->ss7.shader_channel_select_r = HSW_SCS_RED;
      surf->ss7.shader_channel_select_g = HSW_SCS_GREEN;
      surf->ss7.shader_channel_select_b = HSW_SCS_BLUE;
      surf->ss7.shader_channel_select_a = HSW_SCS_ALPHA;
   }

   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           brw->wm.surf_offset[unit] +
                           offsetof(struct gen7_surface_state, ss1),
                           region->bo,
                           surf->ss1.base_addr - region->bo->offset,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

void
gen7_init_vtable_surface_functions(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   intel->vtbl.update_texture_surface = gen7_update_texture_surface;
   intel->vtbl.update_renderbuffer_surface = gen7_update_renderbuffer_surface;
   intel->vtbl.update_null_renderbuffer_surface =
      gen7_update_null_renderbuffer_surface;
   intel->vtbl.create_constant_surface = gen7_create_constant_surface;
}