[mesa.git] / src / mesa / drivers / dri / i965 / gen7_wm_surface_state.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/mtypes.h"
#include "main/blend.h"
#include "main/samplerobj.h"
#include "main/texformat.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,    SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We may then need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

uint32_t
gen7_surface_tiling_mode(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return GEN7_SURFACE_TILING_X;
   case I915_TILING_Y:
      return GEN7_SURFACE_TILING_Y;
   default:
      return GEN7_SURFACE_TILING_NONE;
   }
}

uint32_t
gen7_surface_msaa_bits(unsigned num_samples, enum intel_msaa_layout layout)
{
   uint32_t ss4 = 0;

   assert(num_samples <= 8);

   /* The SURFACE_MULTISAMPLECOUNT_X enums are simply log2(num_samples) << 3. */
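   /* For example, num_samples == 4 gives ffs(4) - 1 == 2, i.e.
    * GEN7_SURFACE_MULTISAMPLECOUNT_4 (2 << 3); the MAX2() keeps a
    * single-sampled surface (num_samples == 0 or 1) at MULTISAMPLECOUNT_1.
    */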
   ss4 |= (ffs(MAX2(num_samples, 1)) - 1) << 3;

   if (layout == INTEL_MSAA_LAYOUT_IMS)
      ss4 |= GEN7_SURFACE_MSFMT_DEPTH_STENCIL;
   else
      ss4 |= GEN7_SURFACE_MSFMT_MSS;

   return ss4;
}

void
gen7_set_surface_mcs_info(struct brw_context *brw,
                          uint32_t *surf,
                          uint32_t surf_offset,
                          const struct intel_mipmap_tree *mcs_mt,
                          bool is_render_target)
{
   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   assert(mcs_mt->tiling == I915_TILING_Y);

   /* Compute the pitch in units of tiles.  To do this we need to divide the
    * pitch in bytes by 128, since a single Y-tile is 128 bytes wide.
    */
   unsigned pitch_tiles = mcs_mt->pitch / 128;

   /* The upper 20 bits of surface state DWORD 6 are the upper 20 bits of the
    * GPU address of the MCS buffer; the lower 12 bits contain other control
    * information.  Since buffer addresses are always on 4k boundaries (and
    * thus have their lower 12 bits zero), we can use an ordinary reloc to do
    * the necessary address translation.
    */
   assert((mcs_mt->bo->offset64 & 0xfff) == 0);

   surf[6] = GEN7_SURFACE_MCS_ENABLE |
             SET_FIELD(pitch_tiles - 1, GEN7_SURFACE_MCS_PITCH) |
             mcs_mt->bo->offset64;

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           surf_offset + 6 * 4,
                           mcs_mt->bo,
                           surf[6] & 0xfff,
                           is_render_target ? I915_GEM_DOMAIN_RENDER
                           : I915_GEM_DOMAIN_SAMPLER,
                           is_render_target ? I915_GEM_DOMAIN_RENDER : 0);
}

void
gen7_check_surface_setup(uint32_t *surf, bool is_render_target)
{
   unsigned num_multisamples = surf[4] & INTEL_MASK(5, 3);
   unsigned multisampled_surface_storage_format = surf[4] & (1 << 6);
   unsigned surface_array_spacing = surf[0] & (1 << 10);
   bool is_multisampled = num_multisamples != GEN7_SURFACE_MULTISAMPLECOUNT_1;

   (void) surface_array_spacing;

   /* From the Ivybridge PRM, Volume 4 Part 1, page 66 (RENDER_SURFACE_STATE
    * dword 0 bit 10 "Surface Array Spacing" Programming Notes):
    *
    *   If Multisampled Surface Storage Format is MSFMT_MSS and Number of
    *   Multisamples is not MULTISAMPLECOUNT_1, this field must be set to
    *   ARYSPC_LOD0.
    */
   if (multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS
       && is_multisampled)
      assert(surface_array_spacing == GEN7_SURFACE_ARYSPC_LOD0);

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage" Programming Notes):
    *
    *   All multisampled render target surfaces must have this field set to
    *   MSFMT_MSS.
    *
    * But also:
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   if (is_render_target && is_multisampled) {
      assert(multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage Format" Errata):
    *
    *   If the surface's Number of Multisamples is MULTISAMPLECOUNT_8, Width
    *   is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
    *   field must be set to MSFMT_MSS.
    */
   uint32_t width = GET_FIELD(surf[2], GEN7_SURFACE_WIDTH) + 1;
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 && width >= 8193) {
      assert(multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Ivybridge PRM, Volume 4 Part 1, page 72 (RENDER_SURFACE_STATE
    * dword 4 bit 6 "Multisampled Surface Storage Format" Errata):
    *
    *   If the surface's Number of Multisamples is MULTISAMPLECOUNT_8,
    *   ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface's Number of
    *   Multisamples is MULTISAMPLECOUNT_4, ((Depth+1) * (Height+1)) is >
    *   8,388,608, this field must be set to MSFMT_DEPTH_STENCIL.  This field
    *   must be set to MSFMT_DEPTH_STENCIL if Surface Format is one of the
    *   following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
    *   R24_UNORM_X8_TYPELESS.
    *
    * But also (from the Programming Notes):
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   uint32_t depth = GET_FIELD(surf[3], BRW_SURFACE_DEPTH) + 1;
   uint32_t height = GET_FIELD(surf[2], GEN7_SURFACE_HEIGHT) + 1;
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 &&
       depth * height > 4194304) {
      assert(multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_4 &&
       depth * height > 8388608) {
      assert(multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (is_multisampled) {
      switch (GET_FIELD(surf[0], BRW_SURFACE_FORMAT)) {
      case BRW_SURFACEFORMAT_I24X8_UNORM:
      case BRW_SURFACEFORMAT_L24X8_UNORM:
      case BRW_SURFACEFORMAT_A24X8_UNORM:
      case BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS:
         assert(multisampled_surface_storage_format ==
                GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
      }
   }
}

static void
gen7_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    8 * 4, 32, out_offset);
   memset(surf, 0, 8 * 4);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
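   /* The buffer extent is programmed as an element count minus one: bits 6:0
    * of (buffer_size - 1) go in the Width field, bits 20:7 in Height, and
    * the remaining high bits in Depth below.
    */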
   surf[2] = SET_FIELD((buffer_size - 1) & 0x7f, GEN7_SURFACE_WIDTH) |
             SET_FIELD(((buffer_size - 1) >> 7) & 0x3fff, GEN7_SURFACE_HEIGHT);
   if (surface_format == BRW_SURFACEFORMAT_RAW)
      surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3ff, BRW_SURFACE_DEPTH);
   else
      surf[3] = SET_FIELD(((buffer_size - 1) >> 21) & 0x3f, BRW_SURFACE_DEPTH);
   surf[3] |= (pitch - 1);

   surf[5] = SET_FIELD(GEN7_MOCS_L3, GEN7_SURFACE_MOCS);

   if (brw->is_haswell) {
      surf[7] |= (SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
                  SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
                  SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
                  SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A));
   }

   /* Emit relocation to surface contents */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
                              bo, buffer_offset, I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

static void
gen7_emit_texture_surface_state(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                GLenum target,
                                unsigned min_layer, unsigned max_layer,
                                unsigned min_level, unsigned max_level,
                                unsigned format,
                                unsigned swizzle,
                                uint32_t *surf_offset,
                                bool rw, bool for_gather)
{
   const unsigned depth = max_layer - min_layer;
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    8 * 4, 32, surf_offset);

   memset(surf, 0, 8 * 4);

   surf[0] = translate_tex_target(target) << BRW_SURFACE_TYPE_SHIFT |
             format << BRW_SURFACE_FORMAT_SHIFT |
             gen7_surface_tiling_mode(mt->tiling);

   /* mask of faces present in cube map; for other surfaces MBZ. */
   if (target == GL_TEXTURE_CUBE_MAP || target == GL_TEXTURE_CUBE_MAP_ARRAY)
      surf[0] |= BRW_SURFACE_CUBEFACE_ENABLES;

   if (mt->align_h == 4)
      surf[0] |= GEN7_SURFACE_VALIGN_4;
   if (mt->align_w == 8)
      surf[0] |= GEN7_SURFACE_HALIGN_8;

   if (_mesa_is_array_texture(target) || target == GL_TEXTURE_CUBE_MAP)
      surf[0] |= GEN7_SURFACE_IS_ARRAY;

   if (mt->array_layout == ALL_SLICES_AT_EACH_LOD)
      surf[0] |= GEN7_SURFACE_ARYSPC_LOD0;

   surf[1] = mt->bo->offset64 + mt->offset; /* reloc */

   surf[2] = SET_FIELD(mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = SET_FIELD(depth - 1, BRW_SURFACE_DEPTH) |
             (mt->pitch - 1);

   if (brw->is_haswell && _mesa_is_format_integer(mt->format))
      surf[3] |= HSW_SURFACE_IS_INTEGER_FORMAT;

   surf[4] = gen7_surface_msaa_bits(mt->num_samples, mt->msaa_layout) |
             SET_FIELD(min_layer, GEN7_SURFACE_MIN_ARRAY_ELEMENT) |
             SET_FIELD(depth - 1, GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT);

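   /* MIN_LOD is specified relative to the miptree's first_level, and the mip
    * count field below holds the number of enabled LODs minus one.
    */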
   surf[5] = (SET_FIELD(GEN7_MOCS_L3, GEN7_SURFACE_MOCS) |
              SET_FIELD(min_level - mt->first_level, GEN7_SURFACE_MIN_LOD) |
              /* mip count */
              (max_level - min_level - 1));

   surf[7] = mt->fast_clear_color_value;

   if (brw->is_haswell) {
      const bool need_scs_green_to_blue =
         for_gather && format == BRW_SURFACEFORMAT_R32G32_FLOAT_LD;

      surf[7] |=
         SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 0), need_scs_green_to_blue), GEN7_SURFACE_SCS_R) |
         SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 1), need_scs_green_to_blue), GEN7_SURFACE_SCS_G) |
         SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 2), need_scs_green_to_blue), GEN7_SURFACE_SCS_B) |
         SET_FIELD(swizzle_to_scs(GET_SWZ(swizzle, 3), need_scs_green_to_blue), GEN7_SURFACE_SCS_A);
   }

   if (mt->mcs_mt) {
      gen7_set_surface_mcs_info(brw, surf, *surf_offset,
                                mt->mcs_mt, false /* is RT */);
   }

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_SAMPLER,
                           (rw ? I915_GEM_DOMAIN_SAMPLER : 0));

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

static void
gen7_update_texture_surface(struct gl_context *ctx,
                            unsigned unit,
                            uint32_t *surf_offset,
                            bool for_gather)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;
      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      const unsigned depth = (obj->Immutable && obj->Target != GL_TEXTURE_3D ?
                              obj->NumLayers : mt->logical_depth0);

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      unsigned format = translate_tex_format(
         brw, intel_obj->_Format, sampler->sRGBDecode);

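      /* For gather, the R32G32_FLOAT surface is set up with the _LD variant
       * of the format; gen7_emit_texture_surface_state() then applies the
       * green-to-blue channel select workaround for this case on Haswell.
       */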
      if (for_gather && format == BRW_SURFACEFORMAT_R32G32_FLOAT)
         format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;

      gen7_emit_texture_surface_state(brw, mt, obj->Target,
                                      obj->MinLayer, obj->MinLayer + depth,
                                      obj->MinLevel + obj->BaseLevel,
                                      obj->MinLevel + intel_obj->_MaxLevel + 1,
                                      format, swizzle,
                                      surf_offset, false, for_gather);
   }
}

/**
 * Creates a null surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
gen7_emit_null_surface_state(struct brw_context *brw,
                             unsigned width,
                             unsigned height,
                             unsigned samples,
                             uint32_t *out_offset)
{
   /* From the Ivy Bridge PRM, Vol4 Part1 p62 (Surface Type: Programming
    * Notes):
    *
    *   A null surface is used in instances where an actual surface is not
    *   bound.  When a write message is generated to a null surface, no
    *   actual surface is written to.  When a read message (including any
    *   sampling engine message) is generated to a null surface, the result
    *   is all zeros.  Note that a null surface type is allowed to be used
    *   with all messages, even if it is not specifically indicated as
    *   supported.  All of the remaining fields in surface state are ignored
    *   for null surfaces, with the following exceptions: Width, Height,
    *   Depth, LOD, and Render Target View Extent fields must match the
    *   depth buffer's corresponding state for all render target surfaces,
    *   including null.
    */
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
                                    out_offset);
   memset(surf, 0, 8 * 4);

   /* From the Ivybridge PRM, Volume 4, Part 1, page 65,
    * Tiled Surface: Programming Notes:
    * "If Surface Type is SURFTYPE_NULL, this field must be TRUE."
    */
   surf[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT |
             GEN7_SURFACE_TILING_Y;

   surf[2] = SET_FIELD(width - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(height - 1, GEN7_SURFACE_HEIGHT);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen7_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 bool layered, unsigned unit /* unused */,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t format;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   uint32_t surftype;
   bool is_array = false;
   int depth = MAX2(irb->layer_count, 1);
   const uint8_t mocs = GEN7_MOCS_L3;
   uint32_t offset;

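   /* irb->mt_layer is a physical slice index; for multisampled UMS/CMS
    * miptrees there are num_samples physical slices per logical layer, so
    * convert back to a logical array element here.
    */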
   int min_array_element = irb->mt_layer / MAX2(mt->num_samples, 1);

   GLenum gl_target = rb->TexImage ?
      rb->TexImage->TexObject->Target : GL_TEXTURE_2D;

   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
                                    &offset);
   memset(surf, 0, 8 * 4);

   intel_miptree_used_for_rendering(irb->mt);

   /* Render targets can't use IMS layout */
   assert(irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);

   assert(brw_render_target_supported(brw, rb));
   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      surftype = BRW_SURFACE_2D;
      is_array = true;
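      /* The hardware treats cube maps as 2D arrays with six faces per
       * logical layer.
       */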
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      depth = MAX2(irb->mt->logical_depth0, 1);
      /* fallthrough */
   default:
      surftype = translate_tex_target(gl_target);
      is_array = _mesa_tex_target_is_array(gl_target);
      break;
   }

   surf[0] = surftype << BRW_SURFACE_TYPE_SHIFT |
             format << BRW_SURFACE_FORMAT_SHIFT |
             (irb->mt->array_layout == ALL_SLICES_AT_EACH_LOD ?
              GEN7_SURFACE_ARYSPC_LOD0 : GEN7_SURFACE_ARYSPC_FULL) |
             gen7_surface_tiling_mode(mt->tiling);

   if (irb->mt->align_h == 4)
      surf[0] |= GEN7_SURFACE_VALIGN_4;
   if (irb->mt->align_w == 8)
      surf[0] |= GEN7_SURFACE_HALIGN_8;

   if (is_array) {
      surf[0] |= GEN7_SURFACE_IS_ARRAY;
   }

   assert(mt->offset % mt->cpp == 0);
   surf[1] = mt->bo->offset64 + mt->offset;

   assert(brw->has_surface_tile_offset);

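   /* Point the surface at the LOD being rendered, relative to the miptree's
    * first level.
    */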
   surf[5] = SET_FIELD(mocs, GEN7_SURFACE_MOCS) |
             (irb->mt_level - irb->mt->first_level);

   surf[2] = SET_FIELD(irb->mt->logical_width0 - 1, GEN7_SURFACE_WIDTH) |
             SET_FIELD(irb->mt->logical_height0 - 1, GEN7_SURFACE_HEIGHT);

   surf[3] = ((depth - 1) << BRW_SURFACE_DEPTH_SHIFT) |
             (mt->pitch - 1);

   surf[4] = gen7_surface_msaa_bits(irb->mt->num_samples, irb->mt->msaa_layout) |
             min_array_element << GEN7_SURFACE_MIN_ARRAY_ELEMENT_SHIFT |
             (depth - 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT;

   if (irb->mt->mcs_mt) {
      gen7_set_surface_mcs_info(brw, surf, offset,
                                irb->mt->mcs_mt, true /* is RT */);
   }

   surf[7] = irb->mt->fast_clear_color_value;

   if (brw->is_haswell) {
      surf[7] |= (SET_FIELD(HSW_SCS_RED, GEN7_SURFACE_SCS_R) |
                  SET_FIELD(HSW_SCS_GREEN, GEN7_SURFACE_SCS_G) |
                  SET_FIELD(HSW_SCS_BLUE, GEN7_SURFACE_SCS_B) |
                  SET_FIELD(HSW_SCS_ALPHA, GEN7_SURFACE_SCS_A));
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   gen7_check_surface_setup(surf, true /* is_render_target */);

   return offset;
}

void
gen7_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_texture_surface = gen7_update_texture_surface;
   brw->vtbl.update_renderbuffer_surface = gen7_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = gen7_emit_null_surface_state;
   brw->vtbl.emit_texture_surface_state = gen7_emit_texture_surface_state;
   brw->vtbl.emit_buffer_surface_state = gen7_emit_buffer_surface_state;
}