i965: Skip update_texture_surface when the plane doesn't exist
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return BRW_SURFACE_TILED;
   case I915_TILING_Y:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
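   /* Note (an editorial aside, not from the original source): this legacy
    * SURFACE_STATE layout only encodes 1x and 4x sample counts, so every
    * multisampled surface is reported as MULTISAMPLECOUNT_4 here.
    */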
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

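   /* For example (assuming the identity application swizzle): a
    * GL_DEPTH_COMPONENT texture with DepthMode GL_RED composes to
    * MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ONE).
    */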
   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

static void
gen4_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    6 * 4, 32, out_offset);
   memset(surf, 0, 6 * 4);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             (brw->gen >= 6 ? BRW_SURFACE_RC_READ_WRITE : 0);
   surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
   surf[2] = ((buffer_size - 1) & 0x7f) << BRW_SURFACE_WIDTH_SHIFT |
             (((buffer_size - 1) >> 7) & 0x1fff) << BRW_SURFACE_HEIGHT_SHIFT;
   surf[3] = (((buffer_size - 1) >> 20) & 0x7f) << BRW_SURFACE_DEPTH_SHIFT |
             (pitch - 1) << BRW_SURFACE_PITCH_SHIFT;
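   /* Worked example of the size packing above (values assumed for
    * illustration): for a buffer of 1000 texels, buffer_size - 1 = 999 =
    * 0x3e7, so bits 6:0 (0x67) land in Width, bits 19:7 (0x7) in Height,
    * and bits 26:20 (0) in Depth, describing exactly 1000 elements.
    */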

   /* Emit relocation to surface contents. The 965 PRM, Volume 4, section
    * 5.1.2 "Data Cache" says: "the data cache does not exist as a separate
    * physical cache. It is mapped in hardware to the sampler cache."
    */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
                              bo, buffer_offset,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   drm_intel_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   uint32_t brw_format = brw_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
   }

   if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw->vtbl.emit_buffer_surface_state(brw, surf_offset, bo,
                                       tObj->BufferOffset,
                                       brw_format,
                                       size / texel_size,
                                       texel_size,
                                       false /* rw */);
}

static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct intel_mipmap_tree *mt = intelObj->mt;
   struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
   uint32_t *surf;

   /* BRW_NEW_TEXTURE_BUFFER */
   if (tObj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);
      return;
   }

   if (plane > 0) {
      if (mt->plane[plane - 1] == NULL)
         return;
      mt = mt->plane[plane - 1];
   }

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          6 * 4, 32, surf_offset);

   uint32_t tex_format = translate_tex_format(brw, intelObj->_Format,
                                              sampler->sRGBDecode);

   if (for_gather) {
      /* Sandybridge's gather4 message is broken for integer formats.
       * To work around this, we pretend the surface is UNORM for
       * 8 or 16-bit formats, and emit shader instructions to recover
       * the real INT/UINT value. For 32-bit formats, we pretend
       * the surface is FLOAT, and simply reinterpret the resulting
       * bits.
       */
      switch (tex_format) {
      case BRW_SURFACEFORMAT_R8_SINT:
      case BRW_SURFACEFORMAT_R8_UINT:
         tex_format = BRW_SURFACEFORMAT_R8_UNORM;
         break;

      case BRW_SURFACEFORMAT_R16_SINT:
      case BRW_SURFACEFORMAT_R16_UINT:
         tex_format = BRW_SURFACEFORMAT_R16_UNORM;
         break;

      case BRW_SURFACEFORMAT_R32_SINT:
      case BRW_SURFACEFORMAT_R32_UINT:
         tex_format = BRW_SURFACEFORMAT_R32_FLOAT;
         break;

      default:
         break;
      }
   }

   surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
              BRW_SURFACE_CUBEFACE_ENABLES |
              tex_format << BRW_SURFACE_FORMAT_SHIFT);

   surf[1] = mt->bo->offset64 + mt->offset; /* reloc */

   surf[2] = ((intelObj->_MaxLevel - tObj->BaseLevel) << BRW_SURFACE_LOD_SHIFT |
              (mt->logical_width0 - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (mt->logical_height0 - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->logical_depth0 - 1) << BRW_SURFACE_DEPTH_SHIFT |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   const unsigned min_lod = tObj->MinLevel + tObj->BaseLevel - mt->first_level;
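   /* For instance (a hypothetical ARB_texture_view case): a view with
    * MinLevel 2 onto a miptree whose first_level is 2, with BaseLevel 0,
    * gives min_lod = 2 + 0 - 2 = 0, i.e. sampling starts at the view's
    * first level.
    */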
   surf[4] = (brw_get_surface_num_multisamples(mt->num_samples) |
              SET_FIELD(min_lod, BRW_SURFACE_MIN_LOD) |
              SET_FIELD(tObj->MinLayer, BRW_SURFACE_MIN_ARRAY_ELEMENT));

   surf[5] = mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0;

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}

/**
 * Create the constant buffer surface. Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                       BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
                                       size, 1, false);
}

/**
 * Create the buffer surface. Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          drm_intel_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages. We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                       BRW_SURFACEFORMAT_RAW,
                                       size, 1, true);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                             offset_bytes,
                                             buffer_obj->Size - offset_bytes);
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer. We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow. But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
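   /* Worked example (values assumed for illustration): a 4096-byte buffer
    * gives size_dwords = 1024; with offset_dwords = 0, stride_dwords = 4,
    * and 4 components, buffer_size_minus_1 = (1024 - 0 - 4) / 4 = 255, so
    * width = 127, height = 1, depth = 0.
    */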

   switch (num_vector_components) {
   case 1:
      surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = bo->offset64 + offset_bytes; /* reloc */
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents. */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *out_offset + 4,
                           bo, offset_bytes,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;

   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output. An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
brw_emit_null_surface_state(struct brw_context *brw,
                            unsigned width,
                            unsigned height,
                            unsigned samples,
                            uint32_t *out_offset)
{
   /* From the Sandybridge PRM, Vol4 Part1 p71 (Surface Type: Programming
    * Notes):
    *
    *     A null surface will be used in instances where an actual surface is
    *     not bound. When a write message is generated to a null surface, no
    *     actual surface is written to. When a read message (including any
    *     sampling engine message) is generated to a null surface, the result
    *     is all zeros. Note that a null surface type is allowed to be used
    *     with all messages, even if it is not specifically indicated as
    *     supported. All of the remaining fields in surface state are ignored
    *     for null surfaces, with the following exceptions:
    *
    *     - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
    *       depth buffer's corresponding state for all render target surfaces,
    *       including null.
    *
    *     - Surface Format must be R8G8B8A8_UNORM.
    */
   unsigned surface_type = BRW_SURFACE_NULL;
   drm_intel_bo *bo = NULL;
   unsigned pitch_minus_1 = 0;
   uint32_t multisampling_state = 0;
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);

   if (samples > 1) {
      /* On Gen6, null render targets seem to cause GPU hangs when
       * multisampling. So work around this problem by rendering into a
       * dummy color buffer.
       *
       * To decrease the amount of memory needed by the workaround buffer, we
       * set its pitch to 128 bytes (the width of a Y tile). This means that
       * the amount of memory needed for the workaround buffer is
       * (width_in_tiles + height_in_tiles - 1) tiles.
       *
       * Note that since the workaround buffer will be interpreted by the
       * hardware as an interleaved multisampled buffer, we need to compute
       * width_in_tiles and height_in_tiles by dividing the width and height
       * by 16 rather than the normal Y-tile size of 32.
       */
      unsigned width_in_tiles = ALIGN(width, 16) / 16;
      unsigned height_in_tiles = ALIGN(height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
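      /* E.g. (numbers assumed for illustration) a 1920x1080 multisampled
       * null RT needs 120 + 68 - 1 = 187 tiles, i.e. 187 * 4096 bytes of
       * scratch space.
       */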
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;
      pitch_minus_1 = 127;
      multisampling_state = brw_get_surface_num_multisamples(samples);
   }

   surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   if (brw->gen < 6) {
      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
   }
   surf[1] = bo ? bo->offset64 : 0;
   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandybridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = multisampling_state;
   surf[5] = 0;

   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *out_offset + 4,
                              bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                bool layered, unsigned unit,
                                uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   uint32_t format = 0;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   assert(!layered);

   if (rb->TexImage && !brw->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image. So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         mt = irb->mt;
      }
   }

   intel_miptree_used_for_rendering(irb->mt);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);

   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
              mt->bo->offset64 + mt->offset);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->num_samples);

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so there's the
    * possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}

/**
 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
 */
void
brw_update_renderbuffer_surfaces(struct brw_context *brw,
                                 const struct gl_framebuffer *fb,
                                 uint32_t render_target_start,
                                 uint32_t *surf_offset)
{
   GLuint i;
   const unsigned int w = _mesa_geometric_width(fb);
   const unsigned int h = _mesa_geometric_height(fb);
   const unsigned int s = _mesa_geometric_samples(fb);

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const uint32_t surf_index = render_target_start + i;

         if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
            surf_offset[surf_index] =
               brw->vtbl.update_renderbuffer_surface(
                  brw, fb->_ColorDrawBuffers[i],
                  _mesa_geometric_layers(fb) > 0, i, surf_index);
         } else {
            brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                              &surf_offset[surf_index]);
         }
      }
   } else {
      const uint32_t surf_index = render_target_start;
      brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                        &surf_offset[surf_index]);
   }
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   brw_update_renderbuffer_surfaces(
      brw, fb,
      brw->wm.prog_data->binding_table.render_target_start,
      brw->wm.base.surf_offset);
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = update_renderbuffer_surfaces,
};


static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw->vtbl.update_texture_surface(ctx, unit, surf_offset + s,
                                             for_gather, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather. This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (brw->gen < 8) {
      if (vs && vs->UsesGather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->UsesGather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->UsesGather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->UsesGather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->UsesGather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather. This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (brw->gen < 8) {
      if (cs && cs->UsesGather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = brw_update_cs_texture_surfaces,
};


void
brw_upload_ubo_surfaces(struct brw_context *brw,
                        struct gl_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!shader)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < shader->NumUniformBlocks; i++) {
      struct gl_uniform_buffer_binding *binding =
         &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
      struct gl_shader_storage_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (!prog)
      return;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                           &brw->wm.base, &brw->wm.prog_data->base);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (!prog)
      return;

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                           &brw->cs.base, &brw->cs.prog_data->base);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

void
brw_upload_abo_surfaces(struct brw_context *brw,
                        struct gl_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (shader && shader->NumAtomicBuffers) {
      for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
         struct gl_atomic_buffer_binding *binding =
            &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         drm_intel_bo *bo = intel_bufferobj_buffer(
            brw, intel_bo, binding->Offset,
            intel_bo->Base.Size - binding->Offset);

         brw->vtbl.emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                             binding->Offset,
                                             BRW_SURFACEFORMAT_RAW,
                                             bo->size - binding->Offset,
                                             1, true);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                              &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                              &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                                &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   uint32_t hw_format = brw_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats. Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return BRW_SURFACEFORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * see emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = obj->Size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
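   /* For instance (numbers assumed for illustration): a 4096-byte buffer
    * bound with a 16-byte-per-texel format such as RGBA32F yields
    * size[0] = 256 texels with stride[0] = 16 bytes between them.
    */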
}

static void
update_texture_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = minify(mt->logical_width0, u->Level);
   param->size[1] = minify(mt->logical_height0, u->Level);
   param->size[2] = (!u->Layered ? 1 :
                     u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                     u->TexObj->Target == GL_TEXTURE_3D ?
                     minify(mt->logical_depth0, u->Level) :
                     mt->logical_depth0);

   intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
                                  &param->offset[0],
                                  &param->offset[1]);

   param->stride[0] = mt->cpp;
   param->stride[1] = mt->pitch / mt->cpp;
   param->stride[2] =
      brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
   param->stride[3] =
      brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);

   if (mt->tiling == I915_TILING_X) {
      /* An X tile is a rectangular block of 512x8 bytes. */
      param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(8);
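
      /* E.g. with a 4-byte-per-pixel format (assumed for illustration),
       * tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3, so an
       * X tile covers 128x8 pixels.
       */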

      if (brw->has_swizzling) {
         /* Right shifts required to swizzle bits 9 and 10 of the memory
          * address with bit 6.
          */
         param->swizzling[0] = 3;
         param->swizzling[1] = 4;
      }
   } else if (mt->tiling == I915_TILING_Y) {
      /* The layout of a Y-tiled surface in memory isn't really fundamentally
       * different from the layout of an X-tiled surface; we simply pretend
       * that the surface is broken up into a number of smaller 16Bx32 tiles,
       * each one arranged in X-major order just as in the X-tiling case.
       */
      param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(32);

      if (brw->has_swizzling) {
         /* Right shift required to swizzle bit 9 of the memory address with
          * bit 6.
          */
         param->swizzling[0] = 3;
      }
   }

   /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
    * address calculation algorithm (emit_address_calculation() in
    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
    * modulus equal to the LOD.
    */
   param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
                       0);
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw->vtbl.emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size / texel_size, texel_size,
            access != GL_READ_ONLY);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         if (format == BRW_SURFACEFORMAT_RAW) {
            brw->vtbl.emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY);

         } else {
            const unsigned min_layer = obj->MinLayer + u->_Layer;
            const unsigned min_level = obj->MinLevel + u->Level;
            const unsigned num_layers = (!u->Layered ? 1 :
                                         obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                                         mt->logical_depth0);
            const GLenum target = (obj->Target == GL_TEXTURE_CUBE_MAP ||
                                   obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY ?
                                   GL_TEXTURE_2D_ARRAY : obj->Target);
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

            brw->vtbl.emit_texture_surface_state(
               brw, mt, target,
               min_layer, min_layer + num_layers,
               min_level, min_level + 1,
               format, SWIZZLE_XYZW,
               surf_offset, surf_index, access != GL_READ_ONLY, false);
         }

         update_texture_image_param(brw, u, surface_idx, param);
      }

   } else {
      brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          struct gl_shader *shader,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (shader && shader->NumImages) {
      for (unsigned i = 0; i < shader->NumImages; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, shader->ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &prog_data->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms; make sure
       * that push and pull constants are re-uploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                                &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

void
gen4_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_texture_surface = brw_update_texture_surface;
   brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
   brw->vtbl.emit_buffer_surface_state = gen4_emit_buffer_surface_state;
}

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog && brw->cs.prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         brw->cs.prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      drm_intel_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }
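
      /* Either way the binding table entry ends up pointing at a 12-byte
       * RAW buffer holding the three GLuint dispatch dimensions that back
       * the gl_NumWorkGroups built-in.
       */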
      brw->vtbl.emit_buffer_surface_state(brw, surf_offset,
                                          bo, bo_offset,
                                          BRW_SURFACEFORMAT_RAW,
                                          3 * sizeof(GLuint), 1, true);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};