/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return BRW_SURFACE_TILED;
   case I915_TILING_Y:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

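/* Map a sample count onto the multisample-count encoding used by the gen4-6
 * SURFACE_STATE packing below.  This encoding only distinguishes
 * single-sampled surfaces from multisampled ones, so any count greater than
 * one is reported as MULTISAMPLECOUNT_4.
 */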
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

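   /* Worked example of the composition below (illustrative only): for a
    * GL_DEPTH_COMPONENT texture in GL_LUMINANCE depth mode the table above
    * is (X, X, X, ONE).  If the application also set a texture swizzle of
    * (ALPHA, RED, RED, RED), t->_Swizzle selects entries 3, 0, 0, 0 from
    * that table, and the value returned is MAKE_SWIZZLE4(ONE, X, X, X).
    */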
   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

static void
gen4_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    6 * 4, 32, out_offset);
   memset(surf, 0, 6 * 4);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             (brw->gen >= 6 ? BRW_SURFACE_RC_READ_WRITE : 0);
   surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
   surf[2] = ((buffer_size - 1) & 0x7f) << BRW_SURFACE_WIDTH_SHIFT |
             (((buffer_size - 1) >> 7) & 0x1fff) << BRW_SURFACE_HEIGHT_SHIFT;
   surf[3] = (((buffer_size - 1) >> 20) & 0x7f) << BRW_SURFACE_DEPTH_SHIFT |
             (pitch - 1) << BRW_SURFACE_PITCH_SHIFT;
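
   /* Worked example of the packing above (illustrative, not from the
    * original code): for a 1000000-element buffer, buffer_size - 1 is
    * 999999 = 0xf423f, which splits into width = 0x3f (bits 6:0),
    * height = 0x1e84 (bits 19:7) and depth = 0x0 (bits 26:20).  The three
    * fields together can address at most 2^27 elements.
    */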

   /* Emit relocation to surface contents.  The 965 PRM, Volume 4, section
    * 5.1.2 "Data Cache" says: "the data cache does not exist as a separate
    * physical cache.  It is mapped in hardware to the sampler cache."
    */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
                              bo, buffer_offset,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   drm_intel_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   uint32_t brw_format = brw_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
   }

   if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw->vtbl.emit_buffer_surface_state(brw, surf_offset, bo,
                                       tObj->BufferOffset,
                                       brw_format,
                                       size / texel_size,
                                       texel_size,
                                       false /* rw */);
}

static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct intel_mipmap_tree *mt = intelObj->mt;
   struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
   uint32_t *surf;

   /* BRW_NEW_TEXTURE_BUFFER */
   if (tObj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);
      return;
   }

   if (plane > 0) {
      if (mt->plane[plane - 1] == NULL)
         return;
      mt = mt->plane[plane - 1];
   }

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          6 * 4, 32, surf_offset);

   mesa_format mesa_fmt = plane == 0 ? intelObj->_Format : mt->format;
   uint32_t tex_format = translate_tex_format(brw, mesa_fmt,
                                              sampler->sRGBDecode);

   if (for_gather) {
      /* Sandybridge's gather4 message is broken for integer formats.
       * To work around this, we pretend the surface is UNORM for
       * 8 or 16-bit formats, and emit shader instructions to recover
       * the real INT/UINT value.  For 32-bit formats, we pretend
       * the surface is FLOAT, and simply reinterpret the resulting
       * bits.
       */
      switch (tex_format) {
      case BRW_SURFACEFORMAT_R8_SINT:
      case BRW_SURFACEFORMAT_R8_UINT:
         tex_format = BRW_SURFACEFORMAT_R8_UNORM;
         break;

      case BRW_SURFACEFORMAT_R16_SINT:
      case BRW_SURFACEFORMAT_R16_UINT:
         tex_format = BRW_SURFACEFORMAT_R16_UNORM;
         break;

      case BRW_SURFACEFORMAT_R32_SINT:
      case BRW_SURFACEFORMAT_R32_UINT:
         tex_format = BRW_SURFACEFORMAT_R32_FLOAT;
         break;

      default:
         break;
      }
   }

   surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
              BRW_SURFACE_CUBEFACE_ENABLES |
              tex_format << BRW_SURFACE_FORMAT_SHIFT);

   surf[1] = mt->bo->offset64 + mt->offset; /* reloc */

   surf[2] = ((intelObj->_MaxLevel - tObj->BaseLevel) << BRW_SURFACE_LOD_SHIFT |
              (mt->logical_width0 - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (mt->logical_height0 - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->logical_depth0 - 1) << BRW_SURFACE_DEPTH_SHIFT |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   const unsigned min_lod = tObj->MinLevel + tObj->BaseLevel - mt->first_level;
   surf[4] = (brw_get_surface_num_multisamples(mt->num_samples) |
              SET_FIELD(min_lod, BRW_SURFACE_MIN_LOD) |
              SET_FIELD(tObj->MinLayer, BRW_SURFACE_MIN_ARRAY_ELEMENT));

   surf[5] = mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0;

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                       BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
                                       size, 1, false);
}

/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          drm_intel_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since
    * they include a pixel mask header that we need for correct behavior
    * with helper invocations, which must not write to the buffer.
    */
   brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                       BRW_SURFACEFORMAT_RAW,
                                       size, 1, true);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                             offset_bytes,
                                             buffer_obj->Size - offset_bytes);
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
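
   /* Worked example (illustrative, with made-up numbers): a 4096-byte buffer
    * holds 1024 dwords.  With offset_dwords = 0, a vec4 output
    * (num_vector_components = 4) and stride_dwords = 16, the first output
    * fits and (1024 - 0 - 4) / 16 = 63 additional outputs fit after it, so
    * buffer_size_minus_1 = 63 and the entry describes 64 addressable
    * outputs.
    */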
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = bo->offset64 + offset_bytes; /* reloc */
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents. */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *out_offset + 4,
                           bo, offset_bytes,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;

   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};


/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
brw_emit_null_surface_state(struct brw_context *brw,
                            unsigned width,
                            unsigned height,
                            unsigned samples,
                            uint32_t *out_offset)
{
   /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
    * Notes):
    *
    * A null surface will be used in instances where an actual surface is
    * not bound.  When a write message is generated to a null surface, no
    * actual surface is written to.  When a read message (including any
    * sampling engine message) is generated to a null surface, the result
    * is all zeros.  Note that a null surface type is allowed to be used
    * with all messages, even if it is not specifically indicated as
    * supported.  All of the remaining fields in surface state are ignored
    * for null surfaces, with the following exceptions:
    *
    * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
    *   depth buffer's corresponding state for all render target surfaces,
    *   including null.
    *
    * - Surface Format must be R8G8B8A8_UNORM.
    */
   unsigned surface_type = BRW_SURFACE_NULL;
   drm_intel_bo *bo = NULL;
   unsigned pitch_minus_1 = 0;
   uint32_t multisampling_state = 0;
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);

   if (samples > 1) {
      /* On Gen6, null render targets seem to cause GPU hangs when
       * multisampling.  So work around this problem by rendering into a
       * dummy color buffer.
       *
       * To decrease the amount of memory needed by the workaround buffer, we
       * set its pitch to 128 bytes (the width of a Y tile).  This means that
       * the amount of memory needed for the workaround buffer is
       * (width_in_tiles + height_in_tiles - 1) tiles.
       *
       * Note that since the workaround buffer will be interpreted by the
       * hardware as an interleaved multisampled buffer, we need to compute
       * width_in_tiles and height_in_tiles by dividing the width and height
       * by 16 rather than the normal Y-tile size of 32.
       */
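      /* Example of the sizing math above (illustrative numbers): for a
       * 1920x1080 multisampled framebuffer, width_in_tiles = ALIGN(1920, 16)
       * / 16 = 120 and height_in_tiles = ALIGN(1080, 16) / 16 = 68, so the
       * workaround BO needs (120 + 68 - 1) * 4096 = 765952 bytes.
       */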
      unsigned width_in_tiles = ALIGN(width, 16) / 16;
      unsigned height_in_tiles = ALIGN(height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;
      pitch_minus_1 = 127;
      multisampling_state = brw_get_surface_num_multisamples(samples);
   }

   surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   if (brw->gen < 6) {
      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
   }
   surf[1] = bo ? bo->offset64 : 0;
   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    * If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = multisampling_state;
   surf[5] = 0;

   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *out_offset + 4,
                              bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                bool layered, unsigned unit,
                                uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   uint32_t format = 0;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   assert(!layered);

   if (rb->TexImage && !brw->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         mt = irb->mt;
      }
   }

   intel_miptree_used_for_rendering(irb->mt);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);

   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
              mt->bo->offset64 + mt->offset);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->num_samples);

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB or when alpha writes are masked off.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}

/**
 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
 */
void
brw_update_renderbuffer_surfaces(struct brw_context *brw,
                                 const struct gl_framebuffer *fb,
                                 uint32_t render_target_start,
                                 uint32_t *surf_offset)
{
   GLuint i;
   const unsigned int w = _mesa_geometric_width(fb);
   const unsigned int h = _mesa_geometric_height(fb);
   const unsigned int s = _mesa_geometric_samples(fb);

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const uint32_t surf_index = render_target_start + i;

         if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
            surf_offset[surf_index] =
               brw->vtbl.update_renderbuffer_surface(
                  brw, fb->_ColorDrawBuffers[i],
                  _mesa_geometric_layers(fb) > 0, i, surf_index);
         } else {
            brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                              &surf_offset[surf_index]);
         }
      }
   } else {
      const uint32_t surf_index = render_target_start;
      brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                        &surf_offset[surf_index]);
   }
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   brw_update_renderbuffer_surfaces(
      brw, fb,
      brw->wm.prog_data->binding_table.render_target_start,
      brw->wm.base.surf_offset);
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = update_renderbuffer_surfaces,
};


static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw->vtbl.update_texture_surface(ctx, unit, surf_offset + s,
                                             for_gather, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (brw->gen < 8) {
      if (vs && vs->UsesGather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->UsesGather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->UsesGather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->UsesGather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->UsesGather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (brw->gen < 8) {
      if (cs && cs->UsesGather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = brw_update_cs_texture_surfaces,
};


void
brw_upload_ubo_surfaces(struct brw_context *brw,
                        struct gl_linked_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!shader)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < shader->NumUniformBlocks; i++) {
      struct gl_uniform_buffer_binding *binding =
         &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
      struct gl_shader_storage_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (!prog)
      return;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                           &brw->wm.base, &brw->wm.prog_data->base);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (!prog)
      return;

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                           &brw->cs.base, &brw->cs.prog_data->base);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

void
brw_upload_abo_surfaces(struct brw_context *brw,
                        struct gl_linked_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (shader && shader->NumAtomicBuffers) {
      for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
         struct gl_atomic_buffer_binding *binding =
            &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         drm_intel_bo *bo = intel_bufferobj_buffer(
            brw, intel_bo, binding->Offset,
            intel_bo->Base.Size - binding->Offset);

         brw->vtbl.emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                             binding->Offset,
                                             BRW_SURFACEFORMAT_RAW,
                                             bo->size - binding->Offset,
                                             1, true);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                              &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                              &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                                &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   uint32_t hw_format = brw_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate the format into the closest format the
       * hardware supports.
       */
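      /* For instance (an assumption about typical behavior, not taken from
       * this file): a four-channel 8-bit format is generally read back
       * through a single-channel 32-bit format of the same total size and
       * unpacked in the shader; the exact mapping is owned by
       * isl_lower_storage_image_format().
       */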
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return BRW_SURFACEFORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = obj->Size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static void
update_texture_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = minify(mt->logical_width0, u->Level);
   param->size[1] = minify(mt->logical_height0, u->Level);
   param->size[2] = (!u->Layered ? 1 :
                     u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                     u->TexObj->Target == GL_TEXTURE_3D ?
                     minify(mt->logical_depth0, u->Level) :
                     mt->logical_depth0);

   intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
                                  &param->offset[0],
                                  &param->offset[1]);

   param->stride[0] = mt->cpp;
   param->stride[1] = mt->pitch / mt->cpp;
   param->stride[2] =
      brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
   param->stride[3] =
      brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);

   if (mt->tiling == I915_TILING_X) {
      /* An X tile is a rectangular block of 512x8 bytes. */
      param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(8);

      if (brw->has_swizzling) {
         /* Right shifts required to swizzle bits 9 and 10 of the memory
          * address with bit 6.
          */
         param->swizzling[0] = 3;
         param->swizzling[1] = 4;
      }
   } else if (mt->tiling == I915_TILING_Y) {
      /* The layout of a Y-tiled surface in memory isn't really fundamentally
       * different from the layout of an X-tiled surface; we simply pretend
       * that the surface is broken up into a number of smaller 16Bx32 tiles,
       * each one arranged in X-major order just as in the X-tiling case.
       */
      param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(32);

      if (brw->has_swizzling) {
         /* Right shift required to swizzle bit 9 of the memory address with
          * bit 6.
          */
         param->swizzling[0] = 3;
      }
   }
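
   /* Worked example of the tiling shifts above (illustrative only): for a
    * 4-byte-per-pixel format, X-tiling gives tiling[0] = log2(512 / 4) = 7
    * and tiling[1] = log2(8) = 3, while Y-tiling gives
    * tiling[0] = log2(16 / 4) = 2 and tiling[1] = log2(32) = 5.
    */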

   /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
    * address calculation algorithm (emit_address_calculation() in
    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
    * modulus equal to the LOD.
    */
   param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
                       0);
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw->vtbl.emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size / texel_size, texel_size,
            access != GL_READ_ONLY);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         if (format == BRW_SURFACEFORMAT_RAW) {
            brw->vtbl.emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY);

         } else {
            const unsigned min_layer = obj->MinLayer + u->_Layer;
            const unsigned min_level = obj->MinLevel + u->Level;
            const unsigned num_layers = (!u->Layered ? 1 :
                                         obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                                         mt->logical_depth0);
            const GLenum target = (obj->Target == GL_TEXTURE_CUBE_MAP ||
                                   obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY ?
                                   GL_TEXTURE_2D_ARRAY : obj->Target);
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

            brw->vtbl.emit_texture_surface_state(
               brw, mt, target,
               min_layer, min_layer + num_layers,
               min_level, min_level + 1,
               format, SWIZZLE_XYZW,
               surf_offset, surf_index, access != GL_READ_ONLY, false);
         }

         update_texture_image_param(brw, u, surface_idx, param);
      }

   } else {
      brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          struct gl_linked_shader *shader,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (shader && shader->NumImages) {
      for (unsigned i = 0; i < shader->NumImages; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, shader->ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &prog_data->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms, so make
       * sure that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                                &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

void
gen4_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_texture_surface = brw_update_texture_surface;
   brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
   brw->vtbl.emit_buffer_surface_state = gen4_emit_buffer_surface_state;
}

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog && brw->cs.prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         brw->cs.prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      drm_intel_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw->vtbl.emit_buffer_surface_state(brw, surf_offset,
                                          bo, bo_offset,
                                          BRW_SURFACEFORMAT_RAW,
                                          3 * sizeof(GLuint), 1, true);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};