i965: Make all atoms track BRW_NEW_BLORP by default
[mesa.git] / src/mesa/drivers/dri/i965/brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_instruction.h"
40 #include "main/framebuffer.h"
41
42 #include "isl/isl.h"
43
44 #include "intel_mipmap_tree.h"
45 #include "intel_batchbuffer.h"
46 #include "intel_tex.h"
47 #include "intel_fbo.h"
48 #include "intel_buffer_objects.h"
49
50 #include "brw_context.h"
51 #include "brw_state.h"
52 #include "brw_defines.h"
53 #include "brw_wm.h"
54
55 GLuint
56 translate_tex_target(GLenum target)
57 {
58 switch (target) {
59 case GL_TEXTURE_1D:
60 case GL_TEXTURE_1D_ARRAY_EXT:
61 return BRW_SURFACE_1D;
62
63 case GL_TEXTURE_RECTANGLE_NV:
64 return BRW_SURFACE_2D;
65
66 case GL_TEXTURE_2D:
67 case GL_TEXTURE_2D_ARRAY_EXT:
68 case GL_TEXTURE_EXTERNAL_OES:
69 case GL_TEXTURE_2D_MULTISAMPLE:
70 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
71 return BRW_SURFACE_2D;
72
73 case GL_TEXTURE_3D:
74 return BRW_SURFACE_3D;
75
76 case GL_TEXTURE_CUBE_MAP:
77 case GL_TEXTURE_CUBE_MAP_ARRAY:
78 return BRW_SURFACE_CUBE;
79
80 default:
81 unreachable("not reached");
82 }
83 }
84
85 uint32_t
86 brw_get_surface_tiling_bits(uint32_t tiling)
87 {
88 switch (tiling) {
89 case I915_TILING_X:
90 return BRW_SURFACE_TILED;
91 case I915_TILING_Y:
92 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
93 default:
94 return 0;
95 }
96 }
97
98
99 uint32_t
100 brw_get_surface_num_multisamples(unsigned num_samples)
101 {
102 if (num_samples > 1)
103 return BRW_SURFACE_MULTISAMPLECOUNT_4;
104 else
105 return BRW_SURFACE_MULTISAMPLECOUNT_1;
106 }
107
108 void
109 brw_configure_w_tiled(const struct intel_mipmap_tree *mt,
110 bool is_render_target,
111 unsigned *width, unsigned *height,
112 unsigned *pitch, uint32_t *tiling, unsigned *format)
113 {
114 static const unsigned halign_stencil = 8;
115
116    /* In Y-tiling, a row is twice as wide as in W-tiling, and consequently
117 * there are half as many rows.
118 * In addition, mip-levels are accessed manually by the program and
119     * therefore the surface is set up to cover all the mip-levels for one slice.
120 * (Hardware is still used to access individual slices).
121 */
122 *tiling = I915_TILING_Y;
123 *pitch = mt->pitch * 2;
124 *width = ALIGN(mt->total_width, halign_stencil) * 2;
125 *height = (mt->total_height / mt->physical_depth0) / 2;
126
127 if (is_render_target) {
128 *format = BRW_SURFACEFORMAT_R8_UINT;
129 }
130 }
131
132
133 /**
134 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
135 * swizzling.
136 */
137 int
138 brw_get_texture_swizzle(const struct gl_context *ctx,
139 const struct gl_texture_object *t)
140 {
141 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
142
143 int swizzles[SWIZZLE_NIL + 1] = {
144 SWIZZLE_X,
145 SWIZZLE_Y,
146 SWIZZLE_Z,
147 SWIZZLE_W,
148 SWIZZLE_ZERO,
149 SWIZZLE_ONE,
150 SWIZZLE_NIL
151 };
152
153 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
154 img->_BaseFormat == GL_DEPTH_STENCIL) {
155 GLenum depth_mode = t->DepthMode;
156
157 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
158 * with depth component data specified with a sized internal format.
159 * Otherwise, it's left at the old default, GL_LUMINANCE.
160 */
161 if (_mesa_is_gles3(ctx) &&
162 img->InternalFormat != GL_DEPTH_COMPONENT &&
163 img->InternalFormat != GL_DEPTH_STENCIL) {
164 depth_mode = GL_RED;
165 }
166
167 switch (depth_mode) {
168 case GL_ALPHA:
169 swizzles[0] = SWIZZLE_ZERO;
170 swizzles[1] = SWIZZLE_ZERO;
171 swizzles[2] = SWIZZLE_ZERO;
172 swizzles[3] = SWIZZLE_X;
173 break;
174 case GL_LUMINANCE:
175 swizzles[0] = SWIZZLE_X;
176 swizzles[1] = SWIZZLE_X;
177 swizzles[2] = SWIZZLE_X;
178 swizzles[3] = SWIZZLE_ONE;
179 break;
180 case GL_INTENSITY:
181 swizzles[0] = SWIZZLE_X;
182 swizzles[1] = SWIZZLE_X;
183 swizzles[2] = SWIZZLE_X;
184 swizzles[3] = SWIZZLE_X;
185 break;
186 case GL_RED:
187 swizzles[0] = SWIZZLE_X;
188 swizzles[1] = SWIZZLE_ZERO;
189 swizzles[2] = SWIZZLE_ZERO;
190 swizzles[3] = SWIZZLE_ONE;
191 break;
192 }
193 }
194
195 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
196
197 /* If the texture's format is alpha-only, force R, G, and B to
198 * 0.0. Similarly, if the texture's format has no alpha channel,
199     * force the alpha value read to 1.0. This allows the
200 * implementation to use an RGBA texture for any of these formats
201 * without leaking any unexpected values.
202 */
203 switch (img->_BaseFormat) {
204 case GL_ALPHA:
205 swizzles[0] = SWIZZLE_ZERO;
206 swizzles[1] = SWIZZLE_ZERO;
207 swizzles[2] = SWIZZLE_ZERO;
208 break;
209 case GL_LUMINANCE:
210 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
211 swizzles[0] = SWIZZLE_X;
212 swizzles[1] = SWIZZLE_X;
213 swizzles[2] = SWIZZLE_X;
214 swizzles[3] = SWIZZLE_ONE;
215 }
216 break;
217 case GL_LUMINANCE_ALPHA:
218 if (datatype == GL_SIGNED_NORMALIZED) {
219 swizzles[0] = SWIZZLE_X;
220 swizzles[1] = SWIZZLE_X;
221 swizzles[2] = SWIZZLE_X;
222 swizzles[3] = SWIZZLE_W;
223 }
224 break;
225 case GL_INTENSITY:
226 if (datatype == GL_SIGNED_NORMALIZED) {
227 swizzles[0] = SWIZZLE_X;
228 swizzles[1] = SWIZZLE_X;
229 swizzles[2] = SWIZZLE_X;
230 swizzles[3] = SWIZZLE_X;
231 }
232 break;
233 case GL_RED:
234 case GL_RG:
235 case GL_RGB:
236 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
237 swizzles[3] = SWIZZLE_ONE;
238 break;
239 }
240
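/* Compose the application-requested swizzle (t->_Swizzle) with the
 * format/depth-mode swizzle computed above, so a single swizzle covers
 * both sources.
 */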
241 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
242 swizzles[GET_SWZ(t->_Swizzle, 1)],
243 swizzles[GET_SWZ(t->_Swizzle, 2)],
244 swizzles[GET_SWZ(t->_Swizzle, 3)]);
245 }
246
247 static void
248 gen4_emit_buffer_surface_state(struct brw_context *brw,
249 uint32_t *out_offset,
250 drm_intel_bo *bo,
251 unsigned buffer_offset,
252 unsigned surface_format,
253 unsigned buffer_size,
254 unsigned pitch,
255 bool rw)
256 {
257 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
258 6 * 4, 32, out_offset);
259 memset(surf, 0, 6 * 4);
260
261 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
262 surface_format << BRW_SURFACE_FORMAT_SHIFT |
263 (brw->gen >= 6 ? BRW_SURFACE_RC_READ_WRITE : 0);
264 surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
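/* A buffer surface has no real 2D layout: the element count is packed into
 * the Width (low 7 bits), Height (next 13 bits) and Depth (next 7 bits)
 * fields, as the masks and shifts below show.
 */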
265 surf[2] = (buffer_size & 0x7f) << BRW_SURFACE_WIDTH_SHIFT |
266 ((buffer_size >> 7) & 0x1fff) << BRW_SURFACE_HEIGHT_SHIFT;
267 surf[3] = ((buffer_size >> 20) & 0x7f) << BRW_SURFACE_DEPTH_SHIFT |
268 (pitch - 1) << BRW_SURFACE_PITCH_SHIFT;
269
270 /* Emit relocation to surface contents. The 965 PRM, Volume 4, section
271 * 5.1.2 "Data Cache" says: "the data cache does not exist as a separate
272 * physical cache. It is mapped in hardware to the sampler cache."
273 */
274 if (bo) {
275 drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
276 bo, buffer_offset,
277 I915_GEM_DOMAIN_SAMPLER,
278 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
279 }
280 }
281
282 void
283 brw_update_buffer_texture_surface(struct gl_context *ctx,
284 unsigned unit,
285 uint32_t *surf_offset)
286 {
287 struct brw_context *brw = brw_context(ctx);
288 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
289 struct intel_buffer_object *intel_obj =
290 intel_buffer_object(tObj->BufferObject);
291 uint32_t size = tObj->BufferSize;
292 drm_intel_bo *bo = NULL;
293 mesa_format format = tObj->_BufferObjectFormat;
294 uint32_t brw_format = brw_format_for_mesa_format(format);
295 int texel_size = _mesa_get_format_bytes(format);
296
297 if (intel_obj) {
298 size = MIN2(size, intel_obj->Base.Size);
299 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
300 }
301
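/* BRW_SURFACEFORMAT_R32G32B32A32_FLOAT is surface format 0, so a zero
 * return from brw_format_for_mesa_format() is only an error for other
 * mesa formats.
 */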
302 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
303 _mesa_problem(NULL, "bad format %s for texture buffer\n",
304 _mesa_get_format_name(format));
305 }
306
307 brw->vtbl.emit_buffer_surface_state(brw, surf_offset, bo,
308 tObj->BufferOffset,
309 brw_format,
310 size / texel_size,
311 texel_size,
312 false /* rw */);
313 }
314
315 static void
316 brw_update_texture_surface(struct gl_context *ctx,
317 unsigned unit,
318 uint32_t *surf_offset,
319 bool for_gather)
320 {
321 struct brw_context *brw = brw_context(ctx);
322 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
323 struct intel_texture_object *intelObj = intel_texture_object(tObj);
324 struct intel_mipmap_tree *mt = intelObj->mt;
325 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
326 uint32_t *surf;
327
328 /* BRW_NEW_TEXTURE_BUFFER */
329 if (tObj->Target == GL_TEXTURE_BUFFER) {
330 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
331 return;
332 }
333
334 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
335 6 * 4, 32, surf_offset);
336
337 uint32_t tex_format = translate_tex_format(brw, mt->format,
338 sampler->sRGBDecode);
339
340 if (for_gather) {
341 /* Sandybridge's gather4 message is broken for integer formats.
342 * To work around this, we pretend the surface is UNORM for
343 * 8 or 16-bit formats, and emit shader instructions to recover
344 * the real INT/UINT value. For 32-bit formats, we pretend
345 * the surface is FLOAT, and simply reinterpret the resulting
346 * bits.
347 */
348 switch (tex_format) {
349 case BRW_SURFACEFORMAT_R8_SINT:
350 case BRW_SURFACEFORMAT_R8_UINT:
351 tex_format = BRW_SURFACEFORMAT_R8_UNORM;
352 break;
353
354 case BRW_SURFACEFORMAT_R16_SINT:
355 case BRW_SURFACEFORMAT_R16_UINT:
356 tex_format = BRW_SURFACEFORMAT_R16_UNORM;
357 break;
358
359 case BRW_SURFACEFORMAT_R32_SINT:
360 case BRW_SURFACEFORMAT_R32_UINT:
361 tex_format = BRW_SURFACEFORMAT_R32_FLOAT;
362 break;
363
364 default:
365 break;
366 }
367 }
368
369 surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
370 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
371 BRW_SURFACE_CUBEFACE_ENABLES |
372 tex_format << BRW_SURFACE_FORMAT_SHIFT);
373
374 surf[1] = mt->bo->offset64 + mt->offset; /* reloc */
375
376 surf[2] = ((intelObj->_MaxLevel - tObj->BaseLevel) << BRW_SURFACE_LOD_SHIFT |
377 (mt->logical_width0 - 1) << BRW_SURFACE_WIDTH_SHIFT |
378 (mt->logical_height0 - 1) << BRW_SURFACE_HEIGHT_SHIFT);
379
380 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
381 (mt->logical_depth0 - 1) << BRW_SURFACE_DEPTH_SHIFT |
382 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
383
384 surf[4] = (brw_get_surface_num_multisamples(mt->num_samples) |
385 SET_FIELD(tObj->BaseLevel - mt->first_level, BRW_SURFACE_MIN_LOD));
386
387 surf[5] = mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0;
388
389 /* Emit relocation to surface contents */
390 drm_intel_bo_emit_reloc(brw->batch.bo,
391 *surf_offset + 4,
392 mt->bo,
393 surf[1] - mt->bo->offset64,
394 I915_GEM_DOMAIN_SAMPLER, 0);
395 }
396
397 /**
398 * Create the constant buffer surface. Vertex/fragment shader constants will be
399 * read from this buffer with Data Port Read instructions/messages.
400 */
401 void
402 brw_create_constant_surface(struct brw_context *brw,
403 drm_intel_bo *bo,
404 uint32_t offset,
405 uint32_t size,
406 uint32_t *out_offset)
407 {
408 brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
409 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
410 size, 1, false);
411 }
412
413 /**
414 * Create the buffer surface. Shader buffer variables will be
415  * read from / written to this buffer with Data Port Read/Write
416 * instructions/messages.
417 */
418 void
419 brw_create_buffer_surface(struct brw_context *brw,
420 drm_intel_bo *bo,
421 uint32_t offset,
422 uint32_t size,
423 uint32_t *out_offset)
424 {
425 /* Use a raw surface so we can reuse existing untyped read/write/atomic
426 * messages. We need these specifically for the fragment shader since they
427     * include a pixel mask header that we need in order to get correct
428     * behavior with helper invocations, which cannot write to the buffer.
429 */
430 brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
431 BRW_SURFACEFORMAT_RAW,
432 size, 1, true);
433 }
434
435 /**
436 * Set up a binding table entry for use by stream output logic (transform
437 * feedback).
438 *
439 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
440 */
441 void
442 brw_update_sol_surface(struct brw_context *brw,
443 struct gl_buffer_object *buffer_obj,
444 uint32_t *out_offset, unsigned num_vector_components,
445 unsigned stride_dwords, unsigned offset_dwords)
446 {
447 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
448 uint32_t offset_bytes = 4 * offset_dwords;
449 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
450 offset_bytes,
451 buffer_obj->Size - offset_bytes);
452 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
453 out_offset);
454 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
455 size_t size_dwords = buffer_obj->Size / 4;
456 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
457
458 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
459 * too big to map using a single binding table entry?
460 */
461 assert((size_dwords - offset_dwords) / stride_dwords
462 <= BRW_MAX_NUM_BUFFER_ENTRIES);
463
464 if (size_dwords > offset_dwords + num_vector_components) {
465 /* There is room for at least 1 transform feedback output in the buffer.
466 * Compute the number of additional transform feedback outputs the
467 * buffer has room for.
468 */
469 buffer_size_minus_1 =
470 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
471 } else {
472 /* There isn't even room for a single transform feedback output in the
473 * buffer. We can't configure the binding table entry to prevent output
474 * entirely; we'll have to rely on the geometry shader to detect
475 * overflow. But to minimize the damage in case of a bug, set up the
476 * binding table entry to just allow a single output.
477 */
478 buffer_size_minus_1 = 0;
479 }
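/* Split buffer_size_minus_1 across the 7-bit Width, 13-bit Height and
 * 7-bit Depth fields of the surface state.
 */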
480 width = buffer_size_minus_1 & 0x7f;
481 height = (buffer_size_minus_1 & 0xfff80) >> 7;
482 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
483
484 switch (num_vector_components) {
485 case 1:
486 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
487 break;
488 case 2:
489 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
490 break;
491 case 3:
492 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
493 break;
494 case 4:
495 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
496 break;
497 default:
498 unreachable("Invalid vector size for transform feedback output");
499 }
500
501 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
502 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
503 surface_format << BRW_SURFACE_FORMAT_SHIFT |
504 BRW_SURFACE_RC_READ_WRITE;
505 surf[1] = bo->offset64 + offset_bytes; /* reloc */
506 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
507 height << BRW_SURFACE_HEIGHT_SHIFT);
508 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
509 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
510 surf[4] = 0;
511 surf[5] = 0;
512
513 /* Emit relocation to surface contents. */
514 drm_intel_bo_emit_reloc(brw->batch.bo,
515 *out_offset + 4,
516 bo, offset_bytes,
517 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
518 }
519
520 /* Creates a new WM constant buffer reflecting the current fragment program's
521 * constants, if needed by the fragment program.
522 *
523 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
524 * state atom.
525 */
526 static void
527 brw_upload_wm_pull_constants(struct brw_context *brw)
528 {
529 struct brw_stage_state *stage_state = &brw->wm.base;
530 /* BRW_NEW_FRAGMENT_PROGRAM */
531 struct brw_fragment_program *fp =
532 (struct brw_fragment_program *) brw->fragment_program;
533 /* BRW_NEW_FS_PROG_DATA */
534 struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;
535
536 /* _NEW_PROGRAM_CONSTANTS */
537 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
538 stage_state, prog_data);
539 }
540
541 const struct brw_tracked_state brw_wm_pull_constants = {
542 .dirty = {
543 .mesa = _NEW_PROGRAM_CONSTANTS,
544 .brw = BRW_NEW_BATCH |
545 BRW_NEW_BLORP |
546 BRW_NEW_FRAGMENT_PROGRAM |
547 BRW_NEW_FS_PROG_DATA,
548 },
549 .emit = brw_upload_wm_pull_constants,
550 };
551
552 /**
553 * Creates a null renderbuffer surface.
554 *
555 * This is used when the shader doesn't write to any color output. An FB
556 * write to target 0 will still be emitted, because that's how the thread is
557 * terminated (and computed depth is returned), so we need to have the
558  * hardware discard the target 0 color output.
559 */
560 static void
561 brw_emit_null_surface_state(struct brw_context *brw,
562 unsigned width,
563 unsigned height,
564 unsigned samples,
565 uint32_t *out_offset)
566 {
567 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
568 * Notes):
569 *
570 * A null surface will be used in instances where an actual surface is
571 * not bound. When a write message is generated to a null surface, no
572 * actual surface is written to. When a read message (including any
573 * sampling engine message) is generated to a null surface, the result
574 * is all zeros. Note that a null surface type is allowed to be used
575     *     with all messages, even if it is not specifically indicated as
576 * supported. All of the remaining fields in surface state are ignored
577 * for null surfaces, with the following exceptions:
578 *
579 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
580 * depth buffer’s corresponding state for all render target surfaces,
581 * including null.
582 *
583 * - Surface Format must be R8G8B8A8_UNORM.
584 */
585 unsigned surface_type = BRW_SURFACE_NULL;
586 drm_intel_bo *bo = NULL;
587 unsigned pitch_minus_1 = 0;
588 uint32_t multisampling_state = 0;
589 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
590 out_offset);
591
592 if (samples > 1) {
593 /* On Gen6, null render targets seem to cause GPU hangs when
594        * multisampling. So work around this problem by rendering into a dummy
595 * color buffer.
596 *
597 * To decrease the amount of memory needed by the workaround buffer, we
598 * set its pitch to 128 bytes (the width of a Y tile). This means that
599 * the amount of memory needed for the workaround buffer is
600 * (width_in_tiles + height_in_tiles - 1) tiles.
601 *
602 * Note that since the workaround buffer will be interpreted by the
603 * hardware as an interleaved multisampled buffer, we need to compute
604 * width_in_tiles and height_in_tiles by dividing the width and height
605 * by 16 rather than the normal Y-tile size of 32.
606 */
607 unsigned width_in_tiles = ALIGN(width, 16) / 16;
608 unsigned height_in_tiles = ALIGN(height, 16) / 16;
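/* A Y tile is 128 bytes wide by 32 rows, i.e. 4096 bytes, hence the
 * multiplier below.
 */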
609 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
610 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
611 size_needed);
612 bo = brw->wm.multisampled_null_render_target_bo;
613 surface_type = BRW_SURFACE_2D;
614 pitch_minus_1 = 127;
615 multisampling_state = brw_get_surface_num_multisamples(samples);
616 }
617
618 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
619 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
620 if (brw->gen < 6) {
621 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
622 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
623 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
624 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
625 }
626 surf[1] = bo ? bo->offset64 : 0;
627 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
628 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
629
630 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
631 * Notes):
632 *
633 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
634 */
635 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
636 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
637 surf[4] = multisampling_state;
638 surf[5] = 0;
639
640 if (bo) {
641 drm_intel_bo_emit_reloc(brw->batch.bo,
642 *out_offset + 4,
643 bo, 0,
644 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
645 }
646 }
647
648 /**
649 * Sets up a surface state structure to point at the given region.
650 * While it is only used for the front/back buffer currently, it should be
651  * usable for further buffers when doing ARB_draw_buffers support.
652 */
653 static uint32_t
654 brw_update_renderbuffer_surface(struct brw_context *brw,
655 struct gl_renderbuffer *rb,
656 bool layered, unsigned unit,
657 uint32_t surf_index)
658 {
659 struct gl_context *ctx = &brw->ctx;
660 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
661 struct intel_mipmap_tree *mt = irb->mt;
662 uint32_t *surf;
663 uint32_t tile_x, tile_y;
664 uint32_t format = 0;
665 uint32_t offset;
666 /* _NEW_BUFFERS */
667 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
668 /* BRW_NEW_FS_PROG_DATA */
669
670 assert(!layered);
671
672 if (rb->TexImage && !brw->has_surface_tile_offset) {
673 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
674
675 if (tile_x != 0 || tile_y != 0) {
676 /* Original gen4 hardware couldn't draw to a non-tile-aligned
677          * destination in a miptree unless you actually set up your renderbuffer
678 * as a miptree and used the fragile lod/array_index/etc. controls to
679 * select the image. So, instead, we just make a new single-level
680 * miptree and render into that.
681 */
682 intel_renderbuffer_move_to_temp(brw, irb, false);
683 mt = irb->mt;
684 }
685 }
686
687 intel_miptree_used_for_rendering(irb->mt);
688
689 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
690
691 format = brw->render_target_format[rb_format];
692 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
693 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
694 __func__, _mesa_get_format_name(rb_format));
695 }
696
697 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
698 format << BRW_SURFACE_FORMAT_SHIFT);
699
700 /* reloc */
701 assert(mt->offset % mt->cpp == 0);
702 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
703 mt->bo->offset64 + mt->offset);
704
705 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
706 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
707
708 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
709 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
710
711 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
712
713 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
714 /* Note that the low bits of these fields are missing, so
715     * there's the possibility of getting into trouble.
716 */
717 assert(tile_x % 4 == 0);
718 assert(tile_y % 2 == 0);
719 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
720 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
721 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
722
723 if (brw->gen < 6) {
724 /* _NEW_COLOR */
725 if (!ctx->Color.ColorLogicOpEnabled &&
726 (ctx->Color.BlendEnabled & (1 << unit)))
727 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
728
729 if (!ctx->Color.ColorMask[unit][0])
730 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
731 if (!ctx->Color.ColorMask[unit][1])
732 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
733 if (!ctx->Color.ColorMask[unit][2])
734 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
735
736       /* Disable writes to the alpha component when the renderbuffer is XRGB
737        * (it has no alpha bits) or when alpha writes are masked off.
738 */
739 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
740 !ctx->Color.ColorMask[unit][3]) {
741 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
742 }
743 }
744
745 drm_intel_bo_emit_reloc(brw->batch.bo,
746 offset + 4,
747 mt->bo,
748 surf[1] - mt->bo->offset64,
749 I915_GEM_DOMAIN_RENDER,
750 I915_GEM_DOMAIN_RENDER);
751
752 return offset;
753 }
754
755 /**
756 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
757 */
758 void
759 brw_update_renderbuffer_surfaces(struct brw_context *brw,
760 const struct gl_framebuffer *fb,
761 uint32_t render_target_start,
762 uint32_t *surf_offset)
763 {
764 GLuint i;
765 const unsigned int w = _mesa_geometric_width(fb);
766 const unsigned int h = _mesa_geometric_height(fb);
767 const unsigned int s = _mesa_geometric_samples(fb);
768
769 /* Update surfaces for drawing buffers */
770 if (fb->_NumColorDrawBuffers >= 1) {
771 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
772 const uint32_t surf_index = render_target_start + i;
773
774 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
775 surf_offset[surf_index] =
776 brw->vtbl.update_renderbuffer_surface(
777 brw, fb->_ColorDrawBuffers[i],
778 _mesa_geometric_layers(fb) > 0, i, surf_index);
779 } else {
780 brw->vtbl.emit_null_surface_state(brw, w, h, s,
781 &surf_offset[surf_index]);
782 }
783 }
784 } else {
785 const uint32_t surf_index = render_target_start;
786 brw->vtbl.emit_null_surface_state(brw, w, h, s,
787 &surf_offset[surf_index]);
788 }
789 }
790
791 static void
792 update_renderbuffer_surfaces(struct brw_context *brw)
793 {
794 const struct gl_context *ctx = &brw->ctx;
795
796 /* _NEW_BUFFERS | _NEW_COLOR */
797 const struct gl_framebuffer *fb = ctx->DrawBuffer;
798 brw_update_renderbuffer_surfaces(
799 brw, fb,
800 brw->wm.prog_data->binding_table.render_target_start,
801 brw->wm.base.surf_offset);
802 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
803 }
804
805 const struct brw_tracked_state brw_renderbuffer_surfaces = {
806 .dirty = {
807 .mesa = _NEW_BUFFERS |
808 _NEW_COLOR,
809 .brw = BRW_NEW_BATCH |
810 BRW_NEW_BLORP |
811 BRW_NEW_FS_PROG_DATA,
812 },
813 .emit = update_renderbuffer_surfaces,
814 };
815
816 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
817 .dirty = {
818 .mesa = _NEW_BUFFERS,
819 .brw = BRW_NEW_BATCH |
820 BRW_NEW_BLORP,
821 },
822 .emit = update_renderbuffer_surfaces,
823 };
824
825
826 static void
827 update_stage_texture_surfaces(struct brw_context *brw,
828 const struct gl_program *prog,
829 struct brw_stage_state *stage_state,
830 bool for_gather)
831 {
832 if (!prog)
833 return;
834
835 struct gl_context *ctx = &brw->ctx;
836
837 uint32_t *surf_offset = stage_state->surf_offset;
838
839 /* BRW_NEW_*_PROG_DATA */
840 if (for_gather)
841 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
842 else
843 surf_offset += stage_state->prog_data->binding_table.texture_start;
844
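/* _mesa_fls() returns one plus the index of the highest bit set, i.e. the
 * number of sampler slots that need to be walked below.
 */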
845 unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
846 for (unsigned s = 0; s < num_samplers; s++) {
847 surf_offset[s] = 0;
848
849 if (prog->SamplersUsed & (1 << s)) {
850 const unsigned unit = prog->SamplerUnits[s];
851
852 /* _NEW_TEXTURE */
853 if (ctx->Texture.Unit[unit]._Current) {
854 brw->vtbl.update_texture_surface(ctx, unit, surf_offset + s, for_gather);
855 }
856 }
857 }
858 }
859
860
861 /**
862 * Construct SURFACE_STATE objects for enabled textures.
863 */
864 static void
865 brw_update_texture_surfaces(struct brw_context *brw)
866 {
867 /* BRW_NEW_VERTEX_PROGRAM */
868 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
869
870 /* BRW_NEW_TESS_PROGRAMS */
871 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
872 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
873
874 /* BRW_NEW_GEOMETRY_PROGRAM */
875 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
876
877 /* BRW_NEW_FRAGMENT_PROGRAM */
878 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
879
880 /* _NEW_TEXTURE */
881 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false);
882 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false);
883 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false);
884 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false);
885 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false);
886
887    /* Emit an alternate set of surface state for gather. This
888     * allows the surface format to be overridden for only the
889     * gather4 messages. */
890 if (brw->gen < 8) {
891 if (vs && vs->UsesGather)
892 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true);
893 if (tcs && tcs->UsesGather)
894 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true);
895 if (tes && tes->UsesGather)
896 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true);
897 if (gs && gs->UsesGather)
898 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true);
899 if (fs && fs->UsesGather)
900 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
901 }
902
903 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
904 }
905
906 const struct brw_tracked_state brw_texture_surfaces = {
907 .dirty = {
908 .mesa = _NEW_TEXTURE,
909 .brw = BRW_NEW_BATCH |
910 BRW_NEW_BLORP |
911 BRW_NEW_FRAGMENT_PROGRAM |
912 BRW_NEW_FS_PROG_DATA |
913 BRW_NEW_GEOMETRY_PROGRAM |
914 BRW_NEW_GS_PROG_DATA |
915 BRW_NEW_TESS_PROGRAMS |
916 BRW_NEW_TCS_PROG_DATA |
917 BRW_NEW_TES_PROG_DATA |
918 BRW_NEW_TEXTURE_BUFFER |
919 BRW_NEW_VERTEX_PROGRAM |
920 BRW_NEW_VS_PROG_DATA,
921 },
922 .emit = brw_update_texture_surfaces,
923 };
924
925 static void
926 brw_update_cs_texture_surfaces(struct brw_context *brw)
927 {
928 /* BRW_NEW_COMPUTE_PROGRAM */
929 struct gl_program *cs = (struct gl_program *) brw->compute_program;
930
931 /* _NEW_TEXTURE */
932 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false);
933
934    /* Emit an alternate set of surface state for gather. This
935     * allows the surface format to be overridden for only the
936     * gather4 messages.
937 */
938 if (brw->gen < 8) {
939 if (cs && cs->UsesGather)
940 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true);
941 }
942
943 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
944 }
945
946 const struct brw_tracked_state brw_cs_texture_surfaces = {
947 .dirty = {
948 .mesa = _NEW_TEXTURE,
949 .brw = BRW_NEW_BATCH |
950 BRW_NEW_BLORP |
951 BRW_NEW_COMPUTE_PROGRAM,
952 },
953 .emit = brw_update_cs_texture_surfaces,
954 };
955
956
957 void
958 brw_upload_ubo_surfaces(struct brw_context *brw,
959 struct gl_shader *shader,
960 struct brw_stage_state *stage_state,
961 struct brw_stage_prog_data *prog_data)
962 {
963 struct gl_context *ctx = &brw->ctx;
964
965 if (!shader)
966 return;
967
968 uint32_t *ubo_surf_offsets =
969 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
970
971 for (int i = 0; i < shader->NumUniformBlocks; i++) {
972 struct gl_uniform_buffer_binding *binding =
973 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
974
975 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
976 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
977 } else {
978 struct intel_buffer_object *intel_bo =
979 intel_buffer_object(binding->BufferObject);
980 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
981 if (!binding->AutomaticSize)
982 size = MIN2(size, binding->Size);
983 drm_intel_bo *bo =
984 intel_bufferobj_buffer(brw, intel_bo,
985 binding->Offset,
986 size);
987 brw_create_constant_surface(brw, bo, binding->Offset,
988 size,
989 &ubo_surf_offsets[i]);
990 }
991 }
992
993 uint32_t *ssbo_surf_offsets =
994 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
995
996 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
997 struct gl_shader_storage_buffer_binding *binding =
998 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
999
1000 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1001 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1002 } else {
1003 struct intel_buffer_object *intel_bo =
1004 intel_buffer_object(binding->BufferObject);
1005 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1006 if (!binding->AutomaticSize)
1007 size = MIN2(size, binding->Size);
1008 drm_intel_bo *bo =
1009 intel_bufferobj_buffer(brw, intel_bo,
1010 binding->Offset,
1011 size);
1012 brw_create_buffer_surface(brw, bo, binding->Offset,
1013 size,
1014 &ssbo_surf_offsets[i]);
1015 }
1016 }
1017
1018 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1019 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1020 }
1021
1022 static void
1023 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1024 {
1025 struct gl_context *ctx = &brw->ctx;
1026 /* _NEW_PROGRAM */
1027 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1028
1029 if (!prog)
1030 return;
1031
1032 /* BRW_NEW_FS_PROG_DATA */
1033 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1034 &brw->wm.base, &brw->wm.prog_data->base);
1035 }
1036
1037 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1038 .dirty = {
1039 .mesa = _NEW_PROGRAM,
1040 .brw = BRW_NEW_BATCH |
1041 BRW_NEW_BLORP |
1042 BRW_NEW_FS_PROG_DATA |
1043 BRW_NEW_UNIFORM_BUFFER,
1044 },
1045 .emit = brw_upload_wm_ubo_surfaces,
1046 };
1047
1048 static void
1049 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1050 {
1051 struct gl_context *ctx = &brw->ctx;
1052 /* _NEW_PROGRAM */
1053 struct gl_shader_program *prog =
1054 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1055
1056 if (!prog)
1057 return;
1058
1059 /* BRW_NEW_CS_PROG_DATA */
1060 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1061 &brw->cs.base, &brw->cs.prog_data->base);
1062 }
1063
1064 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1065 .dirty = {
1066 .mesa = _NEW_PROGRAM,
1067 .brw = BRW_NEW_BATCH |
1068 BRW_NEW_BLORP |
1069 BRW_NEW_CS_PROG_DATA |
1070 BRW_NEW_UNIFORM_BUFFER,
1071 },
1072 .emit = brw_upload_cs_ubo_surfaces,
1073 };
1074
1075 void
1076 brw_upload_abo_surfaces(struct brw_context *brw,
1077 struct gl_shader *shader,
1078 struct brw_stage_state *stage_state,
1079 struct brw_stage_prog_data *prog_data)
1080 {
1081 struct gl_context *ctx = &brw->ctx;
1082 uint32_t *surf_offsets =
1083 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1084
1085 if (shader && shader->NumAtomicBuffers) {
1086 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1087 struct gl_atomic_buffer_binding *binding =
1088 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1089 struct intel_buffer_object *intel_bo =
1090 intel_buffer_object(binding->BufferObject);
1091 drm_intel_bo *bo = intel_bufferobj_buffer(
1092 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1093
1094 brw->vtbl.emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1095 binding->Offset, BRW_SURFACEFORMAT_RAW,
1096 bo->size - binding->Offset, 1, true);
1097 }
1098
1099 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1100 }
1101 }
1102
1103 static void
1104 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1105 {
1106 struct gl_context *ctx = &brw->ctx;
1107 /* _NEW_PROGRAM */
1108 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1109
1110 if (prog) {
1111 /* BRW_NEW_FS_PROG_DATA */
1112 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1113 &brw->wm.base, &brw->wm.prog_data->base);
1114 }
1115 }
1116
1117 const struct brw_tracked_state brw_wm_abo_surfaces = {
1118 .dirty = {
1119 .mesa = _NEW_PROGRAM,
1120 .brw = BRW_NEW_ATOMIC_BUFFER |
1121 BRW_NEW_BLORP |
1122 BRW_NEW_BATCH |
1123 BRW_NEW_FS_PROG_DATA,
1124 },
1125 .emit = brw_upload_wm_abo_surfaces,
1126 };
1127
1128 static void
1129 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1130 {
1131 struct gl_context *ctx = &brw->ctx;
1132 /* _NEW_PROGRAM */
1133 struct gl_shader_program *prog =
1134 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1135
1136 if (prog) {
1137 /* BRW_NEW_CS_PROG_DATA */
1138 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1139 &brw->cs.base, &brw->cs.prog_data->base);
1140 }
1141 }
1142
1143 const struct brw_tracked_state brw_cs_abo_surfaces = {
1144 .dirty = {
1145 .mesa = _NEW_PROGRAM,
1146 .brw = BRW_NEW_ATOMIC_BUFFER |
1147 BRW_NEW_BLORP |
1148 BRW_NEW_BATCH |
1149 BRW_NEW_CS_PROG_DATA,
1150 },
1151 .emit = brw_upload_cs_abo_surfaces,
1152 };
1153
1154 static void
1155 brw_upload_cs_image_surfaces(struct brw_context *brw)
1156 {
1157 struct gl_context *ctx = &brw->ctx;
1158 /* _NEW_PROGRAM */
1159 struct gl_shader_program *prog =
1160 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1161
1162 if (prog) {
1163 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1164 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1165 &brw->cs.base, &brw->cs.prog_data->base);
1166 }
1167 }
1168
1169 const struct brw_tracked_state brw_cs_image_surfaces = {
1170 .dirty = {
1171 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1172 .brw = BRW_NEW_BATCH |
1173 BRW_NEW_BLORP |
1174 BRW_NEW_CS_PROG_DATA |
1175 BRW_NEW_IMAGE_UNITS
1176 },
1177 .emit = brw_upload_cs_image_surfaces,
1178 };
1179
1180 static uint32_t
1181 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1182 {
1183 const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
1184 uint32_t hw_format = brw_format_for_mesa_format(format);
1185 if (access == GL_WRITE_ONLY) {
1186 return hw_format;
1187 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1188 /* Typed surface reads support a very limited subset of the shader
1189 * image formats. Translate it into the closest format the
1190 * hardware supports.
1191 */
1192 return isl_lower_storage_image_format(devinfo, hw_format);
1193 } else {
1194 /* The hardware doesn't actually support a typed format that we can use
1195 * so we have to fall back to untyped read/write messages.
1196 */
1197 return BRW_SURFACEFORMAT_RAW;
1198 }
1199 }
1200
1201 static void
1202 update_default_image_param(struct brw_context *brw,
1203 struct gl_image_unit *u,
1204 unsigned surface_idx,
1205 struct brw_image_param *param)
1206 {
1207 memset(param, 0, sizeof(*param));
1208 param->surface_idx = surface_idx;
1209 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1210 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1211 * detailed explanation of these parameters.
1212 */
1213 param->swizzling[0] = 0xff;
1214 param->swizzling[1] = 0xff;
1215 }
1216
1217 static void
1218 update_buffer_image_param(struct brw_context *brw,
1219 struct gl_image_unit *u,
1220 unsigned surface_idx,
1221 struct brw_image_param *param)
1222 {
1223 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1224
1225 update_default_image_param(brw, u, surface_idx, param);
1226
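/* For a buffer image only the width (in texels) and the X stride (texel
 * size in bytes) are meaningful; the remaining dimensions stay at the
 * defaults set above.
 */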
1227 param->size[0] = obj->Size / _mesa_get_format_bytes(u->_ActualFormat);
1228 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1229 }
1230
1231 static void
1232 update_texture_image_param(struct brw_context *brw,
1233 struct gl_image_unit *u,
1234 unsigned surface_idx,
1235 struct brw_image_param *param)
1236 {
1237 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1238
1239 update_default_image_param(brw, u, surface_idx, param);
1240
1241 param->size[0] = minify(mt->logical_width0, u->Level);
1242 param->size[1] = minify(mt->logical_height0, u->Level);
1243 param->size[2] = (!u->Layered ? 1 :
1244 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1245 u->TexObj->Target == GL_TEXTURE_3D ?
1246 minify(mt->logical_depth0, u->Level) :
1247 mt->logical_depth0);
1248
1249 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1250 &param->offset[0],
1251 &param->offset[1]);
1252
1253 param->stride[0] = mt->cpp;
1254 param->stride[1] = mt->pitch / mt->cpp;
1255 param->stride[2] =
1256 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1257 param->stride[3] =
1258 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1259
1260 if (mt->tiling == I915_TILING_X) {
1261 /* An X tile is a rectangular block of 512x8 bytes. */
1262 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1263 param->tiling[1] = _mesa_logbase2(8);
1264
1265 if (brw->has_swizzling) {
1266 /* Right shifts required to swizzle bits 9 and 10 of the memory
1267 * address with bit 6.
1268 */
1269 param->swizzling[0] = 3;
1270 param->swizzling[1] = 4;
1271 }
1272 } else if (mt->tiling == I915_TILING_Y) {
1273       /* The layout of a Y-tiled surface in memory isn't fundamentally different
1274        * from the layout of an X-tiled surface; we simply pretend that the
1275        * surface is broken up into a number of smaller 16Bx32 tiles, each one
1276        * arranged in X-major order just as in the X-tiling case.
1277 */
1278 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1279 param->tiling[1] = _mesa_logbase2(32);
1280
1281 if (brw->has_swizzling) {
1282 /* Right shift required to swizzle bit 9 of the memory address with
1283 * bit 6.
1284 */
1285 param->swizzling[0] = 3;
1286 }
1287 }
1288
1289 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1290 * address calculation algorithm (emit_address_calculation() in
1291 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1292 * modulus equal to the LOD.
1293 */
1294 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1295 0);
1296 }
1297
1298 static void
1299 update_image_surface(struct brw_context *brw,
1300 struct gl_image_unit *u,
1301 GLenum access,
1302 unsigned surface_idx,
1303 uint32_t *surf_offset,
1304 struct brw_image_param *param)
1305 {
1306 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1307 struct gl_texture_object *obj = u->TexObj;
1308 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1309
1310 if (obj->Target == GL_TEXTURE_BUFFER) {
1311 struct intel_buffer_object *intel_obj =
1312 intel_buffer_object(obj->BufferObject);
1313 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1314 _mesa_get_format_bytes(u->_ActualFormat));
1315
1316 brw->vtbl.emit_buffer_surface_state(
1317 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1318 format, intel_obj->Base.Size / texel_size, texel_size,
1319 access != GL_READ_ONLY);
1320
1321 update_buffer_image_param(brw, u, surface_idx, param);
1322
1323 } else {
1324 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1325 struct intel_mipmap_tree *mt = intel_obj->mt;
1326
1327 if (format == BRW_SURFACEFORMAT_RAW) {
1328 brw->vtbl.emit_buffer_surface_state(
1329 brw, surf_offset, mt->bo, mt->offset,
1330 format, mt->bo->size - mt->offset, 1 /* pitch */,
1331 access != GL_READ_ONLY);
1332
1333 } else {
1334 const unsigned min_layer = obj->MinLayer + u->_Layer;
1335 const unsigned min_level = obj->MinLevel + u->Level;
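/* Image access treats cube maps as 2D arrays: a cube contributes six
 * layers, and the target is remapped to GL_TEXTURE_2D_ARRAY below.
 */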
1336 const unsigned num_layers = (!u->Layered ? 1 :
1337 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1338 mt->logical_depth0);
1339 const GLenum target = (obj->Target == GL_TEXTURE_CUBE_MAP ||
1340 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY ?
1341 GL_TEXTURE_2D_ARRAY : obj->Target);
1342 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1343
1344 brw->vtbl.emit_texture_surface_state(
1345 brw, mt, target,
1346 min_layer, min_layer + num_layers,
1347 min_level, min_level + 1,
1348 format, SWIZZLE_XYZW,
1349 surf_offset, surf_index, access != GL_READ_ONLY, false);
1350 }
1351
1352 update_texture_image_param(brw, u, surface_idx, param);
1353 }
1354
1355 } else {
1356 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1357 update_default_image_param(brw, u, surface_idx, param);
1358 }
1359 }
1360
1361 void
1362 brw_upload_image_surfaces(struct brw_context *brw,
1363 struct gl_shader *shader,
1364 struct brw_stage_state *stage_state,
1365 struct brw_stage_prog_data *prog_data)
1366 {
1367 struct gl_context *ctx = &brw->ctx;
1368
1369 if (shader && shader->NumImages) {
1370 for (unsigned i = 0; i < shader->NumImages; i++) {
1371 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1372 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1373
1374 update_image_surface(brw, u, shader->ImageAccess[i],
1375 surf_idx,
1376 &stage_state->surf_offset[surf_idx],
1377 &prog_data->image_param[i]);
1378 }
1379
1380 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1381       /* This may have changed the image metadata that depends on the context
1382        * image unit state and is passed to the program as uniforms, so make
1383        * sure that push and pull constants are reuploaded.
1384 */
1385 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1386 }
1387 }
1388
1389 static void
1390 brw_upload_wm_image_surfaces(struct brw_context *brw)
1391 {
1392 struct gl_context *ctx = &brw->ctx;
1393 /* BRW_NEW_FRAGMENT_PROGRAM */
1394 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1395
1396 if (prog) {
1397 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1398 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1399 &brw->wm.base, &brw->wm.prog_data->base);
1400 }
1401 }
1402
1403 const struct brw_tracked_state brw_wm_image_surfaces = {
1404 .dirty = {
1405 .mesa = _NEW_TEXTURE,
1406 .brw = BRW_NEW_BATCH |
1407 BRW_NEW_BLORP |
1408 BRW_NEW_FRAGMENT_PROGRAM |
1409 BRW_NEW_FS_PROG_DATA |
1410 BRW_NEW_IMAGE_UNITS
1411 },
1412 .emit = brw_upload_wm_image_surfaces,
1413 };
1414
1415 void
1416 gen4_init_vtable_surface_functions(struct brw_context *brw)
1417 {
1418 brw->vtbl.update_texture_surface = brw_update_texture_surface;
1419 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1420 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1421 brw->vtbl.emit_buffer_surface_state = gen4_emit_buffer_surface_state;
1422 }
1423
1424 static void
1425 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1426 {
1427 struct gl_context *ctx = &brw->ctx;
1428 /* _NEW_PROGRAM */
1429 struct gl_shader_program *prog =
1430 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1431
1432 if (prog && brw->cs.prog_data->uses_num_work_groups) {
1433 const unsigned surf_idx =
1434 brw->cs.prog_data->binding_table.work_groups_start;
1435 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1436 drm_intel_bo *bo;
1437 uint32_t bo_offset;
1438
1439 if (brw->compute.num_work_groups_bo == NULL) {
1440 bo = NULL;
1441 intel_upload_data(brw,
1442 (void *)brw->compute.num_work_groups,
1443 3 * sizeof(GLuint),
1444 sizeof(GLuint),
1445 &bo,
1446 &bo_offset);
1447 } else {
1448 bo = brw->compute.num_work_groups_bo;
1449 bo_offset = brw->compute.num_work_groups_offset;
1450 }
1451
1452 brw->vtbl.emit_buffer_surface_state(brw, surf_offset,
1453 bo, bo_offset,
1454 BRW_SURFACEFORMAT_RAW,
1455 3 * sizeof(GLuint), 1, true);
1456 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1457 }
1458 }
1459
1460 const struct brw_tracked_state brw_cs_work_groups_surface = {
1461 .dirty = {
1462 .brw = BRW_NEW_BLORP |
1463 BRW_NEW_CS_WORK_GROUPS
1464 },
1465 .emit = brw_upload_cs_work_groups_surface,
1466 };