i965/cs: Setup surface binding for gl_NumWorkGroups
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "program/prog_parameter.h"
38 #include "main/framebuffer.h"
39
40 #include "intel_mipmap_tree.h"
41 #include "intel_batchbuffer.h"
42 #include "intel_tex.h"
43 #include "intel_fbo.h"
44 #include "intel_buffer_objects.h"
45
46 #include "brw_context.h"
47 #include "brw_state.h"
48 #include "brw_defines.h"
49 #include "brw_wm.h"
50
51 GLuint
52 translate_tex_target(GLenum target)
53 {
54 switch (target) {
55 case GL_TEXTURE_1D:
56 case GL_TEXTURE_1D_ARRAY_EXT:
57 return BRW_SURFACE_1D;
58
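   /* There is no distinct surface type for rectangle textures; they are
    * set up as ordinary 2D surfaces.
    */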
59 case GL_TEXTURE_RECTANGLE_NV:
60 return BRW_SURFACE_2D;
61
62 case GL_TEXTURE_2D:
63 case GL_TEXTURE_2D_ARRAY_EXT:
64 case GL_TEXTURE_EXTERNAL_OES:
65 case GL_TEXTURE_2D_MULTISAMPLE:
66 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
67 return BRW_SURFACE_2D;
68
69 case GL_TEXTURE_3D:
70 return BRW_SURFACE_3D;
71
72 case GL_TEXTURE_CUBE_MAP:
73 case GL_TEXTURE_CUBE_MAP_ARRAY:
74 return BRW_SURFACE_CUBE;
75
76 default:
77 unreachable("not reached");
78 }
79 }
80
81 uint32_t
82 brw_get_surface_tiling_bits(uint32_t tiling)
83 {
84 switch (tiling) {
85 case I915_TILING_X:
86 return BRW_SURFACE_TILED;
87 case I915_TILING_Y:
88 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
89 default:
90 return 0;
91 }
92 }
93
94
95 uint32_t
96 brw_get_surface_num_multisamples(unsigned num_samples)
97 {
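   /* This SURFACE_STATE format only distinguishes single-sampled surfaces
    * from 4x multisampled ones, so any multisampled count maps to 4.
    */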
98 if (num_samples > 1)
99 return BRW_SURFACE_MULTISAMPLECOUNT_4;
100 else
101 return BRW_SURFACE_MULTISAMPLECOUNT_1;
102 }
103
104 void
105 brw_configure_w_tiled(const struct intel_mipmap_tree *mt,
106 bool is_render_target,
107 unsigned *width, unsigned *height,
108 unsigned *pitch, uint32_t *tiling, unsigned *format)
109 {
110 static const unsigned halign_stencil = 8;
111
112    /* A Y-tiled row is twice as wide as a W-tiled row, and consequently
113     * there are half as many rows.
114     * In addition, mip-levels are accessed manually by the program, so
115     * the surface is set up to cover all the mip-levels of one slice.
116     * (Hardware is still used to access individual slices).
117 */
118 *tiling = I915_TILING_Y;
119 *pitch = mt->pitch * 2;
120 *width = ALIGN(mt->total_width, halign_stencil) * 2;
121 *height = (mt->total_height / mt->physical_depth0) / 2;
122
123 if (is_render_target) {
124 *format = BRW_SURFACEFORMAT_R8_UINT;
125 }
126 }
127
128
129 /**
130 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
131 * swizzling.
132 */
133 int
134 brw_get_texture_swizzle(const struct gl_context *ctx,
135 const struct gl_texture_object *t)
136 {
137 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
138
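   /* Start from the identity mapping; the DEPTH_TEXTURE_MODE and
    * base-format handling below overrides individual components as needed.
    */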
139 int swizzles[SWIZZLE_NIL + 1] = {
140 SWIZZLE_X,
141 SWIZZLE_Y,
142 SWIZZLE_Z,
143 SWIZZLE_W,
144 SWIZZLE_ZERO,
145 SWIZZLE_ONE,
146 SWIZZLE_NIL
147 };
148
149 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
150 img->_BaseFormat == GL_DEPTH_STENCIL) {
151 GLenum depth_mode = t->DepthMode;
152
153 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
154 * with depth component data specified with a sized internal format.
155 * Otherwise, it's left at the old default, GL_LUMINANCE.
156 */
157 if (_mesa_is_gles3(ctx) &&
158 img->InternalFormat != GL_DEPTH_COMPONENT &&
159 img->InternalFormat != GL_DEPTH_STENCIL) {
160 depth_mode = GL_RED;
161 }
162
163 switch (depth_mode) {
164 case GL_ALPHA:
165 swizzles[0] = SWIZZLE_ZERO;
166 swizzles[1] = SWIZZLE_ZERO;
167 swizzles[2] = SWIZZLE_ZERO;
168 swizzles[3] = SWIZZLE_X;
169 break;
170 case GL_LUMINANCE:
171 swizzles[0] = SWIZZLE_X;
172 swizzles[1] = SWIZZLE_X;
173 swizzles[2] = SWIZZLE_X;
174 swizzles[3] = SWIZZLE_ONE;
175 break;
176 case GL_INTENSITY:
177 swizzles[0] = SWIZZLE_X;
178 swizzles[1] = SWIZZLE_X;
179 swizzles[2] = SWIZZLE_X;
180 swizzles[3] = SWIZZLE_X;
181 break;
182 case GL_RED:
183 swizzles[0] = SWIZZLE_X;
184 swizzles[1] = SWIZZLE_ZERO;
185 swizzles[2] = SWIZZLE_ZERO;
186 swizzles[3] = SWIZZLE_ONE;
187 break;
188 }
189 }
190
191 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
192
193 /* If the texture's format is alpha-only, force R, G, and B to
194 * 0.0. Similarly, if the texture's format has no alpha channel,
195     * force the alpha value read to 1.0. This allows the
196 * implementation to use an RGBA texture for any of these formats
197 * without leaking any unexpected values.
198 */
199 switch (img->_BaseFormat) {
200 case GL_ALPHA:
201 swizzles[0] = SWIZZLE_ZERO;
202 swizzles[1] = SWIZZLE_ZERO;
203 swizzles[2] = SWIZZLE_ZERO;
204 break;
205 case GL_LUMINANCE:
206 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
207 swizzles[0] = SWIZZLE_X;
208 swizzles[1] = SWIZZLE_X;
209 swizzles[2] = SWIZZLE_X;
210 swizzles[3] = SWIZZLE_ONE;
211 }
212 break;
213 case GL_LUMINANCE_ALPHA:
214 if (datatype == GL_SIGNED_NORMALIZED) {
215 swizzles[0] = SWIZZLE_X;
216 swizzles[1] = SWIZZLE_X;
217 swizzles[2] = SWIZZLE_X;
218 swizzles[3] = SWIZZLE_W;
219 }
220 break;
221 case GL_INTENSITY:
222 if (datatype == GL_SIGNED_NORMALIZED) {
223 swizzles[0] = SWIZZLE_X;
224 swizzles[1] = SWIZZLE_X;
225 swizzles[2] = SWIZZLE_X;
226 swizzles[3] = SWIZZLE_X;
227 }
228 break;
229 case GL_RED:
230 case GL_RG:
231 case GL_RGB:
232 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
233 swizzles[3] = SWIZZLE_ONE;
234 break;
235 }
236
237 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
238 swizzles[GET_SWZ(t->_Swizzle, 1)],
239 swizzles[GET_SWZ(t->_Swizzle, 2)],
240 swizzles[GET_SWZ(t->_Swizzle, 3)]);
241 }
242
243 static void
244 gen4_emit_buffer_surface_state(struct brw_context *brw,
245 uint32_t *out_offset,
246 drm_intel_bo *bo,
247 unsigned buffer_offset,
248 unsigned surface_format,
249 unsigned buffer_size,
250 unsigned pitch,
251 bool rw)
252 {
253 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
254 6 * 4, 32, out_offset);
255 memset(surf, 0, 6 * 4);
256
257 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
258 surface_format << BRW_SURFACE_FORMAT_SHIFT |
259 (brw->gen >= 6 ? BRW_SURFACE_RC_READ_WRITE : 0);
260 surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
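   /* The element count is split across the Width (7 bits), Height (13 bits)
    * and Depth (7 bits) fields of the surface state.
    */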
261 surf[2] = (buffer_size & 0x7f) << BRW_SURFACE_WIDTH_SHIFT |
262 ((buffer_size >> 7) & 0x1fff) << BRW_SURFACE_HEIGHT_SHIFT;
263 surf[3] = ((buffer_size >> 20) & 0x7f) << BRW_SURFACE_DEPTH_SHIFT |
264 (pitch - 1) << BRW_SURFACE_PITCH_SHIFT;
265
266 /* Emit relocation to surface contents. The 965 PRM, Volume 4, section
267 * 5.1.2 "Data Cache" says: "the data cache does not exist as a separate
268 * physical cache. It is mapped in hardware to the sampler cache."
269 */
270 if (bo) {
271 drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
272 bo, buffer_offset,
273 I915_GEM_DOMAIN_SAMPLER,
274 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
275 }
276 }
277
278 void
279 brw_update_buffer_texture_surface(struct gl_context *ctx,
280 unsigned unit,
281 uint32_t *surf_offset)
282 {
283 struct brw_context *brw = brw_context(ctx);
284 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
285 struct intel_buffer_object *intel_obj =
286 intel_buffer_object(tObj->BufferObject);
287 uint32_t size = tObj->BufferSize;
288 drm_intel_bo *bo = NULL;
289 mesa_format format = tObj->_BufferObjectFormat;
290 uint32_t brw_format = brw_format_for_mesa_format(format);
291 int texel_size = _mesa_get_format_bytes(format);
292
293 if (intel_obj) {
294 size = MIN2(size, intel_obj->Base.Size);
295 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
296 }
297
298 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
299 _mesa_problem(NULL, "bad format %s for texture buffer\n",
300 _mesa_get_format_name(format));
301 }
302
303 brw->vtbl.emit_buffer_surface_state(brw, surf_offset, bo,
304 tObj->BufferOffset,
305 brw_format,
306 size / texel_size,
307 texel_size,
308 false /* rw */);
309 }
310
311 static void
312 brw_update_texture_surface(struct gl_context *ctx,
313 unsigned unit,
314 uint32_t *surf_offset,
315 bool for_gather)
316 {
317 struct brw_context *brw = brw_context(ctx);
318 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
319 struct intel_texture_object *intelObj = intel_texture_object(tObj);
320 struct intel_mipmap_tree *mt = intelObj->mt;
321 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
322 uint32_t *surf;
323
324 /* BRW_NEW_TEXTURE_BUFFER */
325 if (tObj->Target == GL_TEXTURE_BUFFER) {
326 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
327 return;
328 }
329
330 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
331 6 * 4, 32, surf_offset);
332
333 uint32_t tex_format = translate_tex_format(brw, mt->format,
334 sampler->sRGBDecode);
335
336 if (for_gather) {
337 /* Sandybridge's gather4 message is broken for integer formats.
338 * To work around this, we pretend the surface is UNORM for
339 * 8 or 16-bit formats, and emit shader instructions to recover
340 * the real INT/UINT value. For 32-bit formats, we pretend
341 * the surface is FLOAT, and simply reinterpret the resulting
342 * bits.
343 */
344 switch (tex_format) {
345 case BRW_SURFACEFORMAT_R8_SINT:
346 case BRW_SURFACEFORMAT_R8_UINT:
347 tex_format = BRW_SURFACEFORMAT_R8_UNORM;
348 break;
349
350 case BRW_SURFACEFORMAT_R16_SINT:
351 case BRW_SURFACEFORMAT_R16_UINT:
352 tex_format = BRW_SURFACEFORMAT_R16_UNORM;
353 break;
354
355 case BRW_SURFACEFORMAT_R32_SINT:
356 case BRW_SURFACEFORMAT_R32_UINT:
357 tex_format = BRW_SURFACEFORMAT_R32_FLOAT;
358 break;
359
360 default:
361 break;
362 }
363 }
364
365 surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
366 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
367 BRW_SURFACE_CUBEFACE_ENABLES |
368 tex_format << BRW_SURFACE_FORMAT_SHIFT);
369
370 surf[1] = mt->bo->offset64 + mt->offset; /* reloc */
371
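   /* The LOD / mip count field holds the number of mip levels available
    * beyond the base level.
    */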
372 surf[2] = ((intelObj->_MaxLevel - tObj->BaseLevel) << BRW_SURFACE_LOD_SHIFT |
373 (mt->logical_width0 - 1) << BRW_SURFACE_WIDTH_SHIFT |
374 (mt->logical_height0 - 1) << BRW_SURFACE_HEIGHT_SHIFT);
375
376 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
377 (mt->logical_depth0 - 1) << BRW_SURFACE_DEPTH_SHIFT |
378 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
379
380 surf[4] = (brw_get_surface_num_multisamples(mt->num_samples) |
381 SET_FIELD(tObj->BaseLevel - mt->first_level, BRW_SURFACE_MIN_LOD));
382
383 surf[5] = mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0;
384
385 /* Emit relocation to surface contents */
386 drm_intel_bo_emit_reloc(brw->batch.bo,
387 *surf_offset + 4,
388 mt->bo,
389 surf[1] - mt->bo->offset64,
390 I915_GEM_DOMAIN_SAMPLER, 0);
391 }
392
393 /**
394 * Create the constant buffer surface. Vertex/fragment shader constants will be
395 * read from this buffer with Data Port Read instructions/messages.
396 */
397 void
398 brw_create_constant_surface(struct brw_context *brw,
399 drm_intel_bo *bo,
400 uint32_t offset,
401 uint32_t size,
402 uint32_t *out_offset,
403 bool dword_pitch)
404 {
405 uint32_t stride = dword_pitch ? 4 : 16;
406 uint32_t elements = ALIGN(size, stride) / stride;
407
408 brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
409 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
410 elements, stride, false);
411 }
412
413 /**
414 * Create the buffer surface. Shader buffer variables will be
415  * read from / written to this buffer with Data Port Read/Write
416 * instructions/messages.
417 */
418 void
419 brw_create_buffer_surface(struct brw_context *brw,
420 drm_intel_bo *bo,
421 uint32_t offset,
422 uint32_t size,
423 uint32_t *out_offset,
424 bool dword_pitch)
425 {
426 /* Use a raw surface so we can reuse existing untyped read/write/atomic
427 * messages. We need these specifically for the fragment shader since they
428  * include a pixel mask header that we need in order to ensure correct
429  * behavior with helper invocations, which cannot write to the buffer.
430 */
431 brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
432 BRW_SURFACEFORMAT_RAW,
433 size, 1, true);
434 }
435
436 /**
437 * Set up a binding table entry for use by stream output logic (transform
438 * feedback).
439 *
440 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
441 */
442 void
443 brw_update_sol_surface(struct brw_context *brw,
444 struct gl_buffer_object *buffer_obj,
445 uint32_t *out_offset, unsigned num_vector_components,
446 unsigned stride_dwords, unsigned offset_dwords)
447 {
448 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
449 uint32_t offset_bytes = 4 * offset_dwords;
450 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
451 offset_bytes,
452 buffer_obj->Size - offset_bytes);
453 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
454 out_offset);
455 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
456 size_t size_dwords = buffer_obj->Size / 4;
457 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
458
459 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
460 * too big to map using a single binding table entry?
461 */
462 assert((size_dwords - offset_dwords) / stride_dwords
463 <= BRW_MAX_NUM_BUFFER_ENTRIES);
464
465 if (size_dwords > offset_dwords + num_vector_components) {
466 /* There is room for at least 1 transform feedback output in the buffer.
467 * Compute the number of additional transform feedback outputs the
468 * buffer has room for.
469 */
470 buffer_size_minus_1 =
471 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
472 } else {
473 /* There isn't even room for a single transform feedback output in the
474 * buffer. We can't configure the binding table entry to prevent output
475 * entirely; we'll have to rely on the geometry shader to detect
476 * overflow. But to minimize the damage in case of a bug, set up the
477 * binding table entry to just allow a single output.
478 */
479 buffer_size_minus_1 = 0;
480 }
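   /* buffer_size_minus_1 is encoded split across the Width (7 bits),
    * Height (13 bits) and Depth (7 bits) fields below.
    */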
481 width = buffer_size_minus_1 & 0x7f;
482 height = (buffer_size_minus_1 & 0xfff80) >> 7;
483 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
484
485 switch (num_vector_components) {
486 case 1:
487 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
488 break;
489 case 2:
490 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
491 break;
492 case 3:
493 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
494 break;
495 case 4:
496 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
497 break;
498 default:
499 unreachable("Invalid vector size for transform feedback output");
500 }
501
502 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
503 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
504 surface_format << BRW_SURFACE_FORMAT_SHIFT |
505 BRW_SURFACE_RC_READ_WRITE;
506 surf[1] = bo->offset64 + offset_bytes; /* reloc */
507 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
508 height << BRW_SURFACE_HEIGHT_SHIFT);
509 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
510 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
511 surf[4] = 0;
512 surf[5] = 0;
513
514 /* Emit relocation to surface contents. */
515 drm_intel_bo_emit_reloc(brw->batch.bo,
516 *out_offset + 4,
517 bo, offset_bytes,
518 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
519 }
520
521 /* Creates a new WM constant buffer reflecting the current fragment program's
522 * constants, if needed by the fragment program.
523 *
524 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
525 * state atom.
526 */
527 static void
528 brw_upload_wm_pull_constants(struct brw_context *brw)
529 {
530 struct brw_stage_state *stage_state = &brw->wm.base;
531 /* BRW_NEW_FRAGMENT_PROGRAM */
532 struct brw_fragment_program *fp =
533 (struct brw_fragment_program *) brw->fragment_program;
534 /* BRW_NEW_FS_PROG_DATA */
535 struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;
536
537 /* _NEW_PROGRAM_CONSTANTS */
538 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
539 stage_state, prog_data, true);
540 }
541
542 const struct brw_tracked_state brw_wm_pull_constants = {
543 .dirty = {
544 .mesa = _NEW_PROGRAM_CONSTANTS,
545 .brw = BRW_NEW_BATCH |
546 BRW_NEW_FRAGMENT_PROGRAM |
547 BRW_NEW_FS_PROG_DATA,
548 },
549 .emit = brw_upload_wm_pull_constants,
550 };
551
552 /**
553 * Creates a null renderbuffer surface.
554 *
555 * This is used when the shader doesn't write to any color output. An FB
556 * write to target 0 will still be emitted, because that's how the thread is
557 * terminated (and computed depth is returned), so we need to have the
558  * hardware discard the target 0 color output.
559 */
560 static void
561 brw_emit_null_surface_state(struct brw_context *brw,
562 unsigned width,
563 unsigned height,
564 unsigned samples,
565 uint32_t *out_offset)
566 {
567 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
568 * Notes):
569 *
570 * A null surface will be used in instances where an actual surface is
571 * not bound. When a write message is generated to a null surface, no
572 * actual surface is written to. When a read message (including any
573 * sampling engine message) is generated to a null surface, the result
574 * is all zeros. Note that a null surface type is allowed to be used
575  *     with all messages, even if it is not specifically indicated as
576 * supported. All of the remaining fields in surface state are ignored
577 * for null surfaces, with the following exceptions:
578 *
579 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
580 * depth buffer’s corresponding state for all render target surfaces,
581 * including null.
582 *
583 * - Surface Format must be R8G8B8A8_UNORM.
584 */
585 unsigned surface_type = BRW_SURFACE_NULL;
586 drm_intel_bo *bo = NULL;
587 unsigned pitch_minus_1 = 0;
588 uint32_t multisampling_state = 0;
589 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
590 out_offset);
591
592 if (samples > 1) {
593 /* On Gen6, null render targets seem to cause GPU hangs when
594       * multisampling. So work around this problem by rendering into a dummy
595       * color buffer.
596 *
597 * To decrease the amount of memory needed by the workaround buffer, we
598 * set its pitch to 128 bytes (the width of a Y tile). This means that
599 * the amount of memory needed for the workaround buffer is
600 * (width_in_tiles + height_in_tiles - 1) tiles.
601 *
602 * Note that since the workaround buffer will be interpreted by the
603 * hardware as an interleaved multisampled buffer, we need to compute
604 * width_in_tiles and height_in_tiles by dividing the width and height
605 * by 16 rather than the normal Y-tile size of 32.
606 */
607 unsigned width_in_tiles = ALIGN(width, 16) / 16;
608 unsigned height_in_tiles = ALIGN(height, 16) / 16;
609 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
610 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
611 size_needed);
612 bo = brw->wm.multisampled_null_render_target_bo;
613 surface_type = BRW_SURFACE_2D;
614 pitch_minus_1 = 127;
615 multisampling_state = brw_get_surface_num_multisamples(samples);
616 }
617
618 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
619 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
620 if (brw->gen < 6) {
621 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
622 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
623 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
624 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
625 }
626 surf[1] = bo ? bo->offset64 : 0;
627 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
628 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
629
630 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
631 * Notes):
632 *
633 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
634 */
635 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
636 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
637 surf[4] = multisampling_state;
638 surf[5] = 0;
639
640 if (bo) {
641 drm_intel_bo_emit_reloc(brw->batch.bo,
642 *out_offset + 4,
643 bo, 0,
644 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
645 }
646 }
647
648 /**
649 * Sets up a surface state structure to point at the given region.
650 * While it is only used for the front/back buffer currently, it should be
651  * usable for further buffers when doing ARB_draw_buffers support.
652 */
653 static uint32_t
654 brw_update_renderbuffer_surface(struct brw_context *brw,
655 struct gl_renderbuffer *rb,
656 bool layered, unsigned unit,
657 uint32_t surf_index)
658 {
659 struct gl_context *ctx = &brw->ctx;
660 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
661 struct intel_mipmap_tree *mt = irb->mt;
662 uint32_t *surf;
663 uint32_t tile_x, tile_y;
664 uint32_t format = 0;
665 uint32_t offset;
666 /* _NEW_BUFFERS */
667 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
668 /* BRW_NEW_FS_PROG_DATA */
669
670 assert(!layered);
671
672 if (rb->TexImage && !brw->has_surface_tile_offset) {
673 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
674
675 if (tile_x != 0 || tile_y != 0) {
676 /* Original gen4 hardware couldn't draw to a non-tile-aligned
677        * destination in a miptree unless you actually set up your renderbuffer
678 * as a miptree and used the fragile lod/array_index/etc. controls to
679 * select the image. So, instead, we just make a new single-level
680 * miptree and render into that.
681 */
682 intel_renderbuffer_move_to_temp(brw, irb, false);
683 mt = irb->mt;
684 }
685 }
686
687 intel_miptree_used_for_rendering(irb->mt);
688
689 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
690
691 format = brw->render_target_format[rb_format];
692 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
693 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
694 __func__, _mesa_get_format_name(rb_format));
695 }
696
697 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
698 format << BRW_SURFACE_FORMAT_SHIFT);
699
700 /* reloc */
701 assert(mt->offset % mt->cpp == 0);
702 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
703 mt->bo->offset64 + mt->offset);
704
705 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
706 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
707
708 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
709 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
710
711 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
712
713 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
714 /* Note that the low bits of these fields are missing, so
715 * there's the possibility of getting in trouble.
716 */
717 assert(tile_x % 4 == 0);
718 assert(tile_y % 2 == 0);
719 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
720 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
721 (mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
722
723 if (brw->gen < 6) {
724 /* _NEW_COLOR */
725 if (!ctx->Color.ColorLogicOpEnabled &&
726 (ctx->Color.BlendEnabled & (1 << unit)))
727 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
728
729 if (!ctx->Color.ColorMask[unit][0])
730 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
731 if (!ctx->Color.ColorMask[unit][1])
732 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
733 if (!ctx->Color.ColorMask[unit][2])
734 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
735
736 /* As mentioned above, disable writes to the alpha component when the
737 * renderbuffer is XRGB.
738 */
739 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
740 !ctx->Color.ColorMask[unit][3]) {
741 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
742 }
743 }
744
745 drm_intel_bo_emit_reloc(brw->batch.bo,
746 offset + 4,
747 mt->bo,
748 surf[1] - mt->bo->offset64,
749 I915_GEM_DOMAIN_RENDER,
750 I915_GEM_DOMAIN_RENDER);
751
752 return offset;
753 }
754
755 /**
756 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
757 */
758 void
759 brw_update_renderbuffer_surfaces(struct brw_context *brw,
760 const struct gl_framebuffer *fb,
761 uint32_t render_target_start,
762 uint32_t *surf_offset)
763 {
764 GLuint i;
765 const unsigned int w = _mesa_geometric_width(fb);
766 const unsigned int h = _mesa_geometric_height(fb);
767 const unsigned int s = _mesa_geometric_samples(fb);
768
769 /* Update surfaces for drawing buffers */
770 if (fb->_NumColorDrawBuffers >= 1) {
771 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
772 const uint32_t surf_index = render_target_start + i;
773
774 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
775 surf_offset[surf_index] =
776 brw->vtbl.update_renderbuffer_surface(
777 brw, fb->_ColorDrawBuffers[i],
778 _mesa_geometric_layers(fb) > 0, i, surf_index);
779 } else {
780 brw->vtbl.emit_null_surface_state(brw, w, h, s,
781 &surf_offset[surf_index]);
782 }
783 }
784 } else {
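      /* Even with no color draw buffers, the FB write used to terminate the
       * thread still targets render target 0, so emit a null surface there.
       */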
785 const uint32_t surf_index = render_target_start;
786 brw->vtbl.emit_null_surface_state(brw, w, h, s,
787 &surf_offset[surf_index]);
788 }
789 }
790
791 static void
792 update_renderbuffer_surfaces(struct brw_context *brw)
793 {
794 const struct gl_context *ctx = &brw->ctx;
795
796 /* _NEW_BUFFERS | _NEW_COLOR */
797 const struct gl_framebuffer *fb = ctx->DrawBuffer;
798 brw_update_renderbuffer_surfaces(
799 brw, fb,
800 brw->wm.prog_data->binding_table.render_target_start,
801 brw->wm.base.surf_offset);
802 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
803 }
804
805 const struct brw_tracked_state brw_renderbuffer_surfaces = {
806 .dirty = {
807 .mesa = _NEW_BUFFERS |
808 _NEW_COLOR,
809 .brw = BRW_NEW_BATCH |
810 BRW_NEW_FS_PROG_DATA,
811 },
812 .emit = update_renderbuffer_surfaces,
813 };
814
815 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
816 .dirty = {
817 .mesa = _NEW_BUFFERS,
818 .brw = BRW_NEW_BATCH,
819 },
820 .emit = update_renderbuffer_surfaces,
821 };
822
823
824 static void
825 update_stage_texture_surfaces(struct brw_context *brw,
826 const struct gl_program *prog,
827 struct brw_stage_state *stage_state,
828 bool for_gather)
829 {
830 if (!prog)
831 return;
832
833 struct gl_context *ctx = &brw->ctx;
834
835 uint32_t *surf_offset = stage_state->surf_offset;
836
837 /* BRW_NEW_*_PROG_DATA */
838 if (for_gather)
839 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
840 else
841 surf_offset += stage_state->prog_data->binding_table.texture_start;
842
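   /* _mesa_fls() returns the 1-based position of the highest bit set, i.e.
    * the number of sampler slots that need to be walked.
    */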
843 unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
844 for (unsigned s = 0; s < num_samplers; s++) {
845 surf_offset[s] = 0;
846
847 if (prog->SamplersUsed & (1 << s)) {
848 const unsigned unit = prog->SamplerUnits[s];
849
850 /* _NEW_TEXTURE */
851 if (ctx->Texture.Unit[unit]._Current) {
852 brw->vtbl.update_texture_surface(ctx, unit, surf_offset + s, for_gather);
853 }
854 }
855 }
856 }
857
858
859 /**
860 * Construct SURFACE_STATE objects for enabled textures.
861 */
862 static void
863 brw_update_texture_surfaces(struct brw_context *brw)
864 {
865 /* BRW_NEW_VERTEX_PROGRAM */
866 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
867
868 /* BRW_NEW_GEOMETRY_PROGRAM */
869 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
870
871 /* BRW_NEW_FRAGMENT_PROGRAM */
872 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
873
874 /* BRW_NEW_COMPUTE_PROGRAM */
875 struct gl_program *cs = (struct gl_program *) brw->compute_program;
876
877 /* _NEW_TEXTURE */
878 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false);
879 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false);
880 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false);
881 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false);
882
883    /* Emit an alternate set of surface state for gather. This
884     * allows the surface format to be overridden for only the
885     * gather4 messages. */
886 if (brw->gen < 8) {
887 if (vs && vs->UsesGather)
888 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true);
889 if (gs && gs->UsesGather)
890 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true);
891 if (fs && fs->UsesGather)
892 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
893 if (cs && cs->UsesGather)
894 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true);
895 }
896
897 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
898 }
899
900 const struct brw_tracked_state brw_texture_surfaces = {
901 .dirty = {
902 .mesa = _NEW_TEXTURE,
903 .brw = BRW_NEW_BATCH |
904 BRW_NEW_COMPUTE_PROGRAM |
905 BRW_NEW_FRAGMENT_PROGRAM |
906 BRW_NEW_FS_PROG_DATA |
907 BRW_NEW_GEOMETRY_PROGRAM |
908 BRW_NEW_GS_PROG_DATA |
909 BRW_NEW_TEXTURE_BUFFER |
910 BRW_NEW_VERTEX_PROGRAM |
911 BRW_NEW_VS_PROG_DATA,
912 },
913 .emit = brw_update_texture_surfaces,
914 };
915
916 void
917 brw_upload_ubo_surfaces(struct brw_context *brw,
918 struct gl_shader *shader,
919 struct brw_stage_state *stage_state,
920 struct brw_stage_prog_data *prog_data,
921 bool dword_pitch)
922 {
923 struct gl_context *ctx = &brw->ctx;
924
925 if (!shader)
926 return;
927
928 uint32_t *surf_offsets =
929 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
930
931 for (int i = 0; i < shader->NumUniformBlocks; i++) {
932 struct intel_buffer_object *intel_bo;
933
934       /* Because behavior for referencing outside of the binding's size in the
935        * glBindBufferRange case is undefined, we can just bind the whole buffer,
936        * as glBindBufferBase would, and still be a correct implementation.
937 */
938 if (!shader->UniformBlocks[i].IsShaderStorage) {
939 struct gl_uniform_buffer_binding *binding;
940 binding =
941 &ctx->UniformBufferBindings[shader->UniformBlocks[i].Binding];
942 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
943 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &surf_offsets[i]);
944 } else {
945 intel_bo = intel_buffer_object(binding->BufferObject);
946 drm_intel_bo *bo =
947 intel_bufferobj_buffer(brw, intel_bo,
948 binding->Offset,
949 binding->BufferObject->Size - binding->Offset);
950 brw_create_constant_surface(brw, bo, binding->Offset,
951 binding->BufferObject->Size - binding->Offset,
952 &surf_offsets[i],
953 dword_pitch);
954 }
955 } else {
956 struct gl_shader_storage_buffer_binding *binding;
957 binding =
958 &ctx->ShaderStorageBufferBindings[shader->UniformBlocks[i].Binding];
959 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
960 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &surf_offsets[i]);
961 } else {
962 intel_bo = intel_buffer_object(binding->BufferObject);
963 drm_intel_bo *bo =
964 intel_bufferobj_buffer(brw, intel_bo,
965 binding->Offset,
966 binding->BufferObject->Size - binding->Offset);
967 brw_create_buffer_surface(brw, bo, binding->Offset,
968 binding->BufferObject->Size - binding->Offset,
969 &surf_offsets[i],
970 dword_pitch);
971 }
972 }
973 }
974
975 if (shader->NumUniformBlocks)
976 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
977 }
978
979 static void
980 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
981 {
982 struct gl_context *ctx = &brw->ctx;
983 /* _NEW_PROGRAM */
984 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
985
986 if (!prog)
987 return;
988
989 /* BRW_NEW_FS_PROG_DATA */
990 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
991 &brw->wm.base, &brw->wm.prog_data->base, true);
992 }
993
994 const struct brw_tracked_state brw_wm_ubo_surfaces = {
995 .dirty = {
996 .mesa = _NEW_PROGRAM,
997 .brw = BRW_NEW_BATCH |
998 BRW_NEW_FS_PROG_DATA |
999 BRW_NEW_UNIFORM_BUFFER,
1000 },
1001 .emit = brw_upload_wm_ubo_surfaces,
1002 };
1003
1004 void
1005 brw_upload_abo_surfaces(struct brw_context *brw,
1006 struct gl_shader_program *prog,
1007 struct brw_stage_state *stage_state,
1008 struct brw_stage_prog_data *prog_data)
1009 {
1010 struct gl_context *ctx = &brw->ctx;
1011 uint32_t *surf_offsets =
1012 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1013
1014 for (unsigned i = 0; i < prog->NumAtomicBuffers; i++) {
1015 struct gl_atomic_buffer_binding *binding =
1016 &ctx->AtomicBufferBindings[prog->AtomicBuffers[i].Binding];
1017 struct intel_buffer_object *intel_bo =
1018 intel_buffer_object(binding->BufferObject);
1019 drm_intel_bo *bo = intel_bufferobj_buffer(
1020 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1021
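      /* Atomic counter buffers are bound as raw surfaces and addressed by
       * byte offset.
       */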
1022 brw->vtbl.emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1023 binding->Offset, BRW_SURFACEFORMAT_RAW,
1024 bo->size - binding->Offset, 1, true);
1025 }
1026
1027 if (prog->NumAtomicBuffers)
1028 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1029 }
1030
1031 static void
1032 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1033 {
1034 struct gl_context *ctx = &brw->ctx;
1035 /* _NEW_PROGRAM */
1036 struct gl_shader_program *prog = ctx->Shader._CurrentFragmentProgram;
1037
1038 if (prog) {
1039 /* BRW_NEW_FS_PROG_DATA */
1040 brw_upload_abo_surfaces(brw, prog, &brw->wm.base,
1041 &brw->wm.prog_data->base);
1042 }
1043 }
1044
1045 const struct brw_tracked_state brw_wm_abo_surfaces = {
1046 .dirty = {
1047 .mesa = _NEW_PROGRAM,
1048 .brw = BRW_NEW_ATOMIC_BUFFER |
1049 BRW_NEW_BATCH |
1050 BRW_NEW_FS_PROG_DATA,
1051 },
1052 .emit = brw_upload_wm_abo_surfaces,
1053 };
1054
1055 static void
1056 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1057 {
1058 struct gl_context *ctx = &brw->ctx;
1059 /* _NEW_PROGRAM */
1060 struct gl_shader_program *prog =
1061 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1062
1063 if (prog) {
1064 /* BRW_NEW_CS_PROG_DATA */
1065 brw_upload_abo_surfaces(brw, prog, &brw->cs.base,
1066 &brw->cs.prog_data->base);
1067 }
1068 }
1069
1070 const struct brw_tracked_state brw_cs_abo_surfaces = {
1071 .dirty = {
1072 .mesa = _NEW_PROGRAM,
1073 .brw = BRW_NEW_ATOMIC_BUFFER |
1074 BRW_NEW_BATCH |
1075 BRW_NEW_CS_PROG_DATA,
1076 },
1077 .emit = brw_upload_cs_abo_surfaces,
1078 };
1079
1080 static void
1081 brw_upload_cs_image_surfaces(struct brw_context *brw)
1082 {
1083 struct gl_context *ctx = &brw->ctx;
1084 /* _NEW_PROGRAM */
1085 struct gl_shader_program *prog =
1086 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1087
1088 if (prog) {
1089 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS */
1090 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1091 &brw->cs.base, &brw->cs.prog_data->base);
1092 }
1093 }
1094
1095 const struct brw_tracked_state brw_cs_image_surfaces = {
1096 .dirty = {
1097 .mesa = _NEW_PROGRAM,
1098 .brw = BRW_NEW_BATCH |
1099 BRW_NEW_CS_PROG_DATA |
1100 BRW_NEW_IMAGE_UNITS
1101 },
1102 .emit = brw_upload_cs_image_surfaces,
1103 };
1104
1105 static uint32_t
1106 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1107 {
1108 if (access == GL_WRITE_ONLY) {
1109 return brw_format_for_mesa_format(format);
1110 } else {
1111 /* Typed surface reads support a very limited subset of the shader
1112 * image formats. Translate it into the closest format the
1113 * hardware supports.
1114 */
1115 if ((_mesa_get_format_bytes(format) >= 16 && brw->gen <= 8) ||
1116 (_mesa_get_format_bytes(format) >= 8 &&
1117 (brw->gen == 7 && !brw->is_haswell)))
1118 return BRW_SURFACEFORMAT_RAW;
1119 else
1120 return brw_format_for_mesa_format(
1121 brw_lower_mesa_image_format(brw->intelScreen->devinfo, format));
1122 }
1123 }
1124
1125 static void
1126 update_default_image_param(struct brw_context *brw,
1127 struct gl_image_unit *u,
1128 unsigned surface_idx,
1129 struct brw_image_param *param)
1130 {
1131 memset(param, 0, sizeof(*param));
1132 param->surface_idx = surface_idx;
1133 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1134 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1135 * detailed explanation of these parameters.
1136 */
1137 param->swizzling[0] = 0xff;
1138 param->swizzling[1] = 0xff;
1139 }
1140
1141 static void
1142 update_buffer_image_param(struct brw_context *brw,
1143 struct gl_image_unit *u,
1144 unsigned surface_idx,
1145 struct brw_image_param *param)
1146 {
1147 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1148
1149 update_default_image_param(brw, u, surface_idx, param);
1150
1151 param->size[0] = obj->Size / _mesa_get_format_bytes(u->_ActualFormat);
1152 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1153 }
1154
1155 static void
1156 update_texture_image_param(struct brw_context *brw,
1157 struct gl_image_unit *u,
1158 unsigned surface_idx,
1159 struct brw_image_param *param)
1160 {
1161 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1162
1163 update_default_image_param(brw, u, surface_idx, param);
1164
1165 param->size[0] = minify(mt->logical_width0, u->Level);
1166 param->size[1] = minify(mt->logical_height0, u->Level);
1167 param->size[2] = (!u->Layered ? 1 :
1168 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1169 u->TexObj->Target == GL_TEXTURE_3D ?
1170 minify(mt->logical_depth0, u->Level) :
1171 mt->logical_depth0);
1172
1173 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1174 &param->offset[0],
1175 &param->offset[1]);
1176
1177 param->stride[0] = mt->cpp;
1178 param->stride[1] = mt->pitch / mt->cpp;
1179 param->stride[2] =
1180 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1181 param->stride[3] =
1182 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1183
1184 if (mt->tiling == I915_TILING_X) {
1185 /* An X tile is a rectangular block of 512x8 bytes. */
1186 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1187 param->tiling[1] = _mesa_logbase2(8);
1188
1189 if (brw->has_swizzling) {
1190 /* Right shifts required to swizzle bits 9 and 10 of the memory
1191 * address with bit 6.
1192 */
1193 param->swizzling[0] = 3;
1194 param->swizzling[1] = 4;
1195 }
1196 } else if (mt->tiling == I915_TILING_Y) {
1197       /* The layout of a Y-tiled surface in memory isn't fundamentally
1198        * different from the layout of an X-tiled surface; we simply pretend that
1199        * the surface is broken up into a number of smaller 16Bx32 tiles, each
1200        * one arranged in X-major order just as in the X-tiling case.
1201 */
1202 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1203 param->tiling[1] = _mesa_logbase2(32);
1204
1205 if (brw->has_swizzling) {
1206 /* Right shift required to swizzle bit 9 of the memory address with
1207 * bit 6.
1208 */
1209 param->swizzling[0] = 3;
1210 }
1211 }
1212
1213 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1214 * address calculation algorithm (emit_address_calculation() in
1215 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1216 * modulus equal to the LOD.
1217 */
1218 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1219 0);
1220 }
1221
1222 static void
1223 update_image_surface(struct brw_context *brw,
1224 struct gl_image_unit *u,
1225 GLenum access,
1226 unsigned surface_idx,
1227 uint32_t *surf_offset,
1228 struct brw_image_param *param)
1229 {
1230 if (u->_Valid) {
1231 struct gl_texture_object *obj = u->TexObj;
1232 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1233
1234 if (obj->Target == GL_TEXTURE_BUFFER) {
1235 struct intel_buffer_object *intel_obj =
1236 intel_buffer_object(obj->BufferObject);
1237 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1238 _mesa_get_format_bytes(u->_ActualFormat));
1239
1240 brw->vtbl.emit_buffer_surface_state(
1241 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1242 format, intel_obj->Base.Size / texel_size, texel_size,
1243 access != GL_READ_ONLY);
1244
1245 update_buffer_image_param(brw, u, surface_idx, param);
1246
1247 } else {
1248 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1249 struct intel_mipmap_tree *mt = intel_obj->mt;
1250
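         /* get_image_format() returned a raw format, meaning typed surface
          * access isn't available for this format; expose the whole miptree
          * as a raw buffer instead, leaving address calculation and format
          * conversion to the generated shader code (see
          * brw_fs_surface_builder.cpp).
          */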
1251 if (format == BRW_SURFACEFORMAT_RAW) {
1252 brw->vtbl.emit_buffer_surface_state(
1253 brw, surf_offset, mt->bo, mt->offset,
1254 format, mt->bo->size - mt->offset, 1 /* pitch */,
1255 access != GL_READ_ONLY);
1256
1257 } else {
1258 const unsigned min_layer = obj->MinLayer + u->_Layer;
1259 const unsigned min_level = obj->MinLevel + u->Level;
1260 const unsigned num_layers = (!u->Layered ? 1 :
1261 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1262 mt->logical_depth0);
1263 const GLenum target = (obj->Target == GL_TEXTURE_CUBE_MAP ||
1264 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY ?
1265 GL_TEXTURE_2D_ARRAY : obj->Target);
1266
1267 brw->vtbl.emit_texture_surface_state(
1268 brw, mt, target,
1269 min_layer, min_layer + num_layers,
1270 min_level, min_level + 1,
1271 format, SWIZZLE_XYZW,
1272 surf_offset, access != GL_READ_ONLY, false);
1273 }
1274
1275 update_texture_image_param(brw, u, surface_idx, param);
1276 }
1277
1278 } else {
1279 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1280 update_default_image_param(brw, u, surface_idx, param);
1281 }
1282 }
1283
1284 void
1285 brw_upload_image_surfaces(struct brw_context *brw,
1286 struct gl_shader *shader,
1287 struct brw_stage_state *stage_state,
1288 struct brw_stage_prog_data *prog_data)
1289 {
1290 struct gl_context *ctx = &brw->ctx;
1291
1292 if (shader && shader->NumImages) {
1293 for (unsigned i = 0; i < shader->NumImages; i++) {
1294 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1295 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1296
1297 update_image_surface(brw, u, shader->ImageAccess[i],
1298 surf_idx,
1299 &stage_state->surf_offset[surf_idx],
1300 &prog_data->image_param[i]);
1301 }
1302
1303 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1304 }
1305 }
1306
1307 static void
1308 brw_upload_wm_image_surfaces(struct brw_context *brw)
1309 {
1310 struct gl_context *ctx = &brw->ctx;
1311 /* BRW_NEW_FRAGMENT_PROGRAM */
1312 struct gl_shader_program *prog = ctx->Shader._CurrentFragmentProgram;
1313
1314 if (prog) {
1315 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS */
1316 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1317 &brw->wm.base, &brw->wm.prog_data->base);
1318 }
1319 }
1320
1321 const struct brw_tracked_state brw_wm_image_surfaces = {
1322 .dirty = {
1323 .brw = BRW_NEW_BATCH |
1324 BRW_NEW_FRAGMENT_PROGRAM |
1325 BRW_NEW_FS_PROG_DATA |
1326 BRW_NEW_IMAGE_UNITS
1327 },
1328 .emit = brw_upload_wm_image_surfaces,
1329 };
1330
1331 void
1332 gen4_init_vtable_surface_functions(struct brw_context *brw)
1333 {
1334 brw->vtbl.update_texture_surface = brw_update_texture_surface;
1335 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1336 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1337 brw->vtbl.emit_buffer_surface_state = gen4_emit_buffer_surface_state;
1338 }
1339
1340 static void
1341 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1342 {
1343 struct gl_context *ctx = &brw->ctx;
1344 /* _NEW_PROGRAM */
1345 struct gl_shader_program *prog =
1346 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1347
1348 if (prog && brw->cs.prog_data->uses_num_work_groups) {
1349 const unsigned surf_idx =
1350 brw->cs.prog_data->binding_table.work_groups_start;
1351 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1352 drm_intel_bo *bo;
1353 uint32_t bo_offset;
1354
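      /* For glDispatchCompute the work group counts are only known on the
       * CPU, so upload them into a temporary buffer; for indirect dispatch
       * they already live in a BO.
       */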
1355 if (brw->compute.num_work_groups_bo == NULL) {
1356 bo = NULL;
1357 intel_upload_data(brw,
1358 (void *)brw->compute.num_work_groups,
1359 3 * sizeof(GLuint),
1360 sizeof(GLuint),
1361 &bo,
1362 &bo_offset);
1363 } else {
1364 bo = brw->compute.num_work_groups_bo;
1365 bo_offset = brw->compute.num_work_groups_offset;
1366 }
1367
1368 brw->vtbl.emit_buffer_surface_state(brw, surf_offset,
1369 bo, bo_offset,
1370 BRW_SURFACEFORMAT_RAW,
1371 3 * sizeof(GLuint), 1, true);
1372 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1373 }
1374 }
1375
1376 const struct brw_tracked_state brw_cs_work_groups_surface = {
1377 .dirty = {
1378 .brw = BRW_NEW_CS_WORK_GROUPS
1379 },
1380 .emit = brw_upload_cs_work_groups_surface,
1381 };