i965: Upload Shader Storage Buffer Object surfaces
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "program/prog_parameter.h"
38 #include "main/framebuffer.h"
39
40 #include "intel_mipmap_tree.h"
41 #include "intel_batchbuffer.h"
42 #include "intel_tex.h"
43 #include "intel_fbo.h"
44 #include "intel_buffer_objects.h"
45
46 #include "brw_context.h"
47 #include "brw_state.h"
48 #include "brw_defines.h"
49 #include "brw_wm.h"
50
51 GLuint
52 translate_tex_target(GLenum target)
53 {
54 switch (target) {
55 case GL_TEXTURE_1D:
56 case GL_TEXTURE_1D_ARRAY_EXT:
57 return BRW_SURFACE_1D;
58
59 case GL_TEXTURE_RECTANGLE_NV:
60 return BRW_SURFACE_2D;
61
62 case GL_TEXTURE_2D:
63 case GL_TEXTURE_2D_ARRAY_EXT:
64 case GL_TEXTURE_EXTERNAL_OES:
65 case GL_TEXTURE_2D_MULTISAMPLE:
66 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
67 return BRW_SURFACE_2D;
68
69 case GL_TEXTURE_3D:
70 return BRW_SURFACE_3D;
71
72 case GL_TEXTURE_CUBE_MAP:
73 case GL_TEXTURE_CUBE_MAP_ARRAY:
74 return BRW_SURFACE_CUBE;
75
76 default:
77 unreachable("not reached");
78 }
79 }
80
81 uint32_t
82 brw_get_surface_tiling_bits(uint32_t tiling)
83 {
84 switch (tiling) {
85 case I915_TILING_X:
86 return BRW_SURFACE_TILED;
87 case I915_TILING_Y:
88 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
89 default:
90 return 0;
91 }
92 }
93
94
95 uint32_t
96 brw_get_surface_num_multisamples(unsigned num_samples)
97 {
98 if (num_samples > 1)
99 return BRW_SURFACE_MULTISAMPLECOUNT_4;
100 else
101 return BRW_SURFACE_MULTISAMPLECOUNT_1;
102 }
103
104 void
105 brw_configure_w_tiled(const struct intel_mipmap_tree *mt,
106 bool is_render_target,
107 unsigned *width, unsigned *height,
108 unsigned *pitch, uint32_t *tiling, unsigned *format)
109 {
110 static const unsigned halign_stencil = 8;
111
112 /* In Y-tiling a row is twice as wide as in W-tiling, and consequently
113 * there are half as many rows.
114 * In addition, mip-levels are accessed manually by the program and
115 * therefore the surface is set up to cover all the mip-levels for one slice.
116 * (Hardware is still used to access individual slices.)
117 */
118 *tiling = I915_TILING_Y;
119 *pitch = mt->pitch * 2;
120 *width = ALIGN(mt->total_width, halign_stencil) * 2;
121 *height = (mt->total_height / mt->physical_depth0) / 2;
122
123 if (is_render_target) {
124 *format = BRW_SURFACEFORMAT_R8_UINT;
125 }
126 }
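/* A small worked example of the W- to Y-tile reinterpretation above, using
 * made-up miptree dimensions (illustrative only, not taken from the PRM or
 * any real workload): for a stencil miptree with pitch = 128 bytes,
 * total_width = 70, total_height = 64 and physical_depth0 = 1,
 *
 *   *pitch  = 128 * 2          = 256
 *   *width  = ALIGN(70, 8) * 2 = 144
 *   *height = (64 / 1) / 2     = 32
 *
 * i.e. the Y-tiled view is twice as wide and half as tall as the W-tiled
 * stencil data it aliases.
 */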
127
128
129 /**
130 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
131 * swizzling.
132 */
133 int
134 brw_get_texture_swizzle(const struct gl_context *ctx,
135 const struct gl_texture_object *t)
136 {
137 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
138
139 int swizzles[SWIZZLE_NIL + 1] = {
140 SWIZZLE_X,
141 SWIZZLE_Y,
142 SWIZZLE_Z,
143 SWIZZLE_W,
144 SWIZZLE_ZERO,
145 SWIZZLE_ONE,
146 SWIZZLE_NIL
147 };
148
149 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
150 img->_BaseFormat == GL_DEPTH_STENCIL) {
151 GLenum depth_mode = t->DepthMode;
152
153 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
154 * with depth component data specified with a sized internal format.
155 * Otherwise, it's left at the old default, GL_LUMINANCE.
156 */
157 if (_mesa_is_gles3(ctx) &&
158 img->InternalFormat != GL_DEPTH_COMPONENT &&
159 img->InternalFormat != GL_DEPTH_STENCIL) {
160 depth_mode = GL_RED;
161 }
162
163 switch (depth_mode) {
164 case GL_ALPHA:
165 swizzles[0] = SWIZZLE_ZERO;
166 swizzles[1] = SWIZZLE_ZERO;
167 swizzles[2] = SWIZZLE_ZERO;
168 swizzles[3] = SWIZZLE_X;
169 break;
170 case GL_LUMINANCE:
171 swizzles[0] = SWIZZLE_X;
172 swizzles[1] = SWIZZLE_X;
173 swizzles[2] = SWIZZLE_X;
174 swizzles[3] = SWIZZLE_ONE;
175 break;
176 case GL_INTENSITY:
177 swizzles[0] = SWIZZLE_X;
178 swizzles[1] = SWIZZLE_X;
179 swizzles[2] = SWIZZLE_X;
180 swizzles[3] = SWIZZLE_X;
181 break;
182 case GL_RED:
183 swizzles[0] = SWIZZLE_X;
184 swizzles[1] = SWIZZLE_ZERO;
185 swizzles[2] = SWIZZLE_ZERO;
186 swizzles[3] = SWIZZLE_ONE;
187 break;
188 }
189 }
190
191 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
192
193 /* If the texture's format is alpha-only, force R, G, and B to
194 * 0.0. Similarly, if the texture's format has no alpha channel,
195 * force the alpha value read to 1.0. This allows for the
196 * implementation to use an RGBA texture for any of these formats
197 * without leaking any unexpected values.
198 */
199 switch (img->_BaseFormat) {
200 case GL_ALPHA:
201 swizzles[0] = SWIZZLE_ZERO;
202 swizzles[1] = SWIZZLE_ZERO;
203 swizzles[2] = SWIZZLE_ZERO;
204 break;
205 case GL_LUMINANCE:
206 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
207 swizzles[0] = SWIZZLE_X;
208 swizzles[1] = SWIZZLE_X;
209 swizzles[2] = SWIZZLE_X;
210 swizzles[3] = SWIZZLE_ONE;
211 }
212 break;
213 case GL_LUMINANCE_ALPHA:
214 if (datatype == GL_SIGNED_NORMALIZED) {
215 swizzles[0] = SWIZZLE_X;
216 swizzles[1] = SWIZZLE_X;
217 swizzles[2] = SWIZZLE_X;
218 swizzles[3] = SWIZZLE_W;
219 }
220 break;
221 case GL_INTENSITY:
222 if (datatype == GL_SIGNED_NORMALIZED) {
223 swizzles[0] = SWIZZLE_X;
224 swizzles[1] = SWIZZLE_X;
225 swizzles[2] = SWIZZLE_X;
226 swizzles[3] = SWIZZLE_X;
227 }
228 break;
229 case GL_RED:
230 case GL_RG:
231 case GL_RGB:
232 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
233 swizzles[3] = SWIZZLE_ONE;
234 break;
235 }
236
237 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
238 swizzles[GET_SWZ(t->_Swizzle, 1)],
239 swizzles[GET_SWZ(t->_Swizzle, 2)],
240 swizzles[GET_SWZ(t->_Swizzle, 3)]);
241 }
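/* A short worked example of how the depth-mode swizzle composes with the
 * application-supplied EXT_texture_swizzle state (values follow directly from
 * the tables above; the chosen state is illustrative). For a
 * GL_DEPTH_COMPONENT texture with DepthMode = GL_ALPHA, the table gives
 * swizzles = { ZERO, ZERO, ZERO, X }. If the application also sets
 * TEXTURE_SWIZZLE_RGBA to (ALPHA, ALPHA, ALPHA, ALPHA), t->_Swizzle selects
 * component 3 (W) for every channel, so the final result is
 * MAKE_SWIZZLE4(X, X, X, X): every channel returns the depth value.
 */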
242
243 static void
244 gen4_emit_buffer_surface_state(struct brw_context *brw,
245 uint32_t *out_offset,
246 drm_intel_bo *bo,
247 unsigned buffer_offset,
248 unsigned surface_format,
249 unsigned buffer_size,
250 unsigned pitch,
251 bool rw)
252 {
253 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
254 6 * 4, 32, out_offset);
255 memset(surf, 0, 6 * 4);
256
257 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
258 surface_format << BRW_SURFACE_FORMAT_SHIFT |
259 (brw->gen >= 6 ? BRW_SURFACE_RC_READ_WRITE : 0);
260 surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
261 surf[2] = ((buffer_size - 1) & 0x7f) << BRW_SURFACE_WIDTH_SHIFT |
262 (((buffer_size - 1) >> 7) & 0x1fff) << BRW_SURFACE_HEIGHT_SHIFT;
263 surf[3] = (((buffer_size - 1) >> 20) & 0x7f) << BRW_SURFACE_DEPTH_SHIFT |
264 (pitch - 1) << BRW_SURFACE_PITCH_SHIFT;
265
266 /* Emit relocation to surface contents. The 965 PRM, Volume 4, section
267 * 5.1.2 "Data Cache" says: "the data cache does not exist as a separate
268 * physical cache. It is mapped in hardware to the sampler cache."
269 */
270 if (bo) {
271 drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
272 bo, buffer_offset,
273 I915_GEM_DOMAIN_SAMPLER,
274 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
275 }
276 }
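/* The Width/Height/Depth fields above jointly encode "number of entries - 1"
 * as a single integer split into a 7-bit low part, a 13-bit middle part and a
 * 7-bit high part. As an illustrative example (entry count chosen
 * arbitrarily), a buffer of 65536 entries stores 65535 = 0xFFFF:
 *
 *   width  =  0xFFFF        & 0x7f   = 127
 *   height = (0xFFFF >> 7)  & 0x1fff = 511
 *   depth  = (0xFFFF >> 20) & 0x7f   = 0
 *
 * and indeed depth * 2^20 + height * 2^7 + width = 511 * 128 + 127 = 65535.
 */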
277
278 void
279 brw_update_buffer_texture_surface(struct gl_context *ctx,
280 unsigned unit,
281 uint32_t *surf_offset)
282 {
283 struct brw_context *brw = brw_context(ctx);
284 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
285 struct intel_buffer_object *intel_obj =
286 intel_buffer_object(tObj->BufferObject);
287 uint32_t size = tObj->BufferSize;
288 drm_intel_bo *bo = NULL;
289 mesa_format format = tObj->_BufferObjectFormat;
290 uint32_t brw_format = brw_format_for_mesa_format(format);
291 int texel_size = _mesa_get_format_bytes(format);
292
293 if (intel_obj) {
294 size = MIN2(size, intel_obj->Base.Size);
295 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
296 }
297
298 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
299 _mesa_problem(NULL, "bad format %s for texture buffer\n",
300 _mesa_get_format_name(format));
301 }
302
303 brw->vtbl.emit_buffer_surface_state(brw, surf_offset, bo,
304 tObj->BufferOffset,
305 brw_format,
306 size / texel_size,
307 texel_size,
308 false /* rw */);
309 }
310
311 static void
312 brw_update_texture_surface(struct gl_context *ctx,
313 unsigned unit,
314 uint32_t *surf_offset,
315 bool for_gather)
316 {
317 struct brw_context *brw = brw_context(ctx);
318 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
319 struct intel_texture_object *intelObj = intel_texture_object(tObj);
320 struct intel_mipmap_tree *mt = intelObj->mt;
321 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
322 uint32_t *surf;
323
324 /* BRW_NEW_TEXTURE_BUFFER */
325 if (tObj->Target == GL_TEXTURE_BUFFER) {
326 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
327 return;
328 }
329
330 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
331 6 * 4, 32, surf_offset);
332
333 uint32_t tex_format = translate_tex_format(brw, mt->format,
334 sampler->sRGBDecode);
335
336 if (for_gather) {
337 /* Sandybridge's gather4 message is broken for integer formats.
338 * To work around this, we pretend the surface is UNORM for
339 * 8 or 16-bit formats, and emit shader instructions to recover
340 * the real INT/UINT value. For 32-bit formats, we pretend
341 * the surface is FLOAT, and simply reinterpret the resulting
342 * bits.
343 */
344 switch (tex_format) {
345 case BRW_SURFACEFORMAT_R8_SINT:
346 case BRW_SURFACEFORMAT_R8_UINT:
347 tex_format = BRW_SURFACEFORMAT_R8_UNORM;
348 break;
349
350 case BRW_SURFACEFORMAT_R16_SINT:
351 case BRW_SURFACEFORMAT_R16_UINT:
352 tex_format = BRW_SURFACEFORMAT_R16_UNORM;
353 break;
354
355 case BRW_SURFACEFORMAT_R32_SINT:
356 case BRW_SURFACEFORMAT_R32_UINT:
357 tex_format = BRW_SURFACEFORMAT_R32_FLOAT;
358 break;
359
360 default:
361 break;
362 }
363 }
364
365 surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
366 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
367 BRW_SURFACE_CUBEFACE_ENABLES |
368 tex_format << BRW_SURFACE_FORMAT_SHIFT);
369
370 surf[1] = mt->bo->offset64 + mt->offset; /* reloc */
371
372 surf[2] = ((intelObj->_MaxLevel - tObj->BaseLevel) << BRW_SURFACE_LOD_SHIFT |
373 (mt->logical_width0 - 1) << BRW_SURFACE_WIDTH_SHIFT |
374 (mt->logical_height0 - 1) << BRW_SURFACE_HEIGHT_SHIFT);
375
376 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
377 (mt->logical_depth0 - 1) << BRW_SURFACE_DEPTH_SHIFT |
378 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
379
380 surf[4] = (brw_get_surface_num_multisamples(mt->num_samples) |
381 SET_FIELD(tObj->BaseLevel - mt->first_level, BRW_SURFACE_MIN_LOD));
382
383 surf[5] = mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0;
384
385 /* Emit relocation to surface contents */
386 drm_intel_bo_emit_reloc(brw->batch.bo,
387 *surf_offset + 4,
388 mt->bo,
389 surf[1] - mt->bo->offset64,
390 I915_GEM_DOMAIN_SAMPLER, 0);
391 }
392
393 /**
394 * Create the constant buffer surface. Vertex/fragment shader constants will be
395 * read from this buffer with Data Port Read instructions/messages.
396 */
397 void
398 brw_create_constant_surface(struct brw_context *brw,
399 drm_intel_bo *bo,
400 uint32_t offset,
401 uint32_t size,
402 uint32_t *out_offset,
403 bool dword_pitch)
404 {
405 uint32_t stride = dword_pitch ? 4 : 16;
406 uint32_t elements = ALIGN(size, stride) / stride;
407
408 brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
409 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
410 elements, stride, false);
411 }
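/* Illustrative arithmetic for the element count above (sizes made up): a
 * 260-byte constant buffer yields ALIGN(260, 16) / 16 = 17 elements with the
 * 16-byte (vec4) pitch, or ALIGN(260, 4) / 4 = 65 elements when dword_pitch
 * is set and the surface is addressed in 4-byte units.
 */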
412
413 /**
414 * Create the buffer surface. Shader buffer variables will be
415 * read from / written to this buffer with Data Port Read/Write
416 * instructions/messages.
417 */
418 void
419 brw_create_buffer_surface(struct brw_context *brw,
420 drm_intel_bo *bo,
421 uint32_t offset,
422 uint32_t size,
423 uint32_t *out_offset,
424 bool dword_pitch)
425 {
426 /* Use a raw surface so we can reuse existing untyped read/write/atomic
427 * messages. We need these specifically for the fragment shader since they
428 * include a pixel mask header, which we need to ensure correct behavior
429 * with helper invocations: they must not write to the buffer.
430 */
431 brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
432 BRW_SURFACEFORMAT_RAW,
433 size, 1, true);
434 }
435
436 /**
437 * Set up a binding table entry for use by stream output logic (transform
438 * feedback).
439 *
440 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
441 */
442 void
443 brw_update_sol_surface(struct brw_context *brw,
444 struct gl_buffer_object *buffer_obj,
445 uint32_t *out_offset, unsigned num_vector_components,
446 unsigned stride_dwords, unsigned offset_dwords)
447 {
448 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
449 uint32_t offset_bytes = 4 * offset_dwords;
450 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
451 offset_bytes,
452 buffer_obj->Size - offset_bytes);
453 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
454 out_offset);
455 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
456 size_t size_dwords = buffer_obj->Size / 4;
457 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
458
459 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
460 * too big to map using a single binding table entry?
461 */
462 assert((size_dwords - offset_dwords) / stride_dwords
463 <= BRW_MAX_NUM_BUFFER_ENTRIES);
464
465 if (size_dwords > offset_dwords + num_vector_components) {
466 /* There is room for at least 1 transform feedback output in the buffer.
467 * Compute the number of additional transform feedback outputs the
468 * buffer has room for.
469 */
470 buffer_size_minus_1 =
471 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
472 } else {
473 /* There isn't even room for a single transform feedback output in the
474 * buffer. We can't configure the binding table entry to prevent output
475 * entirely; we'll have to rely on the geometry shader to detect
476 * overflow. But to minimize the damage in case of a bug, set up the
477 * binding table entry to just allow a single output.
478 */
479 buffer_size_minus_1 = 0;
480 }
481 width = buffer_size_minus_1 & 0x7f;
482 height = (buffer_size_minus_1 & 0xfff80) >> 7;
483 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
484
485 switch (num_vector_components) {
486 case 1:
487 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
488 break;
489 case 2:
490 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
491 break;
492 case 3:
493 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
494 break;
495 case 4:
496 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
497 break;
498 default:
499 unreachable("Invalid vector size for transform feedback output");
500 }
501
502 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
503 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
504 surface_format << BRW_SURFACE_FORMAT_SHIFT |
505 BRW_SURFACE_RC_READ_WRITE;
506 surf[1] = bo->offset64 + offset_bytes; /* reloc */
507 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
508 height << BRW_SURFACE_HEIGHT_SHIFT);
509 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
510 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
511 surf[4] = 0;
512 surf[5] = 0;
513
514 /* Emit relocation to surface contents. */
515 drm_intel_bo_emit_reloc(brw->batch.bo,
516 *out_offset + 4,
517 bo, offset_bytes,
518 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
519 }
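/* Worked example for the binding table sizing above, with made-up stream
 * output parameters: a 16384-byte buffer (size_dwords = 4096), offset_dwords
 * = 0, stride_dwords = 16 and num_vector_components = 4 gives
 *
 *   buffer_size_minus_1 = (4096 - 0 - 4) / 16  = 255
 *   width               = 255 & 0x7f           = 127
 *   height              = (255 & 0xfff80) >> 7 = 1
 *   depth               = (255 & 0x7f00000) >> 20 = 0
 *   pitch_minus_1       = 4 * 16 - 1           = 63
 *
 * so the entry permits 256 outputs; the last one starts at dword 4080 and
 * still fits within the 4096-dword buffer.
 */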
520
521 /* Creates a new WM constant buffer reflecting the current fragment program's
522 * constants, if needed by the fragment program.
523 *
524 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
525 * state atom.
526 */
527 static void
528 brw_upload_wm_pull_constants(struct brw_context *brw)
529 {
530 struct brw_stage_state *stage_state = &brw->wm.base;
531 /* BRW_NEW_FRAGMENT_PROGRAM */
532 struct brw_fragment_program *fp =
533 (struct brw_fragment_program *) brw->fragment_program;
534 /* BRW_NEW_FS_PROG_DATA */
535 struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;
536
537 /* _NEW_PROGRAM_CONSTANTS */
538 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
539 stage_state, prog_data, true);
540 }
541
542 const struct brw_tracked_state brw_wm_pull_constants = {
543 .dirty = {
544 .mesa = _NEW_PROGRAM_CONSTANTS,
545 .brw = BRW_NEW_BATCH |
546 BRW_NEW_FRAGMENT_PROGRAM |
547 BRW_NEW_FS_PROG_DATA,
548 },
549 .emit = brw_upload_wm_pull_constants,
550 };
551
552 /**
553 * Creates a null renderbuffer surface.
554 *
555 * This is used when the shader doesn't write to any color output. An FB
556 * write to target 0 will still be emitted, because that's how the thread is
557 * terminated (and computed depth is returned), so we need to have the
558 * hardware discard the target 0 color output.
559 */
560 static void
561 brw_emit_null_surface_state(struct brw_context *brw,
562 unsigned width,
563 unsigned height,
564 unsigned samples,
565 uint32_t *out_offset)
566 {
567 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
568 * Notes):
569 *
570 * A null surface will be used in instances where an actual surface is
571 * not bound. When a write message is generated to a null surface, no
572 * actual surface is written to. When a read message (including any
573 * sampling engine message) is generated to a null surface, the result
574 * is all zeros. Note that a null surface type is allowed to be used
575 * with all messages, even if it is not specifically indicated as
576 * supported. All of the remaining fields in surface state are ignored
577 * for null surfaces, with the following exceptions:
578 *
579 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
580 * depth buffer’s corresponding state for all render target surfaces,
581 * including null.
582 *
583 * - Surface Format must be R8G8B8A8_UNORM.
584 */
585 unsigned surface_type = BRW_SURFACE_NULL;
586 drm_intel_bo *bo = NULL;
587 unsigned pitch_minus_1 = 0;
588 uint32_t multisampling_state = 0;
589 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
590 out_offset);
591
592 if (samples > 1) {
593 /* On Gen6, null render targets seem to cause GPU hangs when
594 * multisampling. So work around this problem by rendering into a dummy
595 * color buffer.
596 *
597 * To decrease the amount of memory needed by the workaround buffer, we
598 * set its pitch to 128 bytes (the width of a Y tile). This means that
599 * the amount of memory needed for the workaround buffer is
600 * (width_in_tiles + height_in_tiles - 1) tiles.
601 *
602 * Note that since the workaround buffer will be interpreted by the
603 * hardware as an interleaved multisampled buffer, we need to compute
604 * width_in_tiles and height_in_tiles by dividing the width and height
605 * by 16 rather than the normal Y-tile size of 32.
606 */
607 unsigned width_in_tiles = ALIGN(width, 16) / 16;
608 unsigned height_in_tiles = ALIGN(height, 16) / 16;
609 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
610 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
611 size_needed);
612 bo = brw->wm.multisampled_null_render_target_bo;
613 surface_type = BRW_SURFACE_2D;
614 pitch_minus_1 = 127;
615 multisampling_state = brw_get_surface_num_multisamples(samples);
616 }
617
618 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
619 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
620 if (brw->gen < 6) {
621 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
622 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
623 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
624 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
625 }
626 surf[1] = bo ? bo->offset64 : 0;
627 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
628 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
629
630 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
631 * Notes):
632 *
633 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
634 */
635 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
636 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
637 surf[4] = multisampling_state;
638 surf[5] = 0;
639
640 if (bo) {
641 drm_intel_bo_emit_reloc(brw->batch.bo,
642 *out_offset + 4,
643 bo, 0,
644 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
645 }
646 }
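/* Rough sizing example for the multisampled-null workaround buffer above
 * (resolution chosen arbitrarily): for a 1920x1080 target,
 * width_in_tiles = ALIGN(1920, 16) / 16 = 120 and
 * height_in_tiles = ALIGN(1080, 16) / 16 = 68, so
 * size_needed = (120 + 68 - 1) * 4096 = 765952 bytes (~748 KiB), far less
 * than a fully allocated dummy color buffer of the same dimensions.
 */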
647
648 /**
649 * Sets up a surface state structure to point at the given region.
650 * While it is only used for the front/back buffer currently, it should be
651 * usable for further buffers when doing ARB_draw_buffers support.
652 */
653 static uint32_t
654 brw_update_renderbuffer_surface(struct brw_context *brw,
655 struct gl_renderbuffer *rb,
656 bool layered, unsigned unit,
657 uint32_t surf_index)
658 {
659 struct gl_context *ctx = &brw->ctx;
660 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
661 struct intel_mipmap_tree *mt = irb->mt;
662 uint32_t *surf;
663 uint32_t tile_x, tile_y;
664 uint32_t format = 0;
665 uint32_t offset;
666 /* _NEW_BUFFERS */
667 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
668 /* BRW_NEW_FS_PROG_DATA */
669
670 assert(!layered);
671
672 if (rb->TexImage && !brw->has_surface_tile_offset) {
673 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
674
675 if (tile_x != 0 || tile_y != 0) {
676 /* Original gen4 hardware couldn't draw to a non-tile-aligned
677 * destination in a miptree unless you actually set up your renderbuffer
678 * as a miptree and used the fragile lod/array_index/etc. controls to
679 * select the image. So, instead, we just make a new single-level
680 * miptree and render into that.
681 */
682 intel_renderbuffer_move_to_temp(brw, irb, false);
683 mt = irb->mt;
684 }
685 }
686
687 intel_miptree_used_for_rendering(irb->mt);
688
689 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
690
691 format = brw->render_target_format[rb_format];
692 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
693 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
694 __func__, _mesa_get_format_name(rb_format));
695 }
696
697 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
698 format << BRW_SURFACE_FORMAT_SHIFT);
699
700 /* reloc */
701 assert(mt->offset % mt->cpp == 0);
702 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
703 mt->bo->offset64 + mt->offset);
704
705 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
706 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
707
708 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
709 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
710
711 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
712
713 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
714 /* Note that the low bits of these fields are missing, so
715 * there's the possibility of getting in trouble.
716 */
717 assert(tile_x % 4 == 0);
718 assert(tile_y % 2 == 0);
719 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
720 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
721 (mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
722
723 if (brw->gen < 6) {
724 /* _NEW_COLOR */
725 if (!ctx->Color.ColorLogicOpEnabled &&
726 (ctx->Color.BlendEnabled & (1 << unit)))
727 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
728
729 if (!ctx->Color.ColorMask[unit][0])
730 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
731 if (!ctx->Color.ColorMask[unit][1])
732 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
733 if (!ctx->Color.ColorMask[unit][2])
734 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
735
736 /* As mentioned above, disable writes to the alpha component when the
737 * renderbuffer is XRGB.
738 */
739 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
740 !ctx->Color.ColorMask[unit][3]) {
741 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
742 }
743 }
744
745 drm_intel_bo_emit_reloc(brw->batch.bo,
746 offset + 4,
747 mt->bo,
748 surf[1] - mt->bo->offset64,
749 I915_GEM_DOMAIN_RENDER,
750 I915_GEM_DOMAIN_RENDER);
751
752 return offset;
753 }
754
755 /**
756 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
757 */
758 void
759 brw_update_renderbuffer_surfaces(struct brw_context *brw,
760 const struct gl_framebuffer *fb,
761 uint32_t render_target_start,
762 uint32_t *surf_offset)
763 {
764 GLuint i;
765 const unsigned int w = _mesa_geometric_width(fb);
766 const unsigned int h = _mesa_geometric_height(fb);
767 const unsigned int s = _mesa_geometric_samples(fb);
768
769 /* Update surfaces for drawing buffers */
770 if (fb->_NumColorDrawBuffers >= 1) {
771 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
772 const uint32_t surf_index = render_target_start + i;
773
774 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
775 surf_offset[surf_index] =
776 brw->vtbl.update_renderbuffer_surface(
777 brw, fb->_ColorDrawBuffers[i],
778 _mesa_geometric_layers(fb) > 0, i, surf_index);
779 } else {
780 brw->vtbl.emit_null_surface_state(brw, w, h, s,
781 &surf_offset[surf_index]);
782 }
783 }
784 } else {
785 const uint32_t surf_index = render_target_start;
786 brw->vtbl.emit_null_surface_state(brw, w, h, s,
787 &surf_offset[surf_index]);
788 }
789 }
790
791 static void
792 update_renderbuffer_surfaces(struct brw_context *brw)
793 {
794 const struct gl_context *ctx = &brw->ctx;
795
796 /* _NEW_BUFFERS | _NEW_COLOR */
797 const struct gl_framebuffer *fb = ctx->DrawBuffer;
798 brw_update_renderbuffer_surfaces(
799 brw, fb,
800 brw->wm.prog_data->binding_table.render_target_start,
801 brw->wm.base.surf_offset);
802 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
803 }
804
805 const struct brw_tracked_state brw_renderbuffer_surfaces = {
806 .dirty = {
807 .mesa = _NEW_BUFFERS |
808 _NEW_COLOR,
809 .brw = BRW_NEW_BATCH |
810 BRW_NEW_FS_PROG_DATA,
811 },
812 .emit = update_renderbuffer_surfaces,
813 };
814
815 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
816 .dirty = {
817 .mesa = _NEW_BUFFERS,
818 .brw = BRW_NEW_BATCH,
819 },
820 .emit = update_renderbuffer_surfaces,
821 };
822
823
824 static void
825 update_stage_texture_surfaces(struct brw_context *brw,
826 const struct gl_program *prog,
827 struct brw_stage_state *stage_state,
828 bool for_gather)
829 {
830 if (!prog)
831 return;
832
833 struct gl_context *ctx = &brw->ctx;
834
835 uint32_t *surf_offset = stage_state->surf_offset;
836
837 /* BRW_NEW_*_PROG_DATA */
838 if (for_gather)
839 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
840 else
841 surf_offset += stage_state->prog_data->binding_table.texture_start;
842
843 unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
844 for (unsigned s = 0; s < num_samplers; s++) {
845 surf_offset[s] = 0;
846
847 if (prog->SamplersUsed & (1 << s)) {
848 const unsigned unit = prog->SamplerUnits[s];
849
850 /* _NEW_TEXTURE */
851 if (ctx->Texture.Unit[unit]._Current) {
852 brw->vtbl.update_texture_surface(ctx, unit, surf_offset + s, for_gather);
853 }
854 }
855 }
856 }
857
858
859 /**
860 * Construct SURFACE_STATE objects for enabled textures.
861 */
862 static void
863 brw_update_texture_surfaces(struct brw_context *brw)
864 {
865 /* BRW_NEW_VERTEX_PROGRAM */
866 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
867
868 /* BRW_NEW_GEOMETRY_PROGRAM */
869 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
870
871 /* BRW_NEW_FRAGMENT_PROGRAM */
872 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
873
874 /* BRW_NEW_COMPUTE_PROGRAM */
875 struct gl_program *cs = (struct gl_program *) brw->compute_program;
876
877 /* _NEW_TEXTURE */
878 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false);
879 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false);
880 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false);
881 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false);
882
883 /* Emit an alternate set of surface state for gather. This
884 * allows the surface format to be overridden for only the
885 * gather4 messages. */
886 if (brw->gen < 8) {
887 if (vs && vs->UsesGather)
888 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true);
889 if (gs && gs->UsesGather)
890 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true);
891 if (fs && fs->UsesGather)
892 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
893 if (cs && cs->UsesGather)
894 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true);
895 }
896
897 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
898 }
899
900 const struct brw_tracked_state brw_texture_surfaces = {
901 .dirty = {
902 .mesa = _NEW_TEXTURE,
903 .brw = BRW_NEW_BATCH |
904 BRW_NEW_COMPUTE_PROGRAM |
905 BRW_NEW_FRAGMENT_PROGRAM |
906 BRW_NEW_FS_PROG_DATA |
907 BRW_NEW_GEOMETRY_PROGRAM |
908 BRW_NEW_GS_PROG_DATA |
909 BRW_NEW_TEXTURE_BUFFER |
910 BRW_NEW_VERTEX_PROGRAM |
911 BRW_NEW_VS_PROG_DATA,
912 },
913 .emit = brw_update_texture_surfaces,
914 };
915
916 void
917 brw_upload_ubo_surfaces(struct brw_context *brw,
918 struct gl_shader *shader,
919 struct brw_stage_state *stage_state,
920 struct brw_stage_prog_data *prog_data,
921 bool dword_pitch)
922 {
923 struct gl_context *ctx = &brw->ctx;
924
925 if (!shader)
926 return;
927
928 uint32_t *surf_offsets =
929 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
930
931 for (int i = 0; i < shader->NumUniformBlocks; i++) {
932 struct intel_buffer_object *intel_bo;
933
934 /* Because behavior for referencing outside of the binding's size in the
935 * glBindBufferRange case is undefined, we can just bind the whole buffer
936 * that glBindBufferBase wants and still be a correct implementation.
937 */
938 if (!shader->UniformBlocks[i].IsShaderStorage) {
939 struct gl_uniform_buffer_binding *binding;
940 binding =
941 &ctx->UniformBufferBindings[shader->UniformBlocks[i].Binding];
942 intel_bo = intel_buffer_object(binding->BufferObject);
943 drm_intel_bo *bo =
944 intel_bufferobj_buffer(brw, intel_bo,
945 binding->Offset,
946 binding->BufferObject->Size - binding->Offset);
947 brw_create_constant_surface(brw, bo, binding->Offset,
948 bo->size - binding->Offset,
949 &surf_offsets[i],
950 dword_pitch);
951 } else {
952 struct gl_shader_storage_buffer_binding *binding;
953 binding =
954 &ctx->ShaderStorageBufferBindings[shader->UniformBlocks[i].Binding];
955 intel_bo = intel_buffer_object(binding->BufferObject);
956 drm_intel_bo *bo =
957 intel_bufferobj_buffer(brw, intel_bo,
958 binding->Offset,
959 binding->BufferObject->Size - binding->Offset);
960 brw_create_buffer_surface(brw, bo, binding->Offset,
961 bo->size - binding->Offset,
962 &surf_offsets[i],
963 dword_pitch);
964 }
965 }
966
967 if (shader->NumUniformBlocks)
968 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
969 }
970
971 static void
972 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
973 {
974 struct gl_context *ctx = &brw->ctx;
975 /* _NEW_PROGRAM */
976 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
977
978 if (!prog)
979 return;
980
981 /* BRW_NEW_FS_PROG_DATA */
982 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
983 &brw->wm.base, &brw->wm.prog_data->base, true);
984 }
985
986 const struct brw_tracked_state brw_wm_ubo_surfaces = {
987 .dirty = {
988 .mesa = _NEW_PROGRAM,
989 .brw = BRW_NEW_BATCH |
990 BRW_NEW_FS_PROG_DATA |
991 BRW_NEW_UNIFORM_BUFFER,
992 },
993 .emit = brw_upload_wm_ubo_surfaces,
994 };
995
996 void
997 brw_upload_abo_surfaces(struct brw_context *brw,
998 struct gl_shader_program *prog,
999 struct brw_stage_state *stage_state,
1000 struct brw_stage_prog_data *prog_data)
1001 {
1002 struct gl_context *ctx = &brw->ctx;
1003 uint32_t *surf_offsets =
1004 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1005
1006 for (unsigned i = 0; i < prog->NumAtomicBuffers; i++) {
1007 struct gl_atomic_buffer_binding *binding =
1008 &ctx->AtomicBufferBindings[prog->AtomicBuffers[i].Binding];
1009 struct intel_buffer_object *intel_bo =
1010 intel_buffer_object(binding->BufferObject);
1011 drm_intel_bo *bo = intel_bufferobj_buffer(
1012 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1013
1014 brw->vtbl.emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1015 binding->Offset, BRW_SURFACEFORMAT_RAW,
1016 bo->size - binding->Offset, 1, true);
1017 }
1018
1019 if (prog->NumAtomicBuffers)
1020 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1021 }
1022
1023 static void
1024 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1025 {
1026 struct gl_context *ctx = &brw->ctx;
1027 /* _NEW_PROGRAM */
1028 struct gl_shader_program *prog = ctx->Shader._CurrentFragmentProgram;
1029
1030 if (prog) {
1031 /* BRW_NEW_FS_PROG_DATA */
1032 brw_upload_abo_surfaces(brw, prog, &brw->wm.base,
1033 &brw->wm.prog_data->base);
1034 }
1035 }
1036
1037 const struct brw_tracked_state brw_wm_abo_surfaces = {
1038 .dirty = {
1039 .mesa = _NEW_PROGRAM,
1040 .brw = BRW_NEW_ATOMIC_BUFFER |
1041 BRW_NEW_BATCH |
1042 BRW_NEW_FS_PROG_DATA,
1043 },
1044 .emit = brw_upload_wm_abo_surfaces,
1045 };
1046
1047 static void
1048 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1049 {
1050 struct gl_context *ctx = &brw->ctx;
1051 /* _NEW_PROGRAM */
1052 struct gl_shader_program *prog =
1053 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1054
1055 if (prog) {
1056 /* BRW_NEW_CS_PROG_DATA */
1057 brw_upload_abo_surfaces(brw, prog, &brw->cs.base,
1058 &brw->cs.prog_data->base);
1059 }
1060 }
1061
1062 const struct brw_tracked_state brw_cs_abo_surfaces = {
1063 .dirty = {
1064 .mesa = _NEW_PROGRAM,
1065 .brw = BRW_NEW_ATOMIC_BUFFER |
1066 BRW_NEW_BATCH |
1067 BRW_NEW_CS_PROG_DATA,
1068 },
1069 .emit = brw_upload_cs_abo_surfaces,
1070 };
1071
1072 static void
1073 brw_upload_cs_image_surfaces(struct brw_context *brw)
1074 {
1075 struct gl_context *ctx = &brw->ctx;
1076 /* _NEW_PROGRAM */
1077 struct gl_shader_program *prog =
1078 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1079
1080 if (prog) {
1081 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS */
1082 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1083 &brw->cs.base, &brw->cs.prog_data->base);
1084 }
1085 }
1086
1087 const struct brw_tracked_state brw_cs_image_surfaces = {
1088 .dirty = {
1089 .mesa = _NEW_PROGRAM,
1090 .brw = BRW_NEW_BATCH |
1091 BRW_NEW_CS_PROG_DATA |
1092 BRW_NEW_IMAGE_UNITS
1093 },
1094 .emit = brw_upload_cs_image_surfaces,
1095 };
1096
1097 static uint32_t
1098 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1099 {
1100 if (access == GL_WRITE_ONLY) {
1101 return brw_format_for_mesa_format(format);
1102 } else {
1103 /* Typed surface reads support a very limited subset of the shader
1104 * image formats. Translate it into the closest format the
1105 * hardware supports.
1106 */
1107 if ((_mesa_get_format_bytes(format) >= 16 && brw->gen <= 8) ||
1108 (_mesa_get_format_bytes(format) >= 8 &&
1109 (brw->gen == 7 && !brw->is_haswell)))
1110 return BRW_SURFACEFORMAT_RAW;
1111 else
1112 return brw_format_for_mesa_format(
1113 brw_lower_mesa_image_format(brw->intelScreen->devinfo, format));
1114 }
1115 }
1116
1117 static void
1118 update_default_image_param(struct brw_context *brw,
1119 struct gl_image_unit *u,
1120 unsigned surface_idx,
1121 struct brw_image_param *param)
1122 {
1123 memset(param, 0, sizeof(*param));
1124 param->surface_idx = surface_idx;
1125 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1126 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1127 * detailed explanation of these parameters.
1128 */
1129 param->swizzling[0] = 0xff;
1130 param->swizzling[1] = 0xff;
1131 }
1132
1133 static void
1134 update_buffer_image_param(struct brw_context *brw,
1135 struct gl_image_unit *u,
1136 unsigned surface_idx,
1137 struct brw_image_param *param)
1138 {
1139 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1140
1141 update_default_image_param(brw, u, surface_idx, param);
1142
1143 param->size[0] = obj->Size / _mesa_get_format_bytes(u->_ActualFormat);
1144 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1145 }
1146
1147 static void
1148 update_texture_image_param(struct brw_context *brw,
1149 struct gl_image_unit *u,
1150 unsigned surface_idx,
1151 struct brw_image_param *param)
1152 {
1153 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1154
1155 update_default_image_param(brw, u, surface_idx, param);
1156
1157 param->size[0] = minify(mt->logical_width0, u->Level);
1158 param->size[1] = minify(mt->logical_height0, u->Level);
1159 param->size[2] = (!u->Layered ? 1 :
1160 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1161 u->TexObj->Target == GL_TEXTURE_3D ?
1162 minify(mt->logical_depth0, u->Level) :
1163 mt->logical_depth0);
1164
1165 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1166 &param->offset[0],
1167 &param->offset[1]);
1168
1169 param->stride[0] = mt->cpp;
1170 param->stride[1] = mt->pitch / mt->cpp;
1171 param->stride[2] =
1172 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1173 param->stride[3] =
1174 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1175
1176 if (mt->tiling == I915_TILING_X) {
1177 /* An X tile is a rectangular block of 512x8 bytes. */
1178 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1179 param->tiling[1] = _mesa_logbase2(8);
1180
1181 if (brw->has_swizzling) {
1182 /* Right shifts required to swizzle bits 9 and 10 of the memory
1183 * address with bit 6.
1184 */
1185 param->swizzling[0] = 3;
1186 param->swizzling[1] = 4;
1187 }
1188 } else if (mt->tiling == I915_TILING_Y) {
1189 /* The layout of a Y-tiled surface in memory isn't fundamentally
1190 * different from the layout of an X-tiled surface; we simply pretend that
1191 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1192 * one arranged in X-major order just as is the case for X-tiling.
1193 */
1194 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1195 param->tiling[1] = _mesa_logbase2(32);
1196
1197 if (brw->has_swizzling) {
1198 /* Right shift required to swizzle bit 9 of the memory address with
1199 * bit 6.
1200 */
1201 param->swizzling[0] = 3;
1202 }
1203 }
1204
1205 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1206 * address calculation algorithm (emit_address_calculation() in
1207 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1208 * modulus equal to the LOD.
1209 */
1210 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1211 0);
1212 }
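/* Worked tiling parameters for the cases above, assuming a 4-byte-per-texel
 * format (cpp chosen purely for illustration): an X-tiled surface gets
 * tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3, i.e.
 * 128x8-texel tiles, while a Y-tiled surface gets tiling[0] = log2(16 / 4) = 2
 * and tiling[1] = log2(32) = 5, i.e. 4x32-texel sub-tiles. The swizzle shifts
 * of 3 and 4 bring address bits 9 and 10 down onto bit 6, matching the bit-6
 * swizzling described in the comments above.
 */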
1213
1214 static void
1215 update_image_surface(struct brw_context *brw,
1216 struct gl_image_unit *u,
1217 GLenum access,
1218 unsigned surface_idx,
1219 uint32_t *surf_offset,
1220 struct brw_image_param *param)
1221 {
1222 if (u->_Valid) {
1223 struct gl_texture_object *obj = u->TexObj;
1224 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1225
1226 if (obj->Target == GL_TEXTURE_BUFFER) {
1227 struct intel_buffer_object *intel_obj =
1228 intel_buffer_object(obj->BufferObject);
1229 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1230 _mesa_get_format_bytes(u->_ActualFormat));
1231
1232 brw->vtbl.emit_buffer_surface_state(
1233 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1234 format, intel_obj->Base.Size / texel_size, texel_size,
1235 access != GL_READ_ONLY);
1236
1237 update_buffer_image_param(brw, u, surface_idx, param);
1238
1239 } else {
1240 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1241 struct intel_mipmap_tree *mt = intel_obj->mt;
1242
1243 if (format == BRW_SURFACEFORMAT_RAW) {
1244 brw->vtbl.emit_buffer_surface_state(
1245 brw, surf_offset, mt->bo, mt->offset,
1246 format, mt->bo->size - mt->offset, 1 /* pitch */,
1247 access != GL_READ_ONLY);
1248
1249 } else {
1250 const unsigned min_layer = obj->MinLayer + u->_Layer;
1251 const unsigned min_level = obj->MinLevel + u->Level;
1252 const unsigned num_layers = (!u->Layered ? 1 :
1253 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1254 mt->logical_depth0);
1255 const GLenum target = (obj->Target == GL_TEXTURE_CUBE_MAP ||
1256 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY ?
1257 GL_TEXTURE_2D_ARRAY : obj->Target);
1258
1259 brw->vtbl.emit_texture_surface_state(
1260 brw, mt, target,
1261 min_layer, min_layer + num_layers,
1262 min_level, min_level + 1,
1263 format, SWIZZLE_XYZW,
1264 surf_offset, access != GL_READ_ONLY, false);
1265 }
1266
1267 update_texture_image_param(brw, u, surface_idx, param);
1268 }
1269
1270 } else {
1271 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1272 update_default_image_param(brw, u, surface_idx, param);
1273 }
1274 }
1275
1276 void
1277 brw_upload_image_surfaces(struct brw_context *brw,
1278 struct gl_shader *shader,
1279 struct brw_stage_state *stage_state,
1280 struct brw_stage_prog_data *prog_data)
1281 {
1282 struct gl_context *ctx = &brw->ctx;
1283
1284 if (shader && shader->NumImages) {
1285 for (unsigned i = 0; i < shader->NumImages; i++) {
1286 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1287 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1288
1289 update_image_surface(brw, u, shader->ImageAccess[i],
1290 surf_idx,
1291 &stage_state->surf_offset[surf_idx],
1292 &prog_data->image_param[i]);
1293 }
1294
1295 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1296 }
1297 }
1298
1299 static void
1300 brw_upload_wm_image_surfaces(struct brw_context *brw)
1301 {
1302 struct gl_context *ctx = &brw->ctx;
1303 /* BRW_NEW_FRAGMENT_PROGRAM */
1304 struct gl_shader_program *prog = ctx->Shader._CurrentFragmentProgram;
1305
1306 if (prog) {
1307 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS */
1308 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1309 &brw->wm.base, &brw->wm.prog_data->base);
1310 }
1311 }
1312
1313 const struct brw_tracked_state brw_wm_image_surfaces = {
1314 .dirty = {
1315 .brw = BRW_NEW_BATCH |
1316 BRW_NEW_FRAGMENT_PROGRAM |
1317 BRW_NEW_FS_PROG_DATA |
1318 BRW_NEW_IMAGE_UNITS
1319 },
1320 .emit = brw_upload_wm_image_surfaces,
1321 };
1322
1323 void
1324 gen4_init_vtable_surface_functions(struct brw_context *brw)
1325 {
1326 brw->vtbl.update_texture_surface = brw_update_texture_surface;
1327 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1328 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1329 brw->vtbl.emit_buffer_surface_state = gen4_emit_buffer_surface_state;
1330 }