1 /*
2 * Copyright 2008 Ben Skeggs
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "nvc0/nvc0_context.h"
24 #include "nvc0/nvc0_resource.h"
25 #include "nvc0/gm107_texture.xml.h"
26 #include "nvc0/nvc0_compute.xml.h"
27 #include "nv50/g80_texture.xml.h"
28 #include "nv50/g80_defs.xml.h"
29
30 #include "util/u_format.h"
31
32 #define NVE4_TIC_ENTRY_INVALID 0x000fffff
33 #define NVE4_TSC_ENTRY_INVALID 0xfff00000
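/* Descriptive note: on Kepler and later, each tex_handles[] word packs the TIC
 * index in bits 0-19 and the TSC index in bits 20-31 (see nve4_validate_tic()
 * and nve4_validate_tsc() below); these masks mark the respective half of a
 * handle as invalid.
 */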
34
35 static inline uint32_t
36 nv50_tic_swizzle(const struct nvc0_format *fmt, unsigned swz, bool tex_int)
37 {
38 switch (swz) {
   case PIPE_SWIZZLE_X: return fmt->tic.src_x;
   case PIPE_SWIZZLE_Y: return fmt->tic.src_y;
   case PIPE_SWIZZLE_Z: return fmt->tic.src_z;
   case PIPE_SWIZZLE_W: return fmt->tic.src_w;
43 case PIPE_SWIZZLE_1:
44 return tex_int ? G80_TIC_SOURCE_ONE_INT : G80_TIC_SOURCE_ONE_FLOAT;
45 case PIPE_SWIZZLE_0:
46 default:
47 return G80_TIC_SOURCE_ZERO;
48 }
49 }
50
51 struct pipe_sampler_view *
52 nvc0_create_sampler_view(struct pipe_context *pipe,
53 struct pipe_resource *res,
54 const struct pipe_sampler_view *templ)
55 {
56 uint32_t flags = 0;
57
58 if (templ->target == PIPE_TEXTURE_RECT || templ->target == PIPE_BUFFER)
59 flags |= NV50_TEXVIEW_SCALED_COORDS;
60
61 return nvc0_create_texture_view(pipe, res, templ, flags, templ->target);
62 }
63
64 static struct pipe_sampler_view *
65 gm107_create_texture_view(struct pipe_context *pipe,
66 struct pipe_resource *texture,
67 const struct pipe_sampler_view *templ,
68 uint32_t flags,
69 enum pipe_texture_target target)
70 {
71 const struct util_format_description *desc;
72 const struct nvc0_format *fmt;
73 uint64_t address;
74 uint32_t *tic;
75 uint32_t swz[4];
76 uint32_t width, height;
77 uint32_t depth;
78 struct nv50_tic_entry *view;
79 struct nv50_miptree *mt;
80 bool tex_int;
81
82 view = MALLOC_STRUCT(nv50_tic_entry);
83 if (!view)
84 return NULL;
85 mt = nv50_miptree(texture);
86
87 view->pipe = *templ;
88 view->pipe.reference.count = 1;
89 view->pipe.texture = NULL;
90 view->pipe.context = pipe;
91
92 view->id = -1;
93
94 pipe_resource_reference(&view->pipe.texture, texture);
95
96 tic = &view->tic[0];
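   /* view->tic[] holds the eight 32-bit words of the texture image control
    * (TIC) entry; validation later copies these 32 bytes into the TIC table
    * in screen->txc at offset tic->id * 32.
    */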
97
98 desc = util_format_description(view->pipe.format);
99 tex_int = util_format_is_pure_integer(view->pipe.format);
100
101 fmt = &nvc0_format_table[view->pipe.format];
102 swz[0] = nv50_tic_swizzle(fmt, view->pipe.swizzle_r, tex_int);
103 swz[1] = nv50_tic_swizzle(fmt, view->pipe.swizzle_g, tex_int);
104 swz[2] = nv50_tic_swizzle(fmt, view->pipe.swizzle_b, tex_int);
105 swz[3] = nv50_tic_swizzle(fmt, view->pipe.swizzle_a, tex_int);
106
107 tic[0] = fmt->tic.format << GM107_TIC2_0_COMPONENTS_SIZES__SHIFT;
108 tic[0] |= fmt->tic.type_r << GM107_TIC2_0_R_DATA_TYPE__SHIFT;
109 tic[0] |= fmt->tic.type_g << GM107_TIC2_0_G_DATA_TYPE__SHIFT;
110 tic[0] |= fmt->tic.type_b << GM107_TIC2_0_B_DATA_TYPE__SHIFT;
111 tic[0] |= fmt->tic.type_a << GM107_TIC2_0_A_DATA_TYPE__SHIFT;
112 tic[0] |= swz[0] << GM107_TIC2_0_X_SOURCE__SHIFT;
113 tic[0] |= swz[1] << GM107_TIC2_0_Y_SOURCE__SHIFT;
114 tic[0] |= swz[2] << GM107_TIC2_0_Z_SOURCE__SHIFT;
115 tic[0] |= swz[3] << GM107_TIC2_0_W_SOURCE__SHIFT;
116
117 address = mt->base.address;
118
119 tic[3] = GM107_TIC2_3_LOD_ANISO_QUALITY_2;
120 tic[4] = GM107_TIC2_4_SECTOR_PROMOTION_PROMOTE_TO_2_V;
121 tic[4] |= GM107_TIC2_4_BORDER_SIZE_SAMPLER_COLOR;
122
123 if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
124 tic[4] |= GM107_TIC2_4_SRGB_CONVERSION;
125
126 if (!(flags & NV50_TEXVIEW_SCALED_COORDS))
127 tic[5] = GM107_TIC2_5_NORMALIZED_COORDS;
128 else
129 tic[5] = 0;
130
131 /* check for linear storage type */
132 if (unlikely(!nouveau_bo_memtype(nv04_resource(texture)->bo))) {
133 if (texture->target == PIPE_BUFFER) {
134 assert(!(tic[5] & GM107_TIC2_5_NORMALIZED_COORDS));
135 width = view->pipe.u.buf.size / (desc->block.bits / 8) - 1;
         address += view->pipe.u.buf.offset;
138 tic[2] = GM107_TIC2_2_HEADER_VERSION_ONE_D_BUFFER;
139 tic[3] |= width >> 16;
140 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_ONE_D_BUFFER;
141 tic[4] |= width & 0xffff;
142 } else {
143 assert(!(mt->level[0].pitch & 0x1f));
         /* must be a 2D texture without mipmaps */
145 tic[2] = GM107_TIC2_2_HEADER_VERSION_PITCH;
146 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_TWO_D_NO_MIPMAP;
147 tic[3] |= mt->level[0].pitch >> 5;
148 tic[4] |= mt->base.base.width0 - 1;
149 tic[5] |= 0 << GM107_TIC2_5_DEPTH_MINUS_ONE__SHIFT;
150 tic[5] |= mt->base.base.height0 - 1;
151 }
152 tic[1] = address;
153 tic[2] |= address >> 32;
154 tic[6] = 0;
155 tic[7] = 0;
156 return &view->pipe;
157 }
158
159 tic[2] = GM107_TIC2_2_HEADER_VERSION_BLOCKLINEAR;
160 tic[3] |=
161 ((mt->level[0].tile_mode & 0x0f0) >> 4 << 3) |
162 ((mt->level[0].tile_mode & 0xf00) >> 8 << 6);
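   /* Descriptive note: tile_mode nibble 1 (bits 4-7) and nibble 2 (bits 8-11)
    * appear to hold the block-linear tiling shifts for Y and Z (the same
    * fields the NVC0_TILE_SHIFT_Y/Z macros extract further down); repack them
    * into the TIC's tiling fields here.
    */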
163
164 depth = MAX2(mt->base.base.array_size, mt->base.base.depth0);
165
166 if (mt->base.base.array_size > 1) {
167 /* there doesn't seem to be a base layer field in TIC */
168 address += view->pipe.u.tex.first_layer * mt->layer_stride;
169 depth = view->pipe.u.tex.last_layer - view->pipe.u.tex.first_layer + 1;
170 }
171 tic[1] = address;
172 tic[2] |= address >> 32;
173
174 switch (target) {
175 case PIPE_TEXTURE_1D:
176 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_ONE_D;
177 break;
178 case PIPE_TEXTURE_2D:
179 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_TWO_D;
180 break;
181 case PIPE_TEXTURE_RECT:
182 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_TWO_D;
183 break;
184 case PIPE_TEXTURE_3D:
185 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_THREE_D;
186 break;
187 case PIPE_TEXTURE_CUBE:
188 depth /= 6;
189 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_CUBEMAP;
190 break;
191 case PIPE_TEXTURE_1D_ARRAY:
192 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_ONE_D_ARRAY;
193 break;
194 case PIPE_TEXTURE_2D_ARRAY:
195 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_TWO_D_ARRAY;
196 break;
197 case PIPE_TEXTURE_CUBE_ARRAY:
198 depth /= 6;
199 tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_CUBE_ARRAY;
200 break;
201 default:
202 unreachable("unexpected/invalid texture target");
203 }
204
205 tic[3] |= (flags & NV50_TEXVIEW_FILTER_MSAA8) ?
206 GM107_TIC2_3_USE_HEADER_OPT_CONTROL :
207 GM107_TIC2_3_LOD_ANISO_QUALITY_HIGH |
208 GM107_TIC2_3_LOD_ISO_QUALITY_HIGH;
209
210 if (flags & NV50_TEXVIEW_ACCESS_RESOLVE) {
211 width = mt->base.base.width0 << mt->ms_x;
212 height = mt->base.base.height0 << mt->ms_y;
213 } else {
214 width = mt->base.base.width0;
215 height = mt->base.base.height0;
216 }
217
218 tic[4] |= width - 1;
219
220 tic[5] |= (height - 1) & 0xffff;
221 tic[5] |= (depth - 1) << GM107_TIC2_5_DEPTH_MINUS_ONE__SHIFT;
222 tic[3] |= mt->base.base.last_level << GM107_TIC2_3_MAX_MIP_LEVEL__SHIFT;
223
224 /* sampling points: (?) */
225 if ((flags & NV50_TEXVIEW_ACCESS_RESOLVE) && mt->ms_x > 1) {
226 tic[6] = GM107_TIC2_6_ANISO_FINE_SPREAD_MODIFIER_CONST_TWO;
227 tic[6] |= GM107_TIC2_6_MAX_ANISOTROPY_2_TO_1;
228 } else {
229 tic[6] = GM107_TIC2_6_ANISO_FINE_SPREAD_FUNC_TWO;
230 tic[6] |= GM107_TIC2_6_ANISO_COARSE_SPREAD_FUNC_ONE;
231 }
232
233 tic[7] = (view->pipe.u.tex.last_level << 4) | view->pipe.u.tex.first_level;
234 tic[7] |= mt->ms_mode << GM107_TIC2_7_MULTI_SAMPLE_COUNT__SHIFT;
235
236 return &view->pipe;
237 }
238
239 struct pipe_sampler_view *
240 gm107_create_texture_view_from_image(struct pipe_context *pipe,
241 const struct pipe_image_view *view)
242 {
243 struct nv04_resource *res = nv04_resource(view->resource);
244 struct pipe_sampler_view templ = {};
245 enum pipe_texture_target target;
246 uint32_t flags = 0;
247
248 if (!res)
249 return NULL;
250 target = res->base.target;
251
252 if (target == PIPE_TEXTURE_CUBE || target == PIPE_TEXTURE_CUBE_ARRAY)
253 target = PIPE_TEXTURE_2D_ARRAY;
254
255 templ.format = view->format;
256 templ.swizzle_r = PIPE_SWIZZLE_X;
257 templ.swizzle_g = PIPE_SWIZZLE_Y;
258 templ.swizzle_b = PIPE_SWIZZLE_Z;
259 templ.swizzle_a = PIPE_SWIZZLE_W;
260
261 if (target == PIPE_BUFFER) {
262 templ.u.buf.offset = view->u.buf.offset;
263 templ.u.buf.size = view->u.buf.size;
264 } else {
265 templ.u.tex.first_layer = view->u.tex.first_layer;
266 templ.u.tex.last_layer = view->u.tex.last_layer;
267 templ.u.tex.first_level = templ.u.tex.last_level = view->u.tex.level;
268 }
269
270 flags = NV50_TEXVIEW_SCALED_COORDS;
271
272 return nvc0_create_texture_view(pipe, &res->base, &templ, flags, target);
273 }
274
275 static struct pipe_sampler_view *
276 gf100_create_texture_view(struct pipe_context *pipe,
277 struct pipe_resource *texture,
278 const struct pipe_sampler_view *templ,
279 uint32_t flags,
280 enum pipe_texture_target target)
281 {
282 const struct util_format_description *desc;
283 const struct nvc0_format *fmt;
284 uint64_t address;
285 uint32_t *tic;
286 uint32_t swz[4];
287 uint32_t width, height;
288 uint32_t depth;
289 uint32_t tex_fmt;
290 struct nv50_tic_entry *view;
291 struct nv50_miptree *mt;
292 bool tex_int;
293
294 view = MALLOC_STRUCT(nv50_tic_entry);
295 if (!view)
296 return NULL;
297 mt = nv50_miptree(texture);
298
299 view->pipe = *templ;
300 view->pipe.reference.count = 1;
301 view->pipe.texture = NULL;
302 view->pipe.context = pipe;
303
304 view->id = -1;
305
306 pipe_resource_reference(&view->pipe.texture, texture);
307
308 tic = &view->tic[0];
309
310 desc = util_format_description(view->pipe.format);
311
312 fmt = &nvc0_format_table[view->pipe.format];
313
314 tex_int = util_format_is_pure_integer(view->pipe.format);
315 tex_fmt = fmt->tic.format & 0x3f;
316
317 swz[0] = nv50_tic_swizzle(fmt, view->pipe.swizzle_r, tex_int);
318 swz[1] = nv50_tic_swizzle(fmt, view->pipe.swizzle_g, tex_int);
319 swz[2] = nv50_tic_swizzle(fmt, view->pipe.swizzle_b, tex_int);
320 swz[3] = nv50_tic_swizzle(fmt, view->pipe.swizzle_a, tex_int);
321 tic[0] = (tex_fmt << G80_TIC_0_COMPONENTS_SIZES__SHIFT) |
322 (fmt->tic.type_r << G80_TIC_0_R_DATA_TYPE__SHIFT) |
323 (fmt->tic.type_g << G80_TIC_0_G_DATA_TYPE__SHIFT) |
324 (fmt->tic.type_b << G80_TIC_0_B_DATA_TYPE__SHIFT) |
325 (fmt->tic.type_a << G80_TIC_0_A_DATA_TYPE__SHIFT) |
326 (swz[0] << G80_TIC_0_X_SOURCE__SHIFT) |
327 (swz[1] << G80_TIC_0_Y_SOURCE__SHIFT) |
328 (swz[2] << G80_TIC_0_Z_SOURCE__SHIFT) |
329 (swz[3] << G80_TIC_0_W_SOURCE__SHIFT) |
330 ((fmt->tic.format & 0x40) << (GK20A_TIC_0_USE_COMPONENT_SIZES_EXTENDED__SHIFT - 6));
331
332 address = mt->base.address;
333
334 tic[2] = 0x10001000 | G80_TIC_2_BORDER_SOURCE_COLOR;
335
336 if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
337 tic[2] |= G80_TIC_2_SRGB_CONVERSION;
338
339 if (!(flags & NV50_TEXVIEW_SCALED_COORDS))
340 tic[2] |= G80_TIC_2_NORMALIZED_COORDS;
341
342 /* check for linear storage type */
343 if (unlikely(!nouveau_bo_memtype(nv04_resource(texture)->bo))) {
344 if (texture->target == PIPE_BUFFER) {
345 assert(!(tic[2] & G80_TIC_2_NORMALIZED_COORDS));
         address += view->pipe.u.buf.offset;
348 tic[2] |= G80_TIC_2_LAYOUT_PITCH | G80_TIC_2_TEXTURE_TYPE_ONE_D_BUFFER;
349 tic[3] = 0;
         tic[4] = view->pipe.u.buf.size / (desc->block.bits / 8); /* width */
352 tic[5] = 0;
353 } else {
         /* must be a 2D texture without mipmaps */
355 tic[2] |= G80_TIC_2_LAYOUT_PITCH | G80_TIC_2_TEXTURE_TYPE_TWO_D_NO_MIPMAP;
356 tic[3] = mt->level[0].pitch;
357 tic[4] = mt->base.base.width0;
358 tic[5] = (1 << 16) | mt->base.base.height0;
359 }
360 tic[6] =
361 tic[7] = 0;
362 tic[1] = address;
363 tic[2] |= address >> 32;
364 return &view->pipe;
365 }
366
367 tic[2] |=
368 ((mt->level[0].tile_mode & 0x0f0) << (22 - 4)) |
369 ((mt->level[0].tile_mode & 0xf00) << (25 - 8));
370
371 depth = MAX2(mt->base.base.array_size, mt->base.base.depth0);
372
373 if (mt->base.base.array_size > 1) {
374 /* there doesn't seem to be a base layer field in TIC */
375 address += view->pipe.u.tex.first_layer * mt->layer_stride;
376 depth = view->pipe.u.tex.last_layer - view->pipe.u.tex.first_layer + 1;
377 }
378 tic[1] = address;
379 tic[2] |= address >> 32;
380
381 switch (target) {
382 case PIPE_TEXTURE_1D:
383 tic[2] |= G80_TIC_2_TEXTURE_TYPE_ONE_D;
384 break;
385 case PIPE_TEXTURE_2D:
386 tic[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D;
387 break;
388 case PIPE_TEXTURE_RECT:
389 tic[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D;
390 break;
391 case PIPE_TEXTURE_3D:
392 tic[2] |= G80_TIC_2_TEXTURE_TYPE_THREE_D;
393 break;
394 case PIPE_TEXTURE_CUBE:
395 depth /= 6;
396 tic[2] |= G80_TIC_2_TEXTURE_TYPE_CUBEMAP;
397 break;
398 case PIPE_TEXTURE_1D_ARRAY:
399 tic[2] |= G80_TIC_2_TEXTURE_TYPE_ONE_D_ARRAY;
400 break;
401 case PIPE_TEXTURE_2D_ARRAY:
402 tic[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D_ARRAY;
403 break;
404 case PIPE_TEXTURE_CUBE_ARRAY:
405 depth /= 6;
406 tic[2] |= G80_TIC_2_TEXTURE_TYPE_CUBE_ARRAY;
407 break;
408 default:
409 unreachable("unexpected/invalid texture target");
410 }
411
412 tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000;
413
414 if (flags & NV50_TEXVIEW_ACCESS_RESOLVE) {
415 width = mt->base.base.width0 << mt->ms_x;
416 height = mt->base.base.height0 << mt->ms_y;
417 } else {
418 width = mt->base.base.width0;
419 height = mt->base.base.height0;
420 }
421
422 tic[4] = (1 << 31) | width;
423
424 tic[5] = height & 0xffff;
425 tic[5] |= depth << 16;
426 tic[5] |= mt->base.base.last_level << 28;
427
428 /* sampling points: (?) */
429 if (flags & NV50_TEXVIEW_ACCESS_RESOLVE)
430 tic[6] = (mt->ms_x > 1) ? 0x88000000 : 0x03000000;
431 else
432 tic[6] = 0x03000000;
433
434 tic[7] = (view->pipe.u.tex.last_level << 4) | view->pipe.u.tex.first_level;
435 tic[7] |= mt->ms_mode << 12;
436
437 return &view->pipe;
438 }
439
440 struct pipe_sampler_view *
441 nvc0_create_texture_view(struct pipe_context *pipe,
442 struct pipe_resource *texture,
443 const struct pipe_sampler_view *templ,
444 uint32_t flags,
445 enum pipe_texture_target target)
446 {
447 if (nvc0_context(pipe)->screen->tic.maxwell)
448 return gm107_create_texture_view(pipe, texture, templ, flags, target);
449 return gf100_create_texture_view(pipe, texture, templ, flags, target);
450 }
451
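/* For buffer resources the backing bo can be relocated; refresh the address
 * stored in the TIC entry (tic[1] and the low byte of tic[2]) and, when it
 * changed, invalidate tic->id so the entry gets uploaded again.
 */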
452 void
453 nvc0_update_tic(struct nvc0_context *nvc0, struct nv50_tic_entry *tic,
454 struct nv04_resource *res)
455 {
456 uint64_t address = res->address;
457 if (res->base.target != PIPE_BUFFER)
458 return;
459 address += tic->pipe.u.buf.offset;
460 if (tic->tic[1] == (uint32_t)address &&
461 (tic->tic[2] & 0xff) == address >> 32)
462 return;
463
464 nvc0_screen_tic_unlock(nvc0->screen, tic);
465 tic->id = -1;
466 tic->tic[1] = address;
467 tic->tic[2] &= 0xffffff00;
468 tic->tic[2] |= address >> 32;
469 }
470
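/* Fermi path: upload newly allocated TIC entries through M2MF and (re)bind
 * textures with BIND_TIC methods; each command word is
 * (tic_id << 9) | (slot << 1) | enable.
 */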
471 bool
472 nvc0_validate_tic(struct nvc0_context *nvc0, int s)
473 {
474 uint32_t commands[32];
475 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
476 unsigned i;
477 unsigned n = 0;
478 bool need_flush = false;
479
480 for (i = 0; i < nvc0->num_textures[s]; ++i) {
481 struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
482 struct nv04_resource *res;
483 const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));
484
485 if (!tic) {
486 if (dirty)
487 commands[n++] = (i << 1) | 0;
488 continue;
489 }
490 res = nv04_resource(tic->pipe.texture);
491 nvc0_update_tic(nvc0, tic, res);
492
493 if (tic->id < 0) {
494 tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);
495
496 nvc0_m2mf_push_linear(&nvc0->base, nvc0->screen->txc, tic->id * 32,
497 NV_VRAM_DOMAIN(&nvc0->screen->base), 32,
498 tic->tic);
499 need_flush = true;
500 } else
501 if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
502 if (unlikely(s == 5))
503 BEGIN_NVC0(push, NVC0_CP(TEX_CACHE_CTL), 1);
504 else
505 BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
506 PUSH_DATA (push, (tic->id << 4) | 1);
507 NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_cache_flush_count, 1);
508 }
509 nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
510
511 res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
512 res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
513
514 if (!dirty)
515 continue;
516 commands[n++] = (tic->id << 9) | (i << 1) | 1;
517
518 if (unlikely(s == 5))
519 BCTX_REFN(nvc0->bufctx_cp, CP_TEX(i), res, RD);
520 else
521 BCTX_REFN(nvc0->bufctx_3d, 3D_TEX(s, i), res, RD);
522 }
523 for (; i < nvc0->state.num_textures[s]; ++i)
524 commands[n++] = (i << 1) | 0;
525
526 nvc0->state.num_textures[s] = nvc0->num_textures[s];
527
528 if (n) {
529 if (unlikely(s == 5))
530 BEGIN_NIC0(push, NVC0_CP(BIND_TIC), n);
531 else
532 BEGIN_NIC0(push, NVC0_3D(BIND_TIC(s)), n);
533 PUSH_DATAp(push, commands, n);
534 }
535 nvc0->textures_dirty[s] = 0;
536
537 return need_flush;
538 }
539
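/* Kepler+ path: instead of BIND_TIC, the TIC index is merged into the packed
 * tex_handles[] word that nve4_set_tex_handles() uploads to the driver
 * constant buffer.
 */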
540 static bool
541 nve4_validate_tic(struct nvc0_context *nvc0, unsigned s)
542 {
543 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
544 unsigned i;
545 bool need_flush = false;
546
547 for (i = 0; i < nvc0->num_textures[s]; ++i) {
548 struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
549 struct nv04_resource *res;
550 const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));
551
552 if (!tic) {
553 nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
554 continue;
555 }
556 res = nv04_resource(tic->pipe.texture);
557 nvc0_update_tic(nvc0, tic, res);
558
559 if (tic->id < 0) {
560 tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);
561
562 nve4_p2mf_push_linear(&nvc0->base, nvc0->screen->txc, tic->id * 32,
563 NV_VRAM_DOMAIN(&nvc0->screen->base), 32,
564 tic->tic);
565 need_flush = true;
566 } else
567 if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
568 BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
569 PUSH_DATA (push, (tic->id << 4) | 1);
570 }
571 nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
572
573 res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
574 res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
575
576 nvc0->tex_handles[s][i] &= ~NVE4_TIC_ENTRY_INVALID;
577 nvc0->tex_handles[s][i] |= tic->id;
578 if (dirty)
579 BCTX_REFN(nvc0->bufctx_3d, 3D_TEX(s, i), res, RD);
580 }
581 for (; i < nvc0->state.num_textures[s]; ++i) {
582 nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
583 nvc0->textures_dirty[s] |= 1 << i;
584 }
585
586 nvc0->state.num_textures[s] = nvc0->num_textures[s];
587
588 return need_flush;
589 }
590
591 void nvc0_validate_textures(struct nvc0_context *nvc0)
592 {
593 bool need_flush = false;
594 int i;
595
596 for (i = 0; i < 5; i++) {
597 if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
598 need_flush |= nve4_validate_tic(nvc0, i);
599 else
600 need_flush |= nvc0_validate_tic(nvc0, i);
601 }
602
603 if (need_flush) {
604 BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(TIC_FLUSH), 1);
605 PUSH_DATA (nvc0->base.pushbuf, 0);
606 }
607
608 if (nvc0->screen->base.class_3d < NVE4_3D_CLASS) {
609 /* Invalidate all CP textures because they are aliased. */
610 for (int i = 0; i < nvc0->num_textures[5]; i++)
611 nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_CP_TEX(i));
612 nvc0->textures_dirty[5] = ~0;
613 nvc0->dirty_cp |= NVC0_NEW_CP_TEXTURES;
614 }
615 }
616
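/* Sampler (TSC) validation mirrors the TIC path above; TSC entries are also
 * 32 bytes each and live in the same txc bo, starting at offset 65536.
 */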
617 bool
618 nvc0_validate_tsc(struct nvc0_context *nvc0, int s)
619 {
620 uint32_t commands[16];
621 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
622 unsigned i;
623 unsigned n = 0;
624 bool need_flush = false;
625
626 for (i = 0; i < nvc0->num_samplers[s]; ++i) {
627 struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);
628
629 if (!(nvc0->samplers_dirty[s] & (1 << i)))
630 continue;
631 if (!tsc) {
632 commands[n++] = (i << 4) | 0;
633 continue;
634 }
635 nvc0->seamless_cube_map = tsc->seamless_cube_map;
636 if (tsc->id < 0) {
637 tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);
638
639 nvc0_m2mf_push_linear(&nvc0->base, nvc0->screen->txc,
640 65536 + tsc->id * 32, NV_VRAM_DOMAIN(&nvc0->screen->base),
641 32, tsc->tsc);
642 need_flush = true;
643 }
644 nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);
645
646 commands[n++] = (tsc->id << 12) | (i << 4) | 1;
647 }
648 for (; i < nvc0->state.num_samplers[s]; ++i)
649 commands[n++] = (i << 4) | 0;
650
651 nvc0->state.num_samplers[s] = nvc0->num_samplers[s];
652
653 if (n) {
654 if (unlikely(s == 5))
655 BEGIN_NIC0(push, NVC0_CP(BIND_TSC), n);
656 else
657 BEGIN_NIC0(push, NVC0_3D(BIND_TSC(s)), n);
658 PUSH_DATAp(push, commands, n);
659 }
660 nvc0->samplers_dirty[s] = 0;
661
662 return need_flush;
663 }
664
665 bool
666 nve4_validate_tsc(struct nvc0_context *nvc0, int s)
667 {
668 unsigned i;
669 bool need_flush = false;
670
671 for (i = 0; i < nvc0->num_samplers[s]; ++i) {
672 struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);
673
674 if (!tsc) {
675 nvc0->tex_handles[s][i] |= NVE4_TSC_ENTRY_INVALID;
676 continue;
677 }
678 if (tsc->id < 0) {
679 tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);
680
681 nve4_p2mf_push_linear(&nvc0->base, nvc0->screen->txc,
682 65536 + tsc->id * 32,
683 NV_VRAM_DOMAIN(&nvc0->screen->base),
684 32, tsc->tsc);
685 need_flush = true;
686 }
687 nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);
688
689 nvc0->tex_handles[s][i] &= ~NVE4_TSC_ENTRY_INVALID;
690 nvc0->tex_handles[s][i] |= tsc->id << 20;
691 }
692 for (; i < nvc0->state.num_samplers[s]; ++i) {
693 nvc0->tex_handles[s][i] |= NVE4_TSC_ENTRY_INVALID;
694 nvc0->samplers_dirty[s] |= 1 << i;
695 }
696
697 nvc0->state.num_samplers[s] = nvc0->num_samplers[s];
698
699 return need_flush;
700 }
701
702 void nvc0_validate_samplers(struct nvc0_context *nvc0)
703 {
704 bool need_flush = false;
705 int i;
706
707 for (i = 0; i < 5; i++) {
708 if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
709 need_flush |= nve4_validate_tsc(nvc0, i);
710 else
711 need_flush |= nvc0_validate_tsc(nvc0, i);
712 }
713
714 if (need_flush) {
715 BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(TSC_FLUSH), 1);
716 PUSH_DATA (nvc0->base.pushbuf, 0);
717 }
718
719 if (nvc0->screen->base.class_3d < NVE4_3D_CLASS) {
720 /* Invalidate all CP samplers because they are aliased. */
721 nvc0->samplers_dirty[5] = ~0;
722 nvc0->dirty_cp |= NVC0_NEW_CP_SAMPLERS;
723 }
724 }
725
726 /* Upload the "diagonal" entries for the possible texture sources ($t == $s).
727 * At some point we might want to get a list of the combinations used by a
728 * shader and fill in those entries instead of having it extract the handles.
729 */
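/* Descriptive note: each dirty handle is written into the stage's aux constant
 * buffer at dword index 8 + slot, i.e. the (8 + i) * 4 byte offset used in the
 * CB_POS write below.
 */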
730 void
731 nve4_set_tex_handles(struct nvc0_context *nvc0)
732 {
733 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
734 struct nvc0_screen *screen = nvc0->screen;
735 unsigned s;
736
737 if (nvc0->screen->base.class_3d < NVE4_3D_CLASS)
738 return;
739
740 for (s = 0; s < 5; ++s) {
741 uint32_t dirty = nvc0->textures_dirty[s] | nvc0->samplers_dirty[s];
742 if (!dirty)
743 continue;
744 BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
745 PUSH_DATA (push, NVC0_CB_AUX_SIZE);
746 PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
747 PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
748 do {
749 int i = ffs(dirty) - 1;
750 dirty &= ~(1 << i);
751
752 BEGIN_NVC0(push, NVC0_3D(CB_POS), 2);
753 PUSH_DATA (push, (8 + i) * 4);
754 PUSH_DATA (push, nvc0->tex_handles[s][i]);
755 } while (dirty);
756
757 nvc0->textures_dirty[s] = 0;
758 nvc0->samplers_dirty[s] = 0;
759 }
760 }
761
762
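/* Format tables used for surface (image) access on Kepler and later; they are
 * defined at the end of this file.
 */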
763 static const uint8_t nve4_su_format_map[PIPE_FORMAT_COUNT];
764 static const uint16_t nve4_su_format_aux_map[PIPE_FORMAT_COUNT];
765 static const uint16_t nve4_suldp_lib_offset[PIPE_FORMAT_COUNT];
766
767 static void
768 nvc0_get_surface_dims(struct pipe_image_view *view, int *width, int *height,
769 int *depth)
770 {
771 struct nv04_resource *res = nv04_resource(view->resource);
772 int level;
773
774 *width = *height = *depth = 1;
775 if (res->base.target == PIPE_BUFFER) {
776 *width = view->u.buf.size / util_format_get_blocksize(view->format);
777 return;
778 }
779
780 level = view->u.tex.level;
781 *width = u_minify(view->resource->width0, level);
782 *height = u_minify(view->resource->height0, level);
783 *depth = u_minify(view->resource->depth0, level);
784
785 switch (res->base.target) {
786 case PIPE_TEXTURE_1D_ARRAY:
787 case PIPE_TEXTURE_2D_ARRAY:
788 case PIPE_TEXTURE_CUBE:
789 case PIPE_TEXTURE_CUBE_ARRAY:
790 *depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
791 break;
792 case PIPE_TEXTURE_1D:
793 case PIPE_TEXTURE_2D:
794 case PIPE_TEXTURE_RECT:
795 case PIPE_TEXTURE_3D:
796 break;
797 default:
798 assert(!"unexpected texture target");
799 break;
800 }
801 }
802
803 void
804 nvc0_mark_image_range_valid(const struct pipe_image_view *view)
805 {
806 struct nv04_resource *res = (struct nv04_resource *)view->resource;
807
808 assert(view->resource->target == PIPE_BUFFER);
809
810 util_range_add(&res->valid_buffer_range,
811 view->u.buf.offset,
812 view->u.buf.offset + view->u.buf.size);
813 }
814
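/* Fill out the 16-dword surface descriptor written into the aux constant
 * buffer for surface load/store: dword 0 holds address >> 8, dword 1 the
 * packed format, dwords 8-11 the dimensions and target dimensionality used by
 * the imageSize() builtin, dword 12 the blocksize (or a surface-load library
 * offset for missing/unsupported views), dword 13 a limit for raw access, and
 * the remaining words width, pitch, tiling and multisample layout.
 */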
815 void
816 nve4_set_surface_info(struct nouveau_pushbuf *push,
817 struct pipe_image_view *view,
818 struct nvc0_context *nvc0)
819 {
820 struct nvc0_screen *screen = nvc0->screen;
821 struct nv04_resource *res;
822 uint64_t address;
823 uint32_t *const info = push->cur;
824 int width, height, depth;
825 uint8_t log2cpp;
826
827 if (view && !nve4_su_format_map[view->format])
      NOUVEAU_ERR("unsupported surface format, try is_format_supported()!\n");
829
830 push->cur += 16;
831
832 if (!view || !nve4_su_format_map[view->format]) {
833 memset(info, 0, 16 * sizeof(*info));
834
835 info[0] = 0xbadf0000;
836 info[1] = 0x80004000;
837 info[12] = nve4_suldp_lib_offset[PIPE_FORMAT_R32G32B32A32_UINT] +
838 screen->lib_code->start;
839 return;
840 }
841 res = nv04_resource(view->resource);
842
843 address = res->address;
844
845 /* get surface dimensions based on the target. */
846 nvc0_get_surface_dims(view, &width, &height, &depth);
847
848 info[8] = width;
849 info[9] = height;
850 info[10] = depth;
851 switch (res->base.target) {
852 case PIPE_TEXTURE_1D_ARRAY:
853 info[11] = 1;
854 break;
855 case PIPE_TEXTURE_2D:
856 case PIPE_TEXTURE_RECT:
857 info[11] = 2;
858 break;
859 case PIPE_TEXTURE_3D:
860 info[11] = 3;
861 break;
862 case PIPE_TEXTURE_2D_ARRAY:
863 case PIPE_TEXTURE_CUBE:
864 case PIPE_TEXTURE_CUBE_ARRAY:
865 info[11] = 4;
866 break;
867 default:
868 info[11] = 0;
869 break;
870 }
871 log2cpp = (0xf000 & nve4_su_format_aux_map[view->format]) >> 12;
872
   /* Store the blocksize (i.e. number of bytes per pixel) so that a format
    * mismatch can be detected. */
875 info[12] = util_format_get_blocksize(view->format);
876
877 /* limit in bytes for raw access */
878 info[13] = (0x06 << 22) | ((width << log2cpp) - 1);
879
880 info[1] = nve4_su_format_map[view->format];
881
882 #if 0
883 switch (util_format_get_blocksizebits(view->format)) {
884 case 16: info[1] |= 1 << 16; break;
885 case 32: info[1] |= 2 << 16; break;
886 case 64: info[1] |= 3 << 16; break;
887 case 128: info[1] |= 4 << 16; break;
888 default:
889 break;
890 }
891 #else
892 info[1] |= log2cpp << 16;
893 info[1] |= 0x4000;
894 info[1] |= (0x0f00 & nve4_su_format_aux_map[view->format]);
895 #endif
896
897 if (res->base.target == PIPE_BUFFER) {
898 address += view->u.buf.offset;
899
900 info[0] = address >> 8;
901 info[2] = width - 1;
902 info[2] |= (0xff & nve4_su_format_aux_map[view->format]) << 22;
903 info[3] = 0;
904 info[4] = 0;
905 info[5] = 0;
906 info[6] = 0;
907 info[7] = 0;
908 info[14] = 0;
909 info[15] = 0;
910 } else {
911 struct nv50_miptree *mt = nv50_miptree(&res->base);
912 struct nv50_miptree_level *lvl = &mt->level[view->u.tex.level];
913 const unsigned z = view->u.tex.first_layer;
914
915 if (z) {
916 if (mt->layout_3d) {
917 address += nvc0_mt_zslice_offset(mt, view->u.tex.level, z);
918 /* doesn't work if z passes z-tile boundary */
919 if (depth > 1) {
920 pipe_debug_message(&nvc0->base.debug, CONFORMANCE,
921 "3D images are not really supported!");
922 debug_printf("3D images are not really supported!\n");
923 }
924 } else {
925 address += mt->layer_stride * z;
926 }
927 }
928 address += lvl->offset;
929
930 info[0] = address >> 8;
931 info[2] = (width << mt->ms_x) - 1;
932 /* NOTE: this is really important: */
933 info[2] |= (0xff & nve4_su_format_aux_map[view->format]) << 22;
934 info[3] = (0x88 << 24) | (lvl->pitch / 64);
935 info[4] = (height << mt->ms_y) - 1;
936 info[4] |= (lvl->tile_mode & 0x0f0) << 25;
937 info[4] |= NVC0_TILE_SHIFT_Y(lvl->tile_mode) << 22;
938 info[5] = mt->layer_stride >> 8;
939 info[6] = depth - 1;
940 info[6] |= (lvl->tile_mode & 0xf00) << 21;
941 info[6] |= NVC0_TILE_SHIFT_Z(lvl->tile_mode) << 22;
942 info[7] = 0;
943 info[14] = mt->ms_x;
944 info[15] = mt->ms_y;
945 }
946 }
947
948 static inline void
949 nvc0_set_surface_info(struct nouveau_pushbuf *push,
950 struct pipe_image_view *view, uint64_t address,
951 int width, int height, int depth)
952 {
953 struct nv04_resource *res;
954 uint32_t *const info = push->cur;
955
956 push->cur += 16;
957
958 /* Make sure to always initialize the surface information area because it's
959 * used to check if the given image is bound or not. */
960 memset(info, 0, 16 * sizeof(*info));
961
962 if (!view || !view->resource)
963 return;
964 res = nv04_resource(view->resource);
965
966 /* Stick the image dimensions for the imageSize() builtin. */
967 info[8] = width;
968 info[9] = height;
969 info[10] = depth;
970
   /* Store the blocksize (i.e. number of bytes per pixel) so the pixel offset
    * can be computed and a format mismatch detected. */
973 info[12] = util_format_get_blocksize(view->format);
974
975 if (res->base.target == PIPE_BUFFER) {
976 info[0] = address >> 8;
977 info[2] = width;
978 } else {
979 struct nv50_miptree *mt = nv50_miptree(&res->base);
980
981 info[0] = address >> 8;
982 info[2] = width;
983 info[4] = height;
984 info[5] = mt->layer_stride >> 8;
985 info[6] = depth;
986 info[14] = mt->ms_x;
987 info[15] = mt->ms_y;
988 }
989 }
990
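/* Fermi image binding: each image slot is programmed with a 6-word IMAGE()
 * method (address, size, format, tiling) and, in addition, a 16-dword info
 * record is written at NVC0_CB_AUX_SU_INFO(i) in the stage's aux constant
 * buffer.
 */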
991 void
992 nvc0_validate_suf(struct nvc0_context *nvc0, int s)
993 {
994 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
995 struct nvc0_screen *screen = nvc0->screen;
996
997 for (int i = 0; i < NVC0_MAX_IMAGES; ++i) {
998 struct pipe_image_view *view = &nvc0->images[s][i];
999 int width, height, depth;
1000 uint64_t address = 0;
1001
1002 if (s == 5)
1003 BEGIN_NVC0(push, NVC0_CP(IMAGE(i)), 6);
1004 else
1005 BEGIN_NVC0(push, NVC0_3D(IMAGE(i)), 6);
1006
1007 if (view->resource) {
1008 struct nv04_resource *res = nv04_resource(view->resource);
1009 unsigned rt = nvc0_format_table[view->format].rt;
1010
1011 if (util_format_is_depth_or_stencil(view->format))
1012 rt = rt << 12;
1013 else
1014 rt = (rt << 4) | (0x14 << 12);
1015
1016 /* get surface dimensions based on the target. */
1017 nvc0_get_surface_dims(view, &width, &height, &depth);
1018
1019 address = res->address;
1020 if (res->base.target == PIPE_BUFFER) {
1021 unsigned blocksize = util_format_get_blocksize(view->format);
1022
1023 address += view->u.buf.offset;
1024 assert(!(address & 0xff));
1025
1026 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
1027 nvc0_mark_image_range_valid(view);
1028
1029 PUSH_DATAh(push, address);
1030 PUSH_DATA (push, address);
1031 PUSH_DATA (push, align(width * blocksize, 0x100));
1032 PUSH_DATA (push, NVC0_3D_IMAGE_HEIGHT_LINEAR | 1);
1033 PUSH_DATA (push, rt);
1034 PUSH_DATA (push, 0);
1035 } else {
1036 struct nv50_miptree *mt = nv50_miptree(view->resource);
1037 struct nv50_miptree_level *lvl = &mt->level[view->u.tex.level];
1038 const unsigned z = view->u.tex.first_layer;
1039
1040 if (mt->layout_3d) {
1041 address += nvc0_mt_zslice_offset(mt, view->u.tex.level, z);
1042 if (depth >= 1) {
1043 pipe_debug_message(&nvc0->base.debug, CONFORMANCE,
1044 "3D images are not supported!");
1045 debug_printf("3D images are not supported!\n");
1046 }
1047 } else {
1048 address += mt->layer_stride * z;
1049 }
1050 address += lvl->offset;
1051
1052 PUSH_DATAh(push, address);
1053 PUSH_DATA (push, address);
1054 PUSH_DATA (push, width << mt->ms_x);
1055 PUSH_DATA (push, height << mt->ms_y);
1056 PUSH_DATA (push, rt);
1057 PUSH_DATA (push, lvl->tile_mode & 0xff); /* mask out z-tiling */
1058 }
1059
1060 if (s == 5)
1061 BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
1062 else
1063 BCTX_REFN(nvc0->bufctx_3d, 3D_SUF, res, RDWR);
1064 } else {
1065 PUSH_DATA(push, 0);
1066 PUSH_DATA(push, 0);
1067 PUSH_DATA(push, 0);
1068 PUSH_DATA(push, 0);
1069 PUSH_DATA(push, 0x14000);
1070 PUSH_DATA(push, 0);
1071 }
1072
1073 /* stick surface information into the driver constant buffer */
1074 if (s == 5)
1075 BEGIN_NVC0(push, NVC0_CP(CB_SIZE), 3);
1076 else
1077 BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
1078 PUSH_DATA (push, NVC0_CB_AUX_SIZE);
1079 PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
1080 PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
1081 if (s == 5)
1082 BEGIN_1IC0(push, NVC0_CP(CB_POS), 1 + 16);
1083 else
1084 BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 16);
1085 PUSH_DATA (push, NVC0_CB_AUX_SU_INFO(i));
1086
1087 nvc0_set_surface_info(push, view, address, width, height, depth);
1088 }
1089 }
1090
1091 static inline void
1092 nvc0_update_surface_bindings(struct nvc0_context *nvc0)
1093 {
1094 nvc0_validate_suf(nvc0, 4);
1095
1096 /* Invalidate all COMPUTE images because they are aliased with FRAGMENT. */
1097 nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
1098 nvc0->dirty_cp |= NVC0_NEW_CP_SURFACES;
1099 nvc0->images_dirty[5] |= nvc0->images_valid[5];
1100 }
1101
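/* On Maxwell, image access also goes through a texture (TIC) view created by
 * gm107_create_texture_view_from_image(); upload that entry and store its
 * index at NVC0_CB_AUX_TEX_INFO(slot + 32) in the stage's aux constant buffer.
 */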
1102 static void
1103 gm107_validate_surfaces(struct nvc0_context *nvc0,
1104 struct pipe_image_view *view, int stage, int slot)
1105 {
1106 struct nv04_resource *res = nv04_resource(view->resource);
1107 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
1108 struct nvc0_screen *screen = nvc0->screen;
1109 struct nv50_tic_entry *tic;
1110
1111 tic = nv50_tic_entry(nvc0->images_tic[stage][slot]);
1112
1113 res = nv04_resource(tic->pipe.texture);
1114 nvc0_update_tic(nvc0, tic, res);
1115
1116 if (tic->id < 0) {
1117 tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);
1118
1119 /* upload the texture view */
1120 nve4_p2mf_push_linear(&nvc0->base, nvc0->screen->txc, tic->id * 32,
1121 NV_VRAM_DOMAIN(&nvc0->screen->base), 32, tic->tic);
1122
1123 BEGIN_NVC0(push, NVC0_3D(TIC_FLUSH), 1);
1124 PUSH_DATA (push, 0);
1125 } else
1126 if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
1127 BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
1128 PUSH_DATA (push, (tic->id << 4) | 1);
1129 }
1130 nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
1131
1132 res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
1133 res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
1134
1135 BCTX_REFN(nvc0->bufctx_3d, 3D_SUF, res, RD);
1136
1137 /* upload the texture handle */
1138 BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
1139 PUSH_DATA (push, NVC0_CB_AUX_SIZE);
1140 PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(stage));
1141 PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(stage));
1142 BEGIN_NVC0(push, NVC0_3D(CB_POS), 2);
1143 PUSH_DATA (push, NVC0_CB_AUX_TEX_INFO(slot + 32));
1144 PUSH_DATA (push, tic->id);
1145 }
1146
1147 static inline void
1148 nve4_update_surface_bindings(struct nvc0_context *nvc0)
1149 {
1150 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
1151 struct nvc0_screen *screen = nvc0->screen;
1152 int i, j, s;
1153
1154 for (s = 0; s < 5; s++) {
1155 if (!nvc0->images_dirty[s])
1156 continue;
1157
1158 for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
1159 struct pipe_image_view *view = &nvc0->images[s][i];
1160
1161 BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
1162 PUSH_DATA (push, NVC0_CB_AUX_SIZE);
1163 PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
1164 PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
1165 BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 16);
1166 PUSH_DATA (push, NVC0_CB_AUX_SU_INFO(i));
1167
1168 if (view->resource) {
1169 struct nv04_resource *res = nv04_resource(view->resource);
1170
1171 if (res->base.target == PIPE_BUFFER) {
1172 if (view->access & PIPE_IMAGE_ACCESS_WRITE)
1173 nvc0_mark_image_range_valid(view);
1174 }
1175
1176 nve4_set_surface_info(push, view, nvc0);
1177 BCTX_REFN(nvc0->bufctx_3d, 3D_SUF, res, RDWR);
1178
1179 if (nvc0->screen->base.class_3d >= GM107_3D_CLASS)
1180 gm107_validate_surfaces(nvc0, view, s, i);
1181 } else {
1182 for (j = 0; j < 16; j++)
1183 PUSH_DATA(push, 0);
1184 }
1185 }
1186 }
1187 }
1188
1189 void
1190 nvc0_validate_surfaces(struct nvc0_context *nvc0)
1191 {
1192 if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
1193 nve4_update_surface_bindings(nvc0);
1194 } else {
1195 nvc0_update_surface_bindings(nvc0);
1196 }
1197 }
1198
1199
1200 static const uint8_t nve4_su_format_map[PIPE_FORMAT_COUNT] =
1201 {
1202 [PIPE_FORMAT_R32G32B32A32_FLOAT] = GK104_IMAGE_FORMAT_RGBA32_FLOAT,
1203 [PIPE_FORMAT_R32G32B32A32_SINT] = GK104_IMAGE_FORMAT_RGBA32_SINT,
1204 [PIPE_FORMAT_R32G32B32A32_UINT] = GK104_IMAGE_FORMAT_RGBA32_UINT,
1205 [PIPE_FORMAT_R16G16B16A16_FLOAT] = GK104_IMAGE_FORMAT_RGBA16_FLOAT,
1206 [PIPE_FORMAT_R16G16B16A16_UNORM] = GK104_IMAGE_FORMAT_RGBA16_UNORM,
1207 [PIPE_FORMAT_R16G16B16A16_SNORM] = GK104_IMAGE_FORMAT_RGBA16_SNORM,
1208 [PIPE_FORMAT_R16G16B16A16_SINT] = GK104_IMAGE_FORMAT_RGBA16_SINT,
1209 [PIPE_FORMAT_R16G16B16A16_UINT] = GK104_IMAGE_FORMAT_RGBA16_UINT,
1210 [PIPE_FORMAT_B8G8R8A8_UNORM] = GK104_IMAGE_FORMAT_BGRA8_UNORM,
1211 [PIPE_FORMAT_R8G8B8A8_UNORM] = GK104_IMAGE_FORMAT_RGBA8_UNORM,
1212 [PIPE_FORMAT_R8G8B8A8_SNORM] = GK104_IMAGE_FORMAT_RGBA8_SNORM,
1213 [PIPE_FORMAT_R8G8B8A8_SINT] = GK104_IMAGE_FORMAT_RGBA8_SINT,
1214 [PIPE_FORMAT_R8G8B8A8_UINT] = GK104_IMAGE_FORMAT_RGBA8_UINT,
1215 [PIPE_FORMAT_R11G11B10_FLOAT] = GK104_IMAGE_FORMAT_R11G11B10_FLOAT,
1216 [PIPE_FORMAT_R10G10B10A2_UNORM] = GK104_IMAGE_FORMAT_RGB10_A2_UNORM,
1217 [PIPE_FORMAT_R10G10B10A2_UINT] = GK104_IMAGE_FORMAT_RGB10_A2_UINT,
1218 [PIPE_FORMAT_R32G32_FLOAT] = GK104_IMAGE_FORMAT_RG32_FLOAT,
1219 [PIPE_FORMAT_R32G32_SINT] = GK104_IMAGE_FORMAT_RG32_SINT,
1220 [PIPE_FORMAT_R32G32_UINT] = GK104_IMAGE_FORMAT_RG32_UINT,
1221 [PIPE_FORMAT_R16G16_FLOAT] = GK104_IMAGE_FORMAT_RG16_FLOAT,
1222 [PIPE_FORMAT_R16G16_UNORM] = GK104_IMAGE_FORMAT_RG16_UNORM,
1223 [PIPE_FORMAT_R16G16_SNORM] = GK104_IMAGE_FORMAT_RG16_SNORM,
1224 [PIPE_FORMAT_R16G16_SINT] = GK104_IMAGE_FORMAT_RG16_SINT,
1225 [PIPE_FORMAT_R16G16_UINT] = GK104_IMAGE_FORMAT_RG16_UINT,
1226 [PIPE_FORMAT_R8G8_UNORM] = GK104_IMAGE_FORMAT_RG8_UNORM,
1227 [PIPE_FORMAT_R8G8_SNORM] = GK104_IMAGE_FORMAT_RG8_SNORM,
1228 [PIPE_FORMAT_R8G8_SINT] = GK104_IMAGE_FORMAT_RG8_SINT,
1229 [PIPE_FORMAT_R8G8_UINT] = GK104_IMAGE_FORMAT_RG8_UINT,
1230 [PIPE_FORMAT_R32_FLOAT] = GK104_IMAGE_FORMAT_R32_FLOAT,
1231 [PIPE_FORMAT_R32_SINT] = GK104_IMAGE_FORMAT_R32_SINT,
1232 [PIPE_FORMAT_R32_UINT] = GK104_IMAGE_FORMAT_R32_UINT,
1233 [PIPE_FORMAT_R16_FLOAT] = GK104_IMAGE_FORMAT_R16_FLOAT,
1234 [PIPE_FORMAT_R16_UNORM] = GK104_IMAGE_FORMAT_R16_UNORM,
1235 [PIPE_FORMAT_R16_SNORM] = GK104_IMAGE_FORMAT_R16_SNORM,
1236 [PIPE_FORMAT_R16_SINT] = GK104_IMAGE_FORMAT_R16_SINT,
1237 [PIPE_FORMAT_R16_UINT] = GK104_IMAGE_FORMAT_R16_UINT,
1238 [PIPE_FORMAT_R8_UNORM] = GK104_IMAGE_FORMAT_R8_UNORM,
1239 [PIPE_FORMAT_R8_SNORM] = GK104_IMAGE_FORMAT_R8_SNORM,
1240 [PIPE_FORMAT_R8_SINT] = GK104_IMAGE_FORMAT_R8_SINT,
1241 [PIPE_FORMAT_R8_UINT] = GK104_IMAGE_FORMAT_R8_UINT,
1242 };
1243
1244 /* Auxiliary format description values for surface instructions.
1245 * (log2(bytes per pixel) << 12) | (unk8 << 8) | unk22
1246 */
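/* Worked example: the RGBA8 formats use 0x2a24, i.e. log2cpp = 2 (4 bytes per
 * pixel, bits 12-15), 0xa in bits 8-11 (OR'd into info[1] by
 * nve4_set_surface_info()), and 0x24 in the low byte (shifted into the upper
 * bits of info[2]).
 */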
1247 static const uint16_t nve4_su_format_aux_map[PIPE_FORMAT_COUNT] =
1248 {
1249 [PIPE_FORMAT_R32G32B32A32_FLOAT] = 0x4842,
1250 [PIPE_FORMAT_R32G32B32A32_SINT] = 0x4842,
1251 [PIPE_FORMAT_R32G32B32A32_UINT] = 0x4842,
1252
1253 [PIPE_FORMAT_R16G16B16A16_UNORM] = 0x3933,
1254 [PIPE_FORMAT_R16G16B16A16_SNORM] = 0x3933,
1255 [PIPE_FORMAT_R16G16B16A16_SINT] = 0x3933,
1256 [PIPE_FORMAT_R16G16B16A16_UINT] = 0x3933,
1257 [PIPE_FORMAT_R16G16B16A16_FLOAT] = 0x3933,
1258
1259 [PIPE_FORMAT_R32G32_FLOAT] = 0x3433,
1260 [PIPE_FORMAT_R32G32_SINT] = 0x3433,
1261 [PIPE_FORMAT_R32G32_UINT] = 0x3433,
1262
1263 [PIPE_FORMAT_R10G10B10A2_UNORM] = 0x2a24,
1264 [PIPE_FORMAT_R10G10B10A2_UINT] = 0x2a24,
1265 [PIPE_FORMAT_B8G8R8A8_UNORM] = 0x2a24,
1266 [PIPE_FORMAT_R8G8B8A8_UNORM] = 0x2a24,
1267 [PIPE_FORMAT_R8G8B8A8_SNORM] = 0x2a24,
1268 [PIPE_FORMAT_R8G8B8A8_SINT] = 0x2a24,
1269 [PIPE_FORMAT_R8G8B8A8_UINT] = 0x2a24,
1270 [PIPE_FORMAT_R11G11B10_FLOAT] = 0x2a24,
1271
1272 [PIPE_FORMAT_R16G16_UNORM] = 0x2524,
1273 [PIPE_FORMAT_R16G16_SNORM] = 0x2524,
1274 [PIPE_FORMAT_R16G16_SINT] = 0x2524,
1275 [PIPE_FORMAT_R16G16_UINT] = 0x2524,
1276 [PIPE_FORMAT_R16G16_FLOAT] = 0x2524,
1277
1278 [PIPE_FORMAT_R32_SINT] = 0x2024,
1279 [PIPE_FORMAT_R32_UINT] = 0x2024,
1280 [PIPE_FORMAT_R32_FLOAT] = 0x2024,
1281
1282 [PIPE_FORMAT_R8G8_UNORM] = 0x1615,
1283 [PIPE_FORMAT_R8G8_SNORM] = 0x1615,
1284 [PIPE_FORMAT_R8G8_SINT] = 0x1615,
1285 [PIPE_FORMAT_R8G8_UINT] = 0x1615,
1286
1287 [PIPE_FORMAT_R16_UNORM] = 0x1115,
1288 [PIPE_FORMAT_R16_SNORM] = 0x1115,
1289 [PIPE_FORMAT_R16_SINT] = 0x1115,
1290 [PIPE_FORMAT_R16_UINT] = 0x1115,
1291 [PIPE_FORMAT_R16_FLOAT] = 0x1115,
1292
1293 [PIPE_FORMAT_R8_UNORM] = 0x0206,
1294 [PIPE_FORMAT_R8_SNORM] = 0x0206,
1295 [PIPE_FORMAT_R8_SINT] = 0x0206,
1296 [PIPE_FORMAT_R8_UINT] = 0x0206
1297 };
1298
1299 /* NOTE: These are hardcoded offsets for the shader library.
1300 * TODO: Automate them.
1301 */
1302 static const uint16_t nve4_suldp_lib_offset[PIPE_FORMAT_COUNT] =
1303 {
1304 [PIPE_FORMAT_R32G32B32A32_FLOAT] = 0x218,
1305 [PIPE_FORMAT_R32G32B32A32_SINT] = 0x218,
1306 [PIPE_FORMAT_R32G32B32A32_UINT] = 0x218,
1307 [PIPE_FORMAT_R16G16B16A16_UNORM] = 0x248,
1308 [PIPE_FORMAT_R16G16B16A16_SNORM] = 0x2b8,
1309 [PIPE_FORMAT_R16G16B16A16_SINT] = 0x330,
1310 [PIPE_FORMAT_R16G16B16A16_UINT] = 0x388,
1311 [PIPE_FORMAT_R16G16B16A16_FLOAT] = 0x3d8,
1312 [PIPE_FORMAT_R32G32_FLOAT] = 0x428,
1313 [PIPE_FORMAT_R32G32_SINT] = 0x468,
1314 [PIPE_FORMAT_R32G32_UINT] = 0x468,
1315 [PIPE_FORMAT_R10G10B10A2_UNORM] = 0x4a8,
1316 [PIPE_FORMAT_R10G10B10A2_UINT] = 0x530,
1317 [PIPE_FORMAT_R8G8B8A8_UNORM] = 0x588,
1318 [PIPE_FORMAT_R8G8B8A8_SNORM] = 0x5f8,
1319 [PIPE_FORMAT_R8G8B8A8_SINT] = 0x670,
1320 [PIPE_FORMAT_R8G8B8A8_UINT] = 0x6c8,
1321 [PIPE_FORMAT_B5G6R5_UNORM] = 0x718,
1322 [PIPE_FORMAT_B5G5R5X1_UNORM] = 0x7a0,
1323 [PIPE_FORMAT_R16G16_UNORM] = 0x828,
1324 [PIPE_FORMAT_R16G16_SNORM] = 0x890,
1325 [PIPE_FORMAT_R16G16_SINT] = 0x8f0,
1326 [PIPE_FORMAT_R16G16_UINT] = 0x948,
1327 [PIPE_FORMAT_R16G16_FLOAT] = 0x998,
1328 [PIPE_FORMAT_R32_FLOAT] = 0x9e8,
1329 [PIPE_FORMAT_R32_SINT] = 0xa30,
1330 [PIPE_FORMAT_R32_UINT] = 0xa30,
1331 [PIPE_FORMAT_R8G8_UNORM] = 0xa78,
1332 [PIPE_FORMAT_R8G8_SNORM] = 0xae0,
1333 [PIPE_FORMAT_R8G8_UINT] = 0xb48,
1334 [PIPE_FORMAT_R8G8_SINT] = 0xb98,
1335 [PIPE_FORMAT_R16_UNORM] = 0xbe8,
1336 [PIPE_FORMAT_R16_SNORM] = 0xc48,
1337 [PIPE_FORMAT_R16_SINT] = 0xca0,
1338 [PIPE_FORMAT_R16_UINT] = 0xce8,
1339 [PIPE_FORMAT_R16_FLOAT] = 0xd30,
1340 [PIPE_FORMAT_R8_UNORM] = 0xd88,
1341 [PIPE_FORMAT_R8_SNORM] = 0xde0,
1342 [PIPE_FORMAT_R8_SINT] = 0xe38,
1343 [PIPE_FORMAT_R8_UINT] = 0xe88,
1344 [PIPE_FORMAT_R11G11B10_FLOAT] = 0xed0
1345 };