/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"
#include "nv50/nv50_texture.xml.h"
#include "nv50/nv50_defs.xml.h"

#include "util/u_format.h"

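/* On NVE4+ each texture/sampler binding is packed into one 32-bit handle:
 * the TIC entry index occupies the low 20 bits and the TSC entry index
 * bits 20..31 (see nve4_validate_tic/tsc below). The values here are OR'ed
 * into nvc0->tex_handles[][] to mark the respective half of a handle invalid.
 */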
#define NVE4_TIC_ENTRY_INVALID 0x000fffff
#define NVE4_TSC_ENTRY_INVALID 0xfff00000

#define NV50_TIC_0_SWIZZLE__MASK                      \
   (NV50_TIC_0_MAPA__MASK | NV50_TIC_0_MAPB__MASK |   \
    NV50_TIC_0_MAPG__MASK | NV50_TIC_0_MAPR__MASK)

static inline uint32_t
nv50_tic_swizzle(uint32_t tc, unsigned swz, bool tex_int)
{
   switch (swz) {
   case PIPE_SWIZZLE_RED:
      return (tc & NV50_TIC_0_MAPR__MASK) >> NV50_TIC_0_MAPR__SHIFT;
   case PIPE_SWIZZLE_GREEN:
      return (tc & NV50_TIC_0_MAPG__MASK) >> NV50_TIC_0_MAPG__SHIFT;
   case PIPE_SWIZZLE_BLUE:
      return (tc & NV50_TIC_0_MAPB__MASK) >> NV50_TIC_0_MAPB__SHIFT;
   case PIPE_SWIZZLE_ALPHA:
      return (tc & NV50_TIC_0_MAPA__MASK) >> NV50_TIC_0_MAPA__SHIFT;
   case PIPE_SWIZZLE_ONE:
      return tex_int ? NV50_TIC_MAP_ONE_INT : NV50_TIC_MAP_ONE_FLOAT;
   case PIPE_SWIZZLE_ZERO:
   default:
      return NV50_TIC_MAP_ZERO;
   }
}

struct pipe_sampler_view *
nvc0_create_sampler_view(struct pipe_context *pipe,
                         struct pipe_resource *res,
                         const struct pipe_sampler_view *templ)
{
   uint32_t flags = 0;

   if (templ->target == PIPE_TEXTURE_RECT || templ->target == PIPE_BUFFER)
      flags |= NV50_TEXVIEW_SCALED_COORDS;

   return nvc0_create_texture_view(pipe, res, templ, flags, templ->target);
}

struct pipe_sampler_view *
nvc0_create_texture_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ,
                         uint32_t flags,
                         enum pipe_texture_target target)
{
   const struct util_format_description *desc;
   uint64_t address;
   uint32_t *tic;
   uint32_t swz[4];
   uint32_t width, height;
   uint32_t depth;
   struct nv50_tic_entry *view;
   struct nv50_miptree *mt;
   bool tex_int;

   view = MALLOC_STRUCT(nv50_tic_entry);
   if (!view)
      return NULL;
   mt = nv50_miptree(texture);

   view->pipe = *templ;
   view->pipe.reference.count = 1;
   view->pipe.texture = NULL;
   view->pipe.context = pipe;

   view->id = -1;

   pipe_resource_reference(&view->pipe.texture, texture);

   tic = &view->tic[0];

   desc = util_format_description(view->pipe.format);

   tic[0] = nvc0_format_table[view->pipe.format].tic;

   tex_int = util_format_is_pure_integer(view->pipe.format);

   swz[0] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_r, tex_int);
   swz[1] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_g, tex_int);
   swz[2] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_b, tex_int);
   swz[3] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_a, tex_int);
   tic[0] = (tic[0] & ~NV50_TIC_0_SWIZZLE__MASK) |
      (swz[0] << NV50_TIC_0_MAPR__SHIFT) |
      (swz[1] << NV50_TIC_0_MAPG__SHIFT) |
      (swz[2] << NV50_TIC_0_MAPB__SHIFT) |
      (swz[3] << NV50_TIC_0_MAPA__SHIFT);

   address = mt->base.address;

   tic[2] = 0x10001000 | NV50_TIC_2_NO_BORDER;

   if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
      tic[2] |= NV50_TIC_2_COLORSPACE_SRGB;

   if (!(flags & NV50_TEXVIEW_SCALED_COORDS))
      tic[2] |= NV50_TIC_2_NORMALIZED_COORDS;

   /* check for linear storage type */
   if (unlikely(!nouveau_bo_memtype(nv04_resource(texture)->bo))) {
      if (texture->target == PIPE_BUFFER) {
         assert(!(tic[2] & NV50_TIC_2_NORMALIZED_COORDS));
         address +=
            view->pipe.u.buf.first_element * desc->block.bits / 8;
         tic[2] |= NV50_TIC_2_LINEAR | NV50_TIC_2_TARGET_BUFFER;
         tic[3] = 0;
         tic[4] = /* width */
            view->pipe.u.buf.last_element - view->pipe.u.buf.first_element + 1;
         tic[5] = 0;
      } else {
         /* must be 2D texture without mip maps */
         tic[2] |= NV50_TIC_2_LINEAR | NV50_TIC_2_TARGET_RECT;
         tic[3] = mt->level[0].pitch;
         tic[4] = mt->base.base.width0;
         tic[5] = (1 << 16) | mt->base.base.height0;
      }
      tic[6] =
      tic[7] = 0;
      tic[1] = address;
      tic[2] |= address >> 32;
      return &view->pipe;
   }

   tic[2] |=
      ((mt->level[0].tile_mode & 0x0f0) << (22 - 4)) |
      ((mt->level[0].tile_mode & 0xf00) << (25 - 8));

   depth = MAX2(mt->base.base.array_size, mt->base.base.depth0);

   if (mt->base.base.array_size > 1) {
      /* there doesn't seem to be a base layer field in TIC */
      address += view->pipe.u.tex.first_layer * mt->layer_stride;
      depth = view->pipe.u.tex.last_layer - view->pipe.u.tex.first_layer + 1;
   }
   tic[1] = address;
   tic[2] |= address >> 32;

   switch (target) {
   case PIPE_TEXTURE_1D:
      tic[2] |= NV50_TIC_2_TARGET_1D;
      break;
   case PIPE_TEXTURE_2D:
      tic[2] |= NV50_TIC_2_TARGET_2D;
      break;
   case PIPE_TEXTURE_RECT:
      tic[2] |= NV50_TIC_2_TARGET_2D;
      break;
   case PIPE_TEXTURE_3D:
      tic[2] |= NV50_TIC_2_TARGET_3D;
      break;
   case PIPE_TEXTURE_CUBE:
      depth /= 6;
      tic[2] |= NV50_TIC_2_TARGET_CUBE;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_1D_ARRAY;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_2D_ARRAY;
      break;
   case PIPE_TEXTURE_CUBE_ARRAY:
      depth /= 6;
      tic[2] |= NV50_TIC_2_TARGET_CUBE_ARRAY;
      break;
   default:
      NOUVEAU_ERR("unexpected/invalid texture target: %d\n",
                  mt->base.base.target);
      return NULL;
   }

   tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000;

   if (flags & NV50_TEXVIEW_ACCESS_RESOLVE) {
      width = mt->base.base.width0 << mt->ms_x;
      height = mt->base.base.height0 << mt->ms_y;
   } else {
      width = mt->base.base.width0;
      height = mt->base.base.height0;
   }

   tic[4] = (1 << 31) | width;

   tic[5] = height & 0xffff;
   tic[5] |= depth << 16;
   tic[5] |= mt->base.base.last_level << 28;

   /* sampling points: (?) */
   if (flags & NV50_TEXVIEW_ACCESS_RESOLVE)
      tic[6] = (mt->ms_x > 1) ? 0x88000000 : 0x03000000;
   else
      tic[6] = 0x03000000;

   tic[7] = (view->pipe.u.tex.last_level << 4) | view->pipe.u.tex.first_level;
   tic[7] |= mt->ms_mode << 12;

   return &view->pipe;
}

static bool
nvc0_validate_tic(struct nvc0_context *nvc0, int s)
{
   uint32_t commands[32];
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *txc = nvc0->screen->txc;
   unsigned i;
   unsigned n = 0;
   bool need_flush = false;

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         if (dirty)
            commands[n++] = (i << 1) | 0;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 17);
         BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
         PUSH_DATA (push, 0x100111);
         BEGIN_NIC0(push, NVC0_M2MF(DATA), 8);
         PUSH_DATAp(push, &tic->tic[0], 8);

         need_flush = true;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
         NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_cache_flush_count, 1);
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      if (!dirty)
         continue;
      commands[n++] = (tic->id << 9) | (i << 1) | 1;

      BCTX_REFN(nvc0->bufctx_3d, TEX(s, i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i)
      commands[n++] = (i << 1) | 0;

   nvc0->state.num_textures[s] = nvc0->num_textures[s];

   if (n) {
      BEGIN_NIC0(push, NVC0_3D(BIND_TIC(s)), n);
      PUSH_DATAp(push, commands, n);
   }
   nvc0->textures_dirty[s] = 0;

   return need_flush;
}

static bool
nve4_validate_tic(struct nvc0_context *nvc0, unsigned s)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   bool need_flush = false;

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), 9);
         PUSH_DATA (push, 0x1001);
         PUSH_DATAp(push, &tic->tic[0], 8);

         need_flush = true;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0->tex_handles[s][i] &= ~NVE4_TIC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tic->id;
      if (dirty)
         BCTX_REFN(nvc0->bufctx_3d, TEX(s, i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i) {
      nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
      nvc0->textures_dirty[s] |= 1 << i;
   }

   nvc0->state.num_textures[s] = nvc0->num_textures[s];

   return need_flush;
}

void nvc0_validate_textures(struct nvc0_context *nvc0)
{
   bool need_flush = false;
   int i;

   for (i = 0; i < 5; i++) {
      if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
         need_flush |= nve4_validate_tic(nvc0, i);
      else
         need_flush |= nvc0_validate_tic(nvc0, i);
   }

   if (need_flush) {
      BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(TIC_FLUSH), 1);
      PUSH_DATA (nvc0->base.pushbuf, 0);
   }
}

static bool
nvc0_validate_tsc(struct nvc0_context *nvc0, int s)
{
   uint32_t commands[16];
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   unsigned n = 0;
   bool need_flush = false;

   for (i = 0; i < nvc0->num_samplers[s]; ++i) {
      struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);

      if (!(nvc0->samplers_dirty[s] & (1 << i)))
         continue;
      if (!tsc) {
         commands[n++] = (i << 4) | 0;
         continue;
      }
      if (tsc->id < 0) {
         tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);

         nvc0_m2mf_push_linear(&nvc0->base, nvc0->screen->txc,
                               65536 + tsc->id * 32,
                               NV_VRAM_DOMAIN(&nvc0->screen->base),
                               32, tsc->tsc);
         need_flush = true;
      }
      nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);

      commands[n++] = (tsc->id << 12) | (i << 4) | 1;
   }
   for (; i < nvc0->state.num_samplers[s]; ++i)
      commands[n++] = (i << 4) | 0;

   nvc0->state.num_samplers[s] = nvc0->num_samplers[s];

   if (n) {
      BEGIN_NIC0(push, NVC0_3D(BIND_TSC(s)), n);
      PUSH_DATAp(push, commands, n);
   }
   nvc0->samplers_dirty[s] = 0;

   return need_flush;
}

bool
nve4_validate_tsc(struct nvc0_context *nvc0, int s)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   bool need_flush = false;

   for (i = 0; i < nvc0->num_samplers[s]; ++i) {
      struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);

      if (!tsc) {
         nvc0->tex_handles[s][i] |= NVE4_TSC_ENTRY_INVALID;
         continue;
      }
      if (tsc->id < 0) {
         tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + 65536 + (tsc->id * 32));
         PUSH_DATA (push, txc->offset + 65536 + (tsc->id * 32));
         BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), 9);
         PUSH_DATA (push, 0x1001);
         PUSH_DATAp(push, &tsc->tsc[0], 8);

         need_flush = true;
      }
      nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);

      nvc0->tex_handles[s][i] &= ~NVE4_TSC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tsc->id << 20;
   }
   for (; i < nvc0->state.num_samplers[s]; ++i) {
      nvc0->tex_handles[s][i] |= NVE4_TSC_ENTRY_INVALID;
      nvc0->samplers_dirty[s] |= 1 << i;
   }

   nvc0->state.num_samplers[s] = nvc0->num_samplers[s];

   return need_flush;
}

void nvc0_validate_samplers(struct nvc0_context *nvc0)
{
   bool need_flush = false;
   int i;

   for (i = 0; i < 5; i++) {
      if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
         need_flush |= nve4_validate_tsc(nvc0, i);
      else
         need_flush |= nvc0_validate_tsc(nvc0, i);
   }

   if (need_flush) {
      BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(TSC_FLUSH), 1);
      PUSH_DATA (nvc0->base.pushbuf, 0);
   }
}

/* Upload the "diagonal" entries for the possible texture sources ($t == $s).
 * At some point we might want to get a list of the combinations used by a
 * shader and fill in those entries instead of having it extract the handles.
 */
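/* For reference, the layout written by the code below: each shader stage s
 * gets a 512-byte slice of uniform_bo at offset (5 << 16) + s * (1 << 9),
 * and word (8 + i) of that slice receives the combined TIC/TSC handle for
 * texture/sampler unit i.
 */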
void
nve4_set_tex_handles(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint64_t address;
   unsigned s;

   if (nvc0->screen->base.class_3d < NVE4_3D_CLASS)
      return;
   address = nvc0->screen->uniform_bo->offset + (5 << 16);

   for (s = 0; s < 5; ++s, address += (1 << 9)) {
      uint32_t dirty = nvc0->textures_dirty[s] | nvc0->samplers_dirty[s];
      if (!dirty)
         continue;
      BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
      PUSH_DATA (push, 512);
      PUSH_DATAh(push, address);
      PUSH_DATA (push, address);
      do {
         int i = ffs(dirty) - 1;
         dirty &= ~(1 << i);

         BEGIN_NVC0(push, NVC0_3D(CB_POS), 2);
         PUSH_DATA (push, (8 + i) * 4);
         PUSH_DATA (push, nvc0->tex_handles[s][i]);
      } while (dirty);

      nvc0->textures_dirty[s] = 0;
      nvc0->samplers_dirty[s] = 0;
   }
}


static const uint8_t nve4_su_format_map[PIPE_FORMAT_COUNT];
static const uint16_t nve4_su_format_aux_map[PIPE_FORMAT_COUNT];
static const uint16_t nve4_suldp_lib_offset[PIPE_FORMAT_COUNT];

void
nve4_set_surface_info(struct nouveau_pushbuf *push,
                      struct pipe_surface *psf,
                      struct nvc0_screen *screen)
{
   struct nv50_surface *sf = nv50_surface(psf);
   struct nv04_resource *res;
   uint64_t address;
   uint32_t *const info = push->cur;
   uint8_t log2cpp;

   if (psf && !nve4_su_format_map[psf->format])
      NOUVEAU_ERR("unsupported surface format, try is_format_supported() !\n");

   push->cur += 16;

   if (!psf || !nve4_su_format_map[psf->format]) {
      memset(info, 0, 16 * sizeof(*info));

      info[0] = 0xbadf0000;
      info[1] = 0x80004000;
      info[12] = nve4_suldp_lib_offset[PIPE_FORMAT_R32G32B32A32_UINT] +
         screen->lib_code->start;
      return;
   }
   res = nv04_resource(sf->base.texture);

   address = res->address + sf->offset;

   info[8] = sf->width;
   info[9] = sf->height;
   info[10] = sf->depth;
   switch (res->base.target) {
   case PIPE_TEXTURE_1D_ARRAY:
      info[11] = 1;
      break;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      info[11] = 2;
      break;
   case PIPE_TEXTURE_3D:
      info[11] = 3;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
      info[11] = 4;
      break;
   default:
      info[11] = 0;
      break;
   }
   log2cpp = (0xf000 & nve4_su_format_aux_map[sf->base.format]) >> 12;

   info[12] = nve4_suldp_lib_offset[sf->base.format] + screen->lib_code->start;

   /* limit in bytes for raw access */
   info[13] = (0x06 << 22) | ((sf->width << log2cpp) - 1);

   info[1] = nve4_su_format_map[sf->base.format];

#if 0
   switch (util_format_get_blocksizebits(sf->base.format)) {
   case 16: info[1] |= 1 << 16; break;
   case 32: info[1] |= 2 << 16; break;
   case 64: info[1] |= 3 << 16; break;
   case 128: info[1] |= 4 << 16; break;
   default:
      break;
   }
#else
   info[1] |= log2cpp << 16;
   info[1] |= 0x4000;
   info[1] |= (0x0f00 & nve4_su_format_aux_map[sf->base.format]);
#endif

   if (res->base.target == PIPE_BUFFER) {
      info[0] = address >> 8;
      info[2] = sf->width - 1;
      info[2] |= (0xff & nve4_su_format_aux_map[sf->base.format]) << 22;
      info[3] = 0;
      info[4] = 0;
      info[5] = 0;
      info[6] = 0;
      info[7] = 0;
      info[14] = 0;
      info[15] = 0;
   } else {
      struct nv50_miptree *mt = nv50_miptree(&res->base);
      struct nv50_miptree_level *lvl = &mt->level[sf->base.u.tex.level];
      const unsigned z = sf->base.u.tex.first_layer;

      if (z) {
         if (mt->layout_3d) {
            address += nvc0_mt_zslice_offset(mt, psf->u.tex.level, z);
            /* doesn't work if z passes z-tile boundary */
            assert(sf->depth == 1);
         } else {
            address += mt->layer_stride * z;
         }
      }
      info[0] = address >> 8;
      info[2] = sf->width - 1;
      /* NOTE: this is really important: */
      info[2] |= (0xff & nve4_su_format_aux_map[sf->base.format]) << 22;
      info[3] = (0x88 << 24) | (lvl->pitch / 64);
      info[4] = sf->height - 1;
      info[4] |= (lvl->tile_mode & 0x0f0) << 25;
      info[4] |= NVC0_TILE_SHIFT_Y(lvl->tile_mode) << 22;
      info[5] = mt->layer_stride >> 8;
      info[6] = sf->depth - 1;
      info[6] |= (lvl->tile_mode & 0xf00) << 21;
      info[6] |= NVC0_TILE_SHIFT_Z(lvl->tile_mode) << 22;
      info[7] = 0;
      info[14] = mt->ms_x;
      info[15] = mt->ms_y;
   }
}

static inline void
nvc0_update_surface_bindings(struct nvc0_context *nvc0)
{
   /* TODO */
}

static inline void
nve4_update_surface_bindings(struct nvc0_context *nvc0)
{
   /* TODO */
}

void
nvc0_validate_surfaces(struct nvc0_context *nvc0)
{
   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
      nve4_update_surface_bindings(nvc0);
   } else {
      nvc0_update_surface_bindings(nvc0);
   }
}


static const uint8_t nve4_su_format_map[PIPE_FORMAT_COUNT] =
{
   [PIPE_FORMAT_R32G32B32A32_FLOAT] = NVE4_IMAGE_FORMAT_RGBA32_FLOAT,
   [PIPE_FORMAT_R32G32B32A32_SINT] = NVE4_IMAGE_FORMAT_RGBA32_SINT,
   [PIPE_FORMAT_R32G32B32A32_UINT] = NVE4_IMAGE_FORMAT_RGBA32_UINT,
   [PIPE_FORMAT_R16G16B16A16_FLOAT] = NVE4_IMAGE_FORMAT_RGBA16_FLOAT,
   [PIPE_FORMAT_R16G16B16A16_UNORM] = NVE4_IMAGE_FORMAT_RGBA16_UNORM,
   [PIPE_FORMAT_R16G16B16A16_SNORM] = NVE4_IMAGE_FORMAT_RGBA16_SNORM,
   [PIPE_FORMAT_R16G16B16A16_SINT] = NVE4_IMAGE_FORMAT_RGBA16_SINT,
   [PIPE_FORMAT_R16G16B16A16_UINT] = NVE4_IMAGE_FORMAT_RGBA16_UINT,
   [PIPE_FORMAT_R8G8B8A8_UNORM] = NVE4_IMAGE_FORMAT_RGBA8_UNORM,
   [PIPE_FORMAT_R8G8B8A8_SNORM] = NVE4_IMAGE_FORMAT_RGBA8_SNORM,
   [PIPE_FORMAT_R8G8B8A8_SINT] = NVE4_IMAGE_FORMAT_RGBA8_SINT,
   [PIPE_FORMAT_R8G8B8A8_UINT] = NVE4_IMAGE_FORMAT_RGBA8_UINT,
   [PIPE_FORMAT_R11G11B10_FLOAT] = NVE4_IMAGE_FORMAT_R11G11B10_FLOAT,
   [PIPE_FORMAT_R10G10B10A2_UNORM] = NVE4_IMAGE_FORMAT_RGB10_A2_UNORM,
/* [PIPE_FORMAT_R10G10B10A2_UINT] = NVE4_IMAGE_FORMAT_RGB10_A2_UINT, */
   [PIPE_FORMAT_R32G32_FLOAT] = NVE4_IMAGE_FORMAT_RG32_FLOAT,
   [PIPE_FORMAT_R32G32_SINT] = NVE4_IMAGE_FORMAT_RG32_SINT,
   [PIPE_FORMAT_R32G32_UINT] = NVE4_IMAGE_FORMAT_RG32_UINT,
   [PIPE_FORMAT_R16G16_FLOAT] = NVE4_IMAGE_FORMAT_RG16_FLOAT,
   [PIPE_FORMAT_R16G16_UNORM] = NVE4_IMAGE_FORMAT_RG16_UNORM,
   [PIPE_FORMAT_R16G16_SNORM] = NVE4_IMAGE_FORMAT_RG16_SNORM,
   [PIPE_FORMAT_R16G16_SINT] = NVE4_IMAGE_FORMAT_RG16_SINT,
   [PIPE_FORMAT_R16G16_UINT] = NVE4_IMAGE_FORMAT_RG16_UINT,
   [PIPE_FORMAT_R8G8_UNORM] = NVE4_IMAGE_FORMAT_RG8_UNORM,
   [PIPE_FORMAT_R8G8_SNORM] = NVE4_IMAGE_FORMAT_RG8_SNORM,
   [PIPE_FORMAT_R8G8_SINT] = NVE4_IMAGE_FORMAT_RG8_SINT,
   [PIPE_FORMAT_R8G8_UINT] = NVE4_IMAGE_FORMAT_RG8_UINT,
   [PIPE_FORMAT_R32_FLOAT] = NVE4_IMAGE_FORMAT_R32_FLOAT,
   [PIPE_FORMAT_R32_SINT] = NVE4_IMAGE_FORMAT_R32_SINT,
   [PIPE_FORMAT_R32_UINT] = NVE4_IMAGE_FORMAT_R32_UINT,
   [PIPE_FORMAT_R16_FLOAT] = NVE4_IMAGE_FORMAT_R16_FLOAT,
   [PIPE_FORMAT_R16_UNORM] = NVE4_IMAGE_FORMAT_R16_UNORM,
   [PIPE_FORMAT_R16_SNORM] = NVE4_IMAGE_FORMAT_R16_SNORM,
   [PIPE_FORMAT_R16_SINT] = NVE4_IMAGE_FORMAT_R16_SINT,
   [PIPE_FORMAT_R16_UINT] = NVE4_IMAGE_FORMAT_R16_UINT,
   [PIPE_FORMAT_R8_UNORM] = NVE4_IMAGE_FORMAT_R8_UNORM,
   [PIPE_FORMAT_R8_SNORM] = NVE4_IMAGE_FORMAT_R8_SNORM,
   [PIPE_FORMAT_R8_SINT] = NVE4_IMAGE_FORMAT_R8_SINT,
   [PIPE_FORMAT_R8_UINT] = NVE4_IMAGE_FORMAT_R8_UINT,
};

/* Auxiliary format description values for surface instructions.
 * (log2(bytes per pixel) << 12) | (unk8 << 8) | unk22
 */
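/* Example decode (values from the table below): the RGBA32 formats use
 * 0x4842 = (4 << 12) | (0x8 << 8) | 0x42, i.e. log2(16 bytes per pixel) = 4;
 * the meaning of the two unknown low fields is not documented here.
 */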
static const uint16_t nve4_su_format_aux_map[PIPE_FORMAT_COUNT] =
{
   [PIPE_FORMAT_R32G32B32A32_FLOAT] = 0x4842,
   [PIPE_FORMAT_R32G32B32A32_SINT] = 0x4842,
   [PIPE_FORMAT_R32G32B32A32_UINT] = 0x4842,

   [PIPE_FORMAT_R16G16B16A16_UNORM] = 0x3933,
   [PIPE_FORMAT_R16G16B16A16_SNORM] = 0x3933,
   [PIPE_FORMAT_R16G16B16A16_SINT] = 0x3933,
   [PIPE_FORMAT_R16G16B16A16_UINT] = 0x3933,
   [PIPE_FORMAT_R16G16B16A16_FLOAT] = 0x3933,

   [PIPE_FORMAT_R32G32_FLOAT] = 0x3433,
   [PIPE_FORMAT_R32G32_SINT] = 0x3433,
   [PIPE_FORMAT_R32G32_UINT] = 0x3433,

   [PIPE_FORMAT_R10G10B10A2_UNORM] = 0x2a24,
/* [PIPE_FORMAT_R10G10B10A2_UINT] = 0x2a24, */
   [PIPE_FORMAT_R8G8B8A8_UNORM] = 0x2a24,
   [PIPE_FORMAT_R8G8B8A8_SNORM] = 0x2a24,
   [PIPE_FORMAT_R8G8B8A8_SINT] = 0x2a24,
   [PIPE_FORMAT_R8G8B8A8_UINT] = 0x2a24,
   [PIPE_FORMAT_R11G11B10_FLOAT] = 0x2a24,

   [PIPE_FORMAT_R16G16_UNORM] = 0x2524,
   [PIPE_FORMAT_R16G16_SNORM] = 0x2524,
   [PIPE_FORMAT_R16G16_SINT] = 0x2524,
   [PIPE_FORMAT_R16G16_UINT] = 0x2524,
   [PIPE_FORMAT_R16G16_FLOAT] = 0x2524,

   [PIPE_FORMAT_R32_SINT] = 0x2024,
   [PIPE_FORMAT_R32_UINT] = 0x2024,
   [PIPE_FORMAT_R32_FLOAT] = 0x2024,

   [PIPE_FORMAT_R8G8_UNORM] = 0x1615,
   [PIPE_FORMAT_R8G8_SNORM] = 0x1615,
   [PIPE_FORMAT_R8G8_SINT] = 0x1615,
   [PIPE_FORMAT_R8G8_UINT] = 0x1615,

   [PIPE_FORMAT_R16_UNORM] = 0x1115,
   [PIPE_FORMAT_R16_SNORM] = 0x1115,
   [PIPE_FORMAT_R16_SINT] = 0x1115,
   [PIPE_FORMAT_R16_UINT] = 0x1115,
   [PIPE_FORMAT_R16_FLOAT] = 0x1115,

   [PIPE_FORMAT_R8_UNORM] = 0x0206,
   [PIPE_FORMAT_R8_SNORM] = 0x0206,
   [PIPE_FORMAT_R8_SINT] = 0x0206,
   [PIPE_FORMAT_R8_UINT] = 0x0206
};

/* NOTE: These are hardcoded offsets for the shader library.
 * TODO: Automate them.
 */
static const uint16_t nve4_suldp_lib_offset[PIPE_FORMAT_COUNT] =
{
   [PIPE_FORMAT_R32G32B32A32_FLOAT] = 0x218,
   [PIPE_FORMAT_R32G32B32A32_SINT] = 0x218,
   [PIPE_FORMAT_R32G32B32A32_UINT] = 0x218,
   [PIPE_FORMAT_R16G16B16A16_UNORM] = 0x248,
   [PIPE_FORMAT_R16G16B16A16_SNORM] = 0x2b8,
   [PIPE_FORMAT_R16G16B16A16_SINT] = 0x330,
   [PIPE_FORMAT_R16G16B16A16_UINT] = 0x388,
   [PIPE_FORMAT_R16G16B16A16_FLOAT] = 0x3d8,
   [PIPE_FORMAT_R32G32_FLOAT] = 0x428,
   [PIPE_FORMAT_R32G32_SINT] = 0x468,
   [PIPE_FORMAT_R32G32_UINT] = 0x468,
   [PIPE_FORMAT_R10G10B10A2_UNORM] = 0x4a8,
/* [PIPE_FORMAT_R10G10B10A2_UINT] = 0x530, */
   [PIPE_FORMAT_R8G8B8A8_UNORM] = 0x588,
   [PIPE_FORMAT_R8G8B8A8_SNORM] = 0x5f8,
   [PIPE_FORMAT_R8G8B8A8_SINT] = 0x670,
   [PIPE_FORMAT_R8G8B8A8_UINT] = 0x6c8,
   [PIPE_FORMAT_B5G6R5_UNORM] = 0x718,
   [PIPE_FORMAT_B5G5R5X1_UNORM] = 0x7a0,
   [PIPE_FORMAT_R16G16_UNORM] = 0x828,
   [PIPE_FORMAT_R16G16_SNORM] = 0x890,
   [PIPE_FORMAT_R16G16_SINT] = 0x8f0,
   [PIPE_FORMAT_R16G16_UINT] = 0x948,
   [PIPE_FORMAT_R16G16_FLOAT] = 0x998,
   [PIPE_FORMAT_R32_FLOAT] = 0x9e8,
   [PIPE_FORMAT_R32_SINT] = 0xa30,
   [PIPE_FORMAT_R32_UINT] = 0xa30,
   [PIPE_FORMAT_R8G8_UNORM] = 0xa78,
   [PIPE_FORMAT_R8G8_SNORM] = 0xae0,
   [PIPE_FORMAT_R8G8_UINT] = 0xb48,
   [PIPE_FORMAT_R8G8_SINT] = 0xb98,
   [PIPE_FORMAT_R16_UNORM] = 0xbe8,
   [PIPE_FORMAT_R16_SNORM] = 0xc48,
   [PIPE_FORMAT_R16_SINT] = 0xca0,
   [PIPE_FORMAT_R16_UINT] = 0xce8,
   [PIPE_FORMAT_R16_FLOAT] = 0xd30,
   [PIPE_FORMAT_R8_UNORM] = 0xd88,
   [PIPE_FORMAT_R8_SNORM] = 0xde0,
   [PIPE_FORMAT_R8_SINT] = 0xe38,
   [PIPE_FORMAT_R8_UINT] = 0xe88,
   [PIPE_FORMAT_R11G11B10_FLOAT] = 0xed0
};