mesa.git: src/gallium/drivers/nouveau/nvc0/nvc0_tex.c (commit dc790f4d4c2f31cc4ce1439fa699e4f84d8525f3)
/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"
#include "nv50/nv50_texture.xml.h"
#include "nv50/nv50_defs.xml.h"

#include "util/u_format.h"

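/* On Kepler, each per-slot texture handle is packed as (TSC id << 20) | TIC id;
 * the values below mark the respective half of a handle as unbound. */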
#define NVE4_TIC_ENTRY_INVALID 0x000fffff
#define NVE4_TSC_ENTRY_INVALID 0xfff00000

#define NV50_TIC_0_SWIZZLE__MASK                      \
   (NV50_TIC_0_MAPA__MASK | NV50_TIC_0_MAPB__MASK |   \
    NV50_TIC_0_MAPG__MASK | NV50_TIC_0_MAPR__MASK)

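/* Translate a pipe_swizzle component into the hardware channel mapping, reusing
 * the format's default R/G/B/A mapping already encoded in TIC word 0 (tc). */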
static inline uint32_t
nv50_tic_swizzle(uint32_t tc, unsigned swz, bool tex_int)
{
   switch (swz) {
   case PIPE_SWIZZLE_RED:
      return (tc & NV50_TIC_0_MAPR__MASK) >> NV50_TIC_0_MAPR__SHIFT;
   case PIPE_SWIZZLE_GREEN:
      return (tc & NV50_TIC_0_MAPG__MASK) >> NV50_TIC_0_MAPG__SHIFT;
   case PIPE_SWIZZLE_BLUE:
      return (tc & NV50_TIC_0_MAPB__MASK) >> NV50_TIC_0_MAPB__SHIFT;
   case PIPE_SWIZZLE_ALPHA:
      return (tc & NV50_TIC_0_MAPA__MASK) >> NV50_TIC_0_MAPA__SHIFT;
   case PIPE_SWIZZLE_ONE:
      return tex_int ? NV50_TIC_MAP_ONE_INT : NV50_TIC_MAP_ONE_FLOAT;
   case PIPE_SWIZZLE_ZERO:
   default:
      return NV50_TIC_MAP_ZERO;
   }
}

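/* Default sampler view creation: RECT and BUFFER resources are sampled with
 * unnormalized coordinates, so ask for scaled (non-normalized) coordinates. */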
struct pipe_sampler_view *
nvc0_create_sampler_view(struct pipe_context *pipe,
                         struct pipe_resource *res,
                         const struct pipe_sampler_view *templ)
{
   uint32_t flags = 0;

   if (templ->target == PIPE_TEXTURE_RECT || templ->target == PIPE_BUFFER)
      flags |= NV50_TEXVIEW_SCALED_COORDS;

   return nvc0_create_texture_view(pipe, res, templ, flags, templ->target);
}

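/* Build the 8-dword TIC (texture image control) entry describing the view:
 * word 0 holds format and swizzle, words 1/2 the address and target/layout
 * bits, and the remaining words size, sample layout and mip level range. */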
struct pipe_sampler_view *
nvc0_create_texture_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ,
                         uint32_t flags,
                         enum pipe_texture_target target)
{
   const struct util_format_description *desc;
   uint64_t address;
   uint32_t *tic;
   uint32_t swz[4];
   uint32_t width, height;
   uint32_t depth;
   struct nv50_tic_entry *view;
   struct nv50_miptree *mt;
   bool tex_int;

   view = MALLOC_STRUCT(nv50_tic_entry);
   if (!view)
      return NULL;
   mt = nv50_miptree(texture);

   view->pipe = *templ;
   view->pipe.reference.count = 1;
   view->pipe.texture = NULL;
   view->pipe.context = pipe;

   view->id = -1;

   pipe_resource_reference(&view->pipe.texture, texture);

   tic = &view->tic[0];

   desc = util_format_description(view->pipe.format);

   tic[0] = nvc0_format_table[view->pipe.format].tic;

   tex_int = util_format_is_pure_integer(view->pipe.format);

   swz[0] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_r, tex_int);
   swz[1] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_g, tex_int);
   swz[2] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_b, tex_int);
   swz[3] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_a, tex_int);
   tic[0] = (tic[0] & ~NV50_TIC_0_SWIZZLE__MASK) |
      (swz[0] << NV50_TIC_0_MAPR__SHIFT) |
      (swz[1] << NV50_TIC_0_MAPG__SHIFT) |
      (swz[2] << NV50_TIC_0_MAPB__SHIFT) |
      (swz[3] << NV50_TIC_0_MAPA__SHIFT);

   address = mt->base.address;

   tic[2] = 0x10001000 | NV50_TIC_2_NO_BORDER;

   if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
      tic[2] |= NV50_TIC_2_COLORSPACE_SRGB;

   if (!(flags & NV50_TEXVIEW_SCALED_COORDS))
      tic[2] |= NV50_TIC_2_NORMALIZED_COORDS;

   /* check for linear storage type */
   if (unlikely(!nouveau_bo_memtype(nv04_resource(texture)->bo))) {
      if (texture->target == PIPE_BUFFER) {
         assert(!(tic[2] & NV50_TIC_2_NORMALIZED_COORDS));
         address +=
            view->pipe.u.buf.first_element * desc->block.bits / 8;
         tic[2] |= NV50_TIC_2_LINEAR | NV50_TIC_2_TARGET_BUFFER;
         tic[3] = 0;
         tic[4] = /* width */
            view->pipe.u.buf.last_element - view->pipe.u.buf.first_element + 1;
         tic[5] = 0;
      } else {
         /* must be 2D texture without mip maps */
         tic[2] |= NV50_TIC_2_LINEAR | NV50_TIC_2_TARGET_RECT;
         tic[3] = mt->level[0].pitch;
         tic[4] = mt->base.base.width0;
         tic[5] = (1 << 16) | mt->base.base.height0;
      }
      tic[6] =
      tic[7] = 0;
      tic[1] = address;
      tic[2] |= address >> 32;
      return &view->pipe;
   }

   tic[2] |=
      ((mt->level[0].tile_mode & 0x0f0) << (22 - 4)) |
      ((mt->level[0].tile_mode & 0xf00) << (25 - 8));

   depth = MAX2(mt->base.base.array_size, mt->base.base.depth0);

   if (mt->base.base.array_size > 1) {
      /* there doesn't seem to be a base layer field in TIC */
      address += view->pipe.u.tex.first_layer * mt->layer_stride;
      depth = view->pipe.u.tex.last_layer - view->pipe.u.tex.first_layer + 1;
   }
   tic[1] = address;
   tic[2] |= address >> 32;

   switch (target) {
   case PIPE_TEXTURE_1D:
      tic[2] |= NV50_TIC_2_TARGET_1D;
      break;
   case PIPE_TEXTURE_2D:
      tic[2] |= NV50_TIC_2_TARGET_2D;
      break;
   case PIPE_TEXTURE_RECT:
      tic[2] |= NV50_TIC_2_TARGET_2D;
      break;
   case PIPE_TEXTURE_3D:
      tic[2] |= NV50_TIC_2_TARGET_3D;
      break;
   case PIPE_TEXTURE_CUBE:
      depth /= 6;
      tic[2] |= NV50_TIC_2_TARGET_CUBE;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_1D_ARRAY;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_2D_ARRAY;
      break;
   case PIPE_TEXTURE_CUBE_ARRAY:
      depth /= 6;
      tic[2] |= NV50_TIC_2_TARGET_CUBE_ARRAY;
      break;
   default:
      NOUVEAU_ERR("unexpected/invalid texture target: %d\n",
                  mt->base.base.target);
      return NULL;
   }

   tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000;

   if (flags & NV50_TEXVIEW_ACCESS_RESOLVE) {
      width = mt->base.base.width0 << mt->ms_x;
      height = mt->base.base.height0 << mt->ms_y;
   } else {
      width = mt->base.base.width0;
      height = mt->base.base.height0;
   }

   tic[4] = (1 << 31) | width;

   tic[5] = height & 0xffff;
   tic[5] |= depth << 16;
   tic[5] |= mt->base.base.last_level << 28;

   /* sampling points: (?) */
   if (flags & NV50_TEXVIEW_ACCESS_RESOLVE)
      tic[6] = (mt->ms_x > 1) ? 0x88000000 : 0x03000000;
   else
      tic[6] = 0x03000000;

   tic[7] = (view->pipe.u.tex.last_level << 4) | view->pipe.u.tex.first_level;
   tic[7] |= mt->ms_mode << 12;

   return &view->pipe;
}

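/* Fermi path: upload freshly allocated TIC entries into the TIC area of txc
 * via M2MF and (re)bind the per-stage texture slots with BIND_TIC. */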
static bool
nvc0_validate_tic(struct nvc0_context *nvc0, int s)
{
   uint32_t commands[32];
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *txc = nvc0->screen->txc;
   unsigned i;
   unsigned n = 0;
   bool need_flush = false;

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         if (dirty)
            commands[n++] = (i << 1) | 0;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 17);
         BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
         PUSH_DATA (push, 0x100111);
         BEGIN_NIC0(push, NVC0_M2MF(DATA), 8);
         PUSH_DATAp(push, &tic->tic[0], 8);

         need_flush = true;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
         NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_cache_flush_count, 1);
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      if (!dirty)
         continue;
      commands[n++] = (tic->id << 9) | (i << 1) | 1;

      BCTX_REFN(nvc0->bufctx_3d, TEX(s, i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i)
      commands[n++] = (i << 1) | 0;

   nvc0->state.num_textures[s] = nvc0->num_textures[s];

   if (n) {
      BEGIN_NIC0(push, NVC0_3D(BIND_TIC(s)), n);
      PUSH_DATAp(push, commands, n);
   }
   nvc0->textures_dirty[s] = 0;

   return need_flush;
}

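/* Kepler path: TIC entries are uploaded with P2MF and are not bound through
 * methods; instead the TIC id is merged into the per-slot texture handle that
 * nve4_set_tex_handles() writes to the driver constant buffer. */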
static bool
nve4_validate_tic(struct nvc0_context *nvc0, unsigned s)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   bool need_flush = false;

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), 9);
         PUSH_DATA (push, 0x1001);
         PUSH_DATAp(push, &tic->tic[0], 8);

         need_flush = true;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0->tex_handles[s][i] &= ~NVE4_TIC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tic->id;
      if (dirty)
         BCTX_REFN(nvc0->bufctx_3d, TEX(s, i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i) {
      nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
      nvc0->textures_dirty[s] |= 1 << i;
   }

   nvc0->state.num_textures[s] = nvc0->num_textures[s];

   return need_flush;
}

void nvc0_validate_textures(struct nvc0_context *nvc0)
{
   bool need_flush;

   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
      need_flush = nve4_validate_tic(nvc0, 0);
      need_flush |= nve4_validate_tic(nvc0, 3);
      need_flush |= nve4_validate_tic(nvc0, 4);
   } else {
      need_flush = nvc0_validate_tic(nvc0, 0);
      need_flush |= nvc0_validate_tic(nvc0, 3);
      need_flush |= nvc0_validate_tic(nvc0, 4);
   }

   if (need_flush) {
      BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(TIC_FLUSH), 1);
      PUSH_DATA (nvc0->base.pushbuf, 0);
   }
}

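/* Fermi path for sampler state: TSC entries live in the same buffer as the
 * TIC entries, 64 KiB in, and are bound with BIND_TSC. */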
static bool
nvc0_validate_tsc(struct nvc0_context *nvc0, int s)
{
   uint32_t commands[16];
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   unsigned n = 0;
   bool need_flush = false;

   for (i = 0; i < nvc0->num_samplers[s]; ++i) {
      struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);

      if (!(nvc0->samplers_dirty[s] & (1 << i)))
         continue;
      if (!tsc) {
         commands[n++] = (i << 4) | 0;
         continue;
      }
      if (tsc->id < 0) {
         tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);

         nvc0_m2mf_push_linear(&nvc0->base, nvc0->screen->txc,
                               65536 + tsc->id * 32,
                               NV_VRAM_DOMAIN(&nvc0->screen->base),
                               32, tsc->tsc);
         need_flush = true;
      }
      nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);

      commands[n++] = (tsc->id << 12) | (i << 4) | 1;
   }
   for (; i < nvc0->state.num_samplers[s]; ++i)
      commands[n++] = (i << 4) | 0;

   nvc0->state.num_samplers[s] = nvc0->num_samplers[s];

   if (n) {
      BEGIN_NIC0(push, NVC0_3D(BIND_TSC(s)), n);
      PUSH_DATAp(push, commands, n);
   }
   nvc0->samplers_dirty[s] = 0;

   return need_flush;
}

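/* Kepler path for sampler state: upload TSC entries with P2MF and merge the
 * TSC id into the upper bits of the per-slot texture handle. */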
bool
nve4_validate_tsc(struct nvc0_context *nvc0, int s)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   bool need_flush = false;

   for (i = 0; i < nvc0->num_samplers[s]; ++i) {
      struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);

      if (!tsc) {
         nvc0->tex_handles[s][i] |= NVE4_TSC_ENTRY_INVALID;
         continue;
      }
      if (tsc->id < 0) {
         tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + 65536 + (tsc->id * 32));
         PUSH_DATA (push, txc->offset + 65536 + (tsc->id * 32));
         BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), 9);
         PUSH_DATA (push, 0x1001);
         PUSH_DATAp(push, &tsc->tsc[0], 8);

         need_flush = true;
      }
      nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);

      nvc0->tex_handles[s][i] &= ~NVE4_TSC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tsc->id << 20;
   }
   for (; i < nvc0->state.num_samplers[s]; ++i) {
      nvc0->tex_handles[s][i] |= NVE4_TSC_ENTRY_INVALID;
      nvc0->samplers_dirty[s] |= 1 << i;
   }

   nvc0->state.num_samplers[s] = nvc0->num_samplers[s];

   return need_flush;
}

void nvc0_validate_samplers(struct nvc0_context *nvc0)
{
   bool need_flush;

   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
      need_flush = nve4_validate_tsc(nvc0, 0);
      need_flush |= nve4_validate_tsc(nvc0, 3);
      need_flush |= nve4_validate_tsc(nvc0, 4);
   } else {
      need_flush = nvc0_validate_tsc(nvc0, 0);
      need_flush |= nvc0_validate_tsc(nvc0, 3);
      need_flush |= nvc0_validate_tsc(nvc0, 4);
   }

   if (need_flush) {
      BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(TSC_FLUSH), 1);
      PUSH_DATA (nvc0->base.pushbuf, 0);
   }
}

/* Upload the "diagonal" entries for the possible texture sources ($t == $s).
 * At some point we might want to get a list of the combinations used by a
 * shader and fill in those entries instead of having it extract the handles.
 */
void
nve4_set_tex_handles(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint64_t address;
   unsigned s;

   if (nvc0->screen->base.class_3d < NVE4_3D_CLASS)
      return;
   address = nvc0->screen->uniform_bo->offset + (5 << 16);

   for (s = 0; s < 5; ++s, address += (1 << 9)) {
      uint32_t dirty = nvc0->textures_dirty[s] | nvc0->samplers_dirty[s];
      if (!dirty)
         continue;
      BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
      PUSH_DATA (push, 512);
      PUSH_DATAh(push, address);
      PUSH_DATA (push, address);
      do {
         int i = ffs(dirty) - 1;
         dirty &= ~(1 << i);

         BEGIN_NVC0(push, NVC0_3D(CB_POS), 2);
         PUSH_DATA (push, (8 + i) * 4);
         PUSH_DATA (push, nvc0->tex_handles[s][i]);
      } while (dirty);

      nvc0->textures_dirty[s] = 0;
      nvc0->samplers_dirty[s] = 0;
   }
}


/* Forward declarations; the tables themselves are defined at the end of the file. */
static const uint8_t nve4_su_format_map[PIPE_FORMAT_COUNT];
static const uint16_t nve4_su_format_aux_map[PIPE_FORMAT_COUNT];
static const uint16_t nve4_suldp_lib_offset[PIPE_FORMAT_COUNT];

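/* Emit a 16-dword surface descriptor into the push buffer; info[12] points at
 * the per-format SULDP routine in the driver's shader library (lib_code). */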
void
nve4_set_surface_info(struct nouveau_pushbuf *push,
                      struct pipe_surface *psf,
                      struct nvc0_screen *screen)
{
   struct nv50_surface *sf = nv50_surface(psf);
   struct nv04_resource *res;
   uint64_t address;
   uint32_t *const info = push->cur;
   uint8_t log2cpp;

   if (psf && !nve4_su_format_map[psf->format])
      NOUVEAU_ERR("unsupported surface format, try is_format_supported() !\n");

   push->cur += 16;

   if (!psf || !nve4_su_format_map[psf->format]) {
      memset(info, 0, 16 * sizeof(*info));

      info[0] = 0xbadf0000;
      info[1] = 0x80004000;
      info[12] = nve4_suldp_lib_offset[PIPE_FORMAT_R32G32B32A32_UINT] +
         screen->lib_code->start;
      return;
   }
   res = nv04_resource(sf->base.texture);

   address = res->address + sf->offset;

   info[8] = sf->width;
   info[9] = sf->height;
   info[10] = sf->depth;
   switch (res->base.target) {
   case PIPE_TEXTURE_1D_ARRAY:
      info[11] = 1;
      break;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      info[11] = 2;
      break;
   case PIPE_TEXTURE_3D:
      info[11] = 3;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
      info[11] = 4;
      break;
   default:
      info[11] = 0;
      break;
   }
   log2cpp = (0xf000 & nve4_su_format_aux_map[sf->base.format]) >> 12;

   info[12] = nve4_suldp_lib_offset[sf->base.format] + screen->lib_code->start;

   /* limit in bytes for raw access */
   info[13] = (0x06 << 22) | ((sf->width << log2cpp) - 1);

   info[1] = nve4_su_format_map[sf->base.format];

#if 0
   switch (util_format_get_blocksizebits(sf->base.format)) {
   case 16: info[1] |= 1 << 16; break;
   case 32: info[1] |= 2 << 16; break;
   case 64: info[1] |= 3 << 16; break;
   case 128: info[1] |= 4 << 16; break;
   default:
      break;
   }
#else
   info[1] |= log2cpp << 16;
   info[1] |= 0x4000;
   info[1] |= (0x0f00 & nve4_su_format_aux_map[sf->base.format]);
#endif

   if (res->base.target == PIPE_BUFFER) {
      info[0] = address >> 8;
      info[2] = sf->width - 1;
      info[2] |= (0xff & nve4_su_format_aux_map[sf->base.format]) << 22;
      info[3] = 0;
      info[4] = 0;
      info[5] = 0;
      info[6] = 0;
      info[7] = 0;
      info[14] = 0;
      info[15] = 0;
   } else {
      struct nv50_miptree *mt = nv50_miptree(&res->base);
      struct nv50_miptree_level *lvl = &mt->level[sf->base.u.tex.level];
      const unsigned z = sf->base.u.tex.first_layer;

      if (z) {
         if (mt->layout_3d) {
            address += nvc0_mt_zslice_offset(mt, psf->u.tex.level, z);
            /* doesn't work if z passes z-tile boundary */
            assert(sf->depth == 1);
         } else {
            address += mt->layer_stride * z;
         }
      }
      info[0] = address >> 8;
      info[2] = sf->width - 1;
      /* NOTE: this is really important: */
      info[2] |= (0xff & nve4_su_format_aux_map[sf->base.format]) << 22;
      info[3] = (0x88 << 24) | (lvl->pitch / 64);
      info[4] = sf->height - 1;
      info[4] |= (lvl->tile_mode & 0x0f0) << 25;
      info[4] |= NVC0_TILE_SHIFT_Y(lvl->tile_mode) << 22;
      info[5] = mt->layer_stride >> 8;
      info[6] = sf->depth - 1;
      info[6] |= (lvl->tile_mode & 0xf00) << 21;
      info[6] |= NVC0_TILE_SHIFT_Z(lvl->tile_mode) << 22;
      info[7] = 0;
      info[14] = mt->ms_x;
      info[15] = mt->ms_y;
   }
}

static inline void
nvc0_update_surface_bindings(struct nvc0_context *nvc0)
{
   /* TODO */
}

static inline void
nve4_update_surface_bindings(struct nvc0_context *nvc0)
{
   /* TODO */
}

void
nvc0_validate_surfaces(struct nvc0_context *nvc0)
{
   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
      nve4_update_surface_bindings(nvc0);
   } else {
      nvc0_update_surface_bindings(nvc0);
   }
}


static const uint8_t nve4_su_format_map[PIPE_FORMAT_COUNT] =
{
   [PIPE_FORMAT_R32G32B32A32_FLOAT] = NVE4_IMAGE_FORMAT_RGBA32_FLOAT,
   [PIPE_FORMAT_R32G32B32A32_SINT] = NVE4_IMAGE_FORMAT_RGBA32_SINT,
   [PIPE_FORMAT_R32G32B32A32_UINT] = NVE4_IMAGE_FORMAT_RGBA32_UINT,
   [PIPE_FORMAT_R16G16B16A16_FLOAT] = NVE4_IMAGE_FORMAT_RGBA16_FLOAT,
   [PIPE_FORMAT_R16G16B16A16_UNORM] = NVE4_IMAGE_FORMAT_RGBA16_UNORM,
   [PIPE_FORMAT_R16G16B16A16_SNORM] = NVE4_IMAGE_FORMAT_RGBA16_SNORM,
   [PIPE_FORMAT_R16G16B16A16_SINT] = NVE4_IMAGE_FORMAT_RGBA16_SINT,
   [PIPE_FORMAT_R16G16B16A16_UINT] = NVE4_IMAGE_FORMAT_RGBA16_UINT,
   [PIPE_FORMAT_R8G8B8A8_UNORM] = NVE4_IMAGE_FORMAT_RGBA8_UNORM,
   [PIPE_FORMAT_R8G8B8A8_SNORM] = NVE4_IMAGE_FORMAT_RGBA8_SNORM,
   [PIPE_FORMAT_R8G8B8A8_SINT] = NVE4_IMAGE_FORMAT_RGBA8_SINT,
   [PIPE_FORMAT_R8G8B8A8_UINT] = NVE4_IMAGE_FORMAT_RGBA8_UINT,
   [PIPE_FORMAT_R11G11B10_FLOAT] = NVE4_IMAGE_FORMAT_R11G11B10_FLOAT,
   [PIPE_FORMAT_R10G10B10A2_UNORM] = NVE4_IMAGE_FORMAT_RGB10_A2_UNORM,
/* [PIPE_FORMAT_R10G10B10A2_UINT] = NVE4_IMAGE_FORMAT_RGB10_A2_UINT, */
   [PIPE_FORMAT_R32G32_FLOAT] = NVE4_IMAGE_FORMAT_RG32_FLOAT,
   [PIPE_FORMAT_R32G32_SINT] = NVE4_IMAGE_FORMAT_RG32_SINT,
   [PIPE_FORMAT_R32G32_UINT] = NVE4_IMAGE_FORMAT_RG32_UINT,
   [PIPE_FORMAT_R16G16_FLOAT] = NVE4_IMAGE_FORMAT_RG16_FLOAT,
   [PIPE_FORMAT_R16G16_UNORM] = NVE4_IMAGE_FORMAT_RG16_UNORM,
   [PIPE_FORMAT_R16G16_SNORM] = NVE4_IMAGE_FORMAT_RG16_SNORM,
   [PIPE_FORMAT_R16G16_SINT] = NVE4_IMAGE_FORMAT_RG16_SINT,
   [PIPE_FORMAT_R16G16_UINT] = NVE4_IMAGE_FORMAT_RG16_UINT,
   [PIPE_FORMAT_R8G8_UNORM] = NVE4_IMAGE_FORMAT_RG8_UNORM,
   [PIPE_FORMAT_R8G8_SNORM] = NVE4_IMAGE_FORMAT_RG8_SNORM,
   [PIPE_FORMAT_R8G8_SINT] = NVE4_IMAGE_FORMAT_RG8_SINT,
   [PIPE_FORMAT_R8G8_UINT] = NVE4_IMAGE_FORMAT_RG8_UINT,
   [PIPE_FORMAT_R32_FLOAT] = NVE4_IMAGE_FORMAT_R32_FLOAT,
   [PIPE_FORMAT_R32_SINT] = NVE4_IMAGE_FORMAT_R32_SINT,
   [PIPE_FORMAT_R32_UINT] = NVE4_IMAGE_FORMAT_R32_UINT,
   [PIPE_FORMAT_R16_FLOAT] = NVE4_IMAGE_FORMAT_R16_FLOAT,
   [PIPE_FORMAT_R16_UNORM] = NVE4_IMAGE_FORMAT_R16_UNORM,
   [PIPE_FORMAT_R16_SNORM] = NVE4_IMAGE_FORMAT_R16_SNORM,
   [PIPE_FORMAT_R16_SINT] = NVE4_IMAGE_FORMAT_R16_SINT,
   [PIPE_FORMAT_R16_UINT] = NVE4_IMAGE_FORMAT_R16_UINT,
   [PIPE_FORMAT_R8_UNORM] = NVE4_IMAGE_FORMAT_R8_UNORM,
   [PIPE_FORMAT_R8_SNORM] = NVE4_IMAGE_FORMAT_R8_SNORM,
   [PIPE_FORMAT_R8_SINT] = NVE4_IMAGE_FORMAT_R8_SINT,
   [PIPE_FORMAT_R8_UINT] = NVE4_IMAGE_FORMAT_R8_UINT,
};

/* Auxiliary format description values for surface instructions.
 * (log2(bytes per pixel) << 12) | (unk8 << 8) | unk22
 */
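/* E.g. the RGBA32 formats are 16 bytes per pixel, hence the 0x4 in the high
 * nibble of their 0x4842 entries. */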
static const uint16_t nve4_su_format_aux_map[PIPE_FORMAT_COUNT] =
{
   [PIPE_FORMAT_R32G32B32A32_FLOAT] = 0x4842,
   [PIPE_FORMAT_R32G32B32A32_SINT] = 0x4842,
   [PIPE_FORMAT_R32G32B32A32_UINT] = 0x4842,

   [PIPE_FORMAT_R16G16B16A16_UNORM] = 0x3933,
   [PIPE_FORMAT_R16G16B16A16_SNORM] = 0x3933,
   [PIPE_FORMAT_R16G16B16A16_SINT] = 0x3933,
   [PIPE_FORMAT_R16G16B16A16_UINT] = 0x3933,
   [PIPE_FORMAT_R16G16B16A16_FLOAT] = 0x3933,

   [PIPE_FORMAT_R32G32_FLOAT] = 0x3433,
   [PIPE_FORMAT_R32G32_SINT] = 0x3433,
   [PIPE_FORMAT_R32G32_UINT] = 0x3433,

   [PIPE_FORMAT_R10G10B10A2_UNORM] = 0x2a24,
/* [PIPE_FORMAT_R10G10B10A2_UINT] = 0x2a24, */
   [PIPE_FORMAT_R8G8B8A8_UNORM] = 0x2a24,
   [PIPE_FORMAT_R8G8B8A8_SNORM] = 0x2a24,
   [PIPE_FORMAT_R8G8B8A8_SINT] = 0x2a24,
   [PIPE_FORMAT_R8G8B8A8_UINT] = 0x2a24,
   [PIPE_FORMAT_R11G11B10_FLOAT] = 0x2a24,

   [PIPE_FORMAT_R16G16_UNORM] = 0x2524,
   [PIPE_FORMAT_R16G16_SNORM] = 0x2524,
   [PIPE_FORMAT_R16G16_SINT] = 0x2524,
   [PIPE_FORMAT_R16G16_UINT] = 0x2524,
   [PIPE_FORMAT_R16G16_FLOAT] = 0x2524,

   [PIPE_FORMAT_R32_SINT] = 0x2024,
   [PIPE_FORMAT_R32_UINT] = 0x2024,
   [PIPE_FORMAT_R32_FLOAT] = 0x2024,

   [PIPE_FORMAT_R8G8_UNORM] = 0x1615,
   [PIPE_FORMAT_R8G8_SNORM] = 0x1615,
   [PIPE_FORMAT_R8G8_SINT] = 0x1615,
   [PIPE_FORMAT_R8G8_UINT] = 0x1615,

   [PIPE_FORMAT_R16_UNORM] = 0x1115,
   [PIPE_FORMAT_R16_SNORM] = 0x1115,
   [PIPE_FORMAT_R16_SINT] = 0x1115,
   [PIPE_FORMAT_R16_UINT] = 0x1115,
   [PIPE_FORMAT_R16_FLOAT] = 0x1115,

   [PIPE_FORMAT_R8_UNORM] = 0x0206,
   [PIPE_FORMAT_R8_SNORM] = 0x0206,
   [PIPE_FORMAT_R8_SINT] = 0x0206,
   [PIPE_FORMAT_R8_UINT] = 0x0206
};

/* NOTE: These are hardcoded offsets for the shader library.
 * TODO: Automate them.
 */
static const uint16_t nve4_suldp_lib_offset[PIPE_FORMAT_COUNT] =
{
   [PIPE_FORMAT_R32G32B32A32_FLOAT] = 0x218,
   [PIPE_FORMAT_R32G32B32A32_SINT] = 0x218,
   [PIPE_FORMAT_R32G32B32A32_UINT] = 0x218,
   [PIPE_FORMAT_R16G16B16A16_UNORM] = 0x248,
   [PIPE_FORMAT_R16G16B16A16_SNORM] = 0x2b8,
   [PIPE_FORMAT_R16G16B16A16_SINT] = 0x330,
   [PIPE_FORMAT_R16G16B16A16_UINT] = 0x388,
   [PIPE_FORMAT_R16G16B16A16_FLOAT] = 0x3d8,
   [PIPE_FORMAT_R32G32_FLOAT] = 0x428,
   [PIPE_FORMAT_R32G32_SINT] = 0x468,
   [PIPE_FORMAT_R32G32_UINT] = 0x468,
   [PIPE_FORMAT_R10G10B10A2_UNORM] = 0x4a8,
/* [PIPE_FORMAT_R10G10B10A2_UINT] = 0x530, */
   [PIPE_FORMAT_R8G8B8A8_UNORM] = 0x588,
   [PIPE_FORMAT_R8G8B8A8_SNORM] = 0x5f8,
   [PIPE_FORMAT_R8G8B8A8_SINT] = 0x670,
   [PIPE_FORMAT_R8G8B8A8_UINT] = 0x6c8,
   [PIPE_FORMAT_B5G6R5_UNORM] = 0x718,
   [PIPE_FORMAT_B5G5R5X1_UNORM] = 0x7a0,
   [PIPE_FORMAT_R16G16_UNORM] = 0x828,
   [PIPE_FORMAT_R16G16_SNORM] = 0x890,
   [PIPE_FORMAT_R16G16_SINT] = 0x8f0,
   [PIPE_FORMAT_R16G16_UINT] = 0x948,
   [PIPE_FORMAT_R16G16_FLOAT] = 0x998,
   [PIPE_FORMAT_R32_FLOAT] = 0x9e8,
   [PIPE_FORMAT_R32_SINT] = 0xa30,
   [PIPE_FORMAT_R32_UINT] = 0xa30,
   [PIPE_FORMAT_R8G8_UNORM] = 0xa78,
   [PIPE_FORMAT_R8G8_SNORM] = 0xae0,
   [PIPE_FORMAT_R8G8_UINT] = 0xb48,
   [PIPE_FORMAT_R8G8_SINT] = 0xb98,
   [PIPE_FORMAT_R16_UNORM] = 0xbe8,
   [PIPE_FORMAT_R16_SNORM] = 0xc48,
   [PIPE_FORMAT_R16_SINT] = 0xca0,
   [PIPE_FORMAT_R16_UINT] = 0xce8,
   [PIPE_FORMAT_R16_FLOAT] = 0xd30,
   [PIPE_FORMAT_R8_UNORM] = 0xd88,
   [PIPE_FORMAT_R8_SNORM] = 0xde0,
   [PIPE_FORMAT_R8_SINT] = 0xe38,
   [PIPE_FORMAT_R8_UINT] = 0xe88,
   [PIPE_FORMAT_R11G11B10_FLOAT] = 0xed0
};