/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <assert.h>
#include <string.h>
#include <strings.h>

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"
#include "nv50/g80_texture.xml.h"
#include "nv50/g80_defs.xml.h"

#include "util/u_format.h"

#define NVE4_TIC_ENTRY_INVALID 0x000fffff
#define NVE4_TSC_ENTRY_INVALID 0xfff00000
33 static inline uint32_t
34 nv50_tic_swizzle(const struct nvc0_format
*fmt
, unsigned swz
, bool tex_int
)
37 case PIPE_SWIZZLE_RED
: return fmt
->tic
.src_x
;
38 case PIPE_SWIZZLE_GREEN
: return fmt
->tic
.src_y
;
39 case PIPE_SWIZZLE_BLUE
: return fmt
->tic
.src_z
;
40 case PIPE_SWIZZLE_ALPHA
: return fmt
->tic
.src_w
;
41 case PIPE_SWIZZLE_ONE
:
42 return tex_int
? G80_TIC_SOURCE_ONE_INT
: G80_TIC_SOURCE_ONE_FLOAT
;
43 case PIPE_SWIZZLE_ZERO
:
45 return G80_TIC_SOURCE_ZERO
;
49 struct pipe_sampler_view
*
50 nvc0_create_sampler_view(struct pipe_context
*pipe
,
51 struct pipe_resource
*res
,
52 const struct pipe_sampler_view
*templ
)
56 if (templ
->target
== PIPE_TEXTURE_RECT
|| templ
->target
== PIPE_BUFFER
)
57 flags
|= NV50_TEXVIEW_SCALED_COORDS
;
59 return nvc0_create_texture_view(pipe
, res
, templ
, flags
, templ
->target
);
62 struct pipe_sampler_view
*
63 nvc0_create_texture_view(struct pipe_context
*pipe
,
64 struct pipe_resource
*texture
,
65 const struct pipe_sampler_view
*templ
,
67 enum pipe_texture_target target
)
69 const struct util_format_description
*desc
;
70 const struct nvc0_format
*fmt
;
74 uint32_t width
, height
;
76 struct nv50_tic_entry
*view
;
77 struct nv50_miptree
*mt
;
80 view
= MALLOC_STRUCT(nv50_tic_entry
);
83 mt
= nv50_miptree(texture
);
86 view
->pipe
.reference
.count
= 1;
87 view
->pipe
.texture
= NULL
;
88 view
->pipe
.context
= pipe
;
92 pipe_resource_reference(&view
->pipe
.texture
, texture
);
96 desc
= util_format_description(view
->pipe
.format
);
98 fmt
= &nvc0_format_table
[view
->pipe
.format
];
100 tex_int
= util_format_is_pure_integer(view
->pipe
.format
);
102 swz
[0] = nv50_tic_swizzle(fmt
, view
->pipe
.swizzle_r
, tex_int
);
103 swz
[1] = nv50_tic_swizzle(fmt
, view
->pipe
.swizzle_g
, tex_int
);
104 swz
[2] = nv50_tic_swizzle(fmt
, view
->pipe
.swizzle_b
, tex_int
);
105 swz
[3] = nv50_tic_swizzle(fmt
, view
->pipe
.swizzle_a
, tex_int
);
106 tic
[0] = (fmt
->tic
.format
<< G80_TIC_0_COMPONENTS_SIZES__SHIFT
) |
107 (fmt
->tic
.type_r
<< G80_TIC_0_R_DATA_TYPE__SHIFT
) |
108 (fmt
->tic
.type_g
<< G80_TIC_0_G_DATA_TYPE__SHIFT
) |
109 (fmt
->tic
.type_b
<< G80_TIC_0_B_DATA_TYPE__SHIFT
) |
110 (fmt
->tic
.type_a
<< G80_TIC_0_A_DATA_TYPE__SHIFT
) |
111 (swz
[0] << G80_TIC_0_X_SOURCE__SHIFT
) |
112 (swz
[1] << G80_TIC_0_Y_SOURCE__SHIFT
) |
113 (swz
[2] << G80_TIC_0_Z_SOURCE__SHIFT
) |
114 (swz
[3] << G80_TIC_0_W_SOURCE__SHIFT
);
116 address
= mt
->base
.address
;
118 tic
[2] = 0x10001000 | G80_TIC_2_BORDER_SOURCE_COLOR
;
120 if (desc
->colorspace
== UTIL_FORMAT_COLORSPACE_SRGB
)
121 tic
[2] |= G80_TIC_2_SRGB_CONVERSION
;
123 if (!(flags
& NV50_TEXVIEW_SCALED_COORDS
))
124 tic
[2] |= G80_TIC_2_NORMALIZED_COORDS
;
126 /* check for linear storage type */
127 if (unlikely(!nouveau_bo_memtype(nv04_resource(texture
)->bo
))) {
128 if (texture
->target
== PIPE_BUFFER
) {
129 assert(!(tic
[2] & G80_TIC_2_NORMALIZED_COORDS
));
131 view
->pipe
.u
.buf
.first_element
* desc
->block
.bits
/ 8;
132 tic
[2] |= G80_TIC_2_LAYOUT_PITCH
| G80_TIC_2_TEXTURE_TYPE_ONE_D_BUFFER
;
135 view
->pipe
.u
.buf
.last_element
- view
->pipe
.u
.buf
.first_element
+ 1;
138 /* must be 2D texture without mip maps */
139 tic
[2] |= G80_TIC_2_LAYOUT_PITCH
| G80_TIC_2_TEXTURE_TYPE_TWO_D_NO_MIPMAP
;
140 tic
[3] = mt
->level
[0].pitch
;
141 tic
[4] = mt
->base
.base
.width0
;
142 tic
[5] = (1 << 16) | mt
->base
.base
.height0
;
147 tic
[2] |= address
>> 32;
152 ((mt
->level
[0].tile_mode
& 0x0f0) << (22 - 4)) |
153 ((mt
->level
[0].tile_mode
& 0xf00) << (25 - 8));
155 depth
= MAX2(mt
->base
.base
.array_size
, mt
->base
.base
.depth0
);
157 if (mt
->base
.base
.array_size
> 1) {
158 /* there doesn't seem to be a base layer field in TIC */
159 address
+= view
->pipe
.u
.tex
.first_layer
* mt
->layer_stride
;
160 depth
= view
->pipe
.u
.tex
.last_layer
- view
->pipe
.u
.tex
.first_layer
+ 1;
163 tic
[2] |= address
>> 32;
166 case PIPE_TEXTURE_1D
:
167 tic
[2] |= G80_TIC_2_TEXTURE_TYPE_ONE_D
;
169 case PIPE_TEXTURE_2D
:
170 tic
[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D
;
172 case PIPE_TEXTURE_RECT
:
173 tic
[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D
;
175 case PIPE_TEXTURE_3D
:
176 tic
[2] |= G80_TIC_2_TEXTURE_TYPE_THREE_D
;
178 case PIPE_TEXTURE_CUBE
:
180 tic
[2] |= G80_TIC_2_TEXTURE_TYPE_CUBEMAP
;
182 case PIPE_TEXTURE_1D_ARRAY
:
183 tic
[2] |= G80_TIC_2_TEXTURE_TYPE_ONE_D_ARRAY
;
185 case PIPE_TEXTURE_2D_ARRAY
:
186 tic
[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D_ARRAY
;
188 case PIPE_TEXTURE_CUBE_ARRAY
:
190 tic
[2] |= G80_TIC_2_TEXTURE_TYPE_CUBE_ARRAY
;
193 unreachable("unexpected/invalid texture target");
196 tic
[3] = (flags
& NV50_TEXVIEW_FILTER_MSAA8
) ? 0x20000000 : 0x00300000;
198 if (flags
& NV50_TEXVIEW_ACCESS_RESOLVE
) {
199 width
= mt
->base
.base
.width0
<< mt
->ms_x
;
200 height
= mt
->base
.base
.height0
<< mt
->ms_y
;
202 width
= mt
->base
.base
.width0
;
203 height
= mt
->base
.base
.height0
;
206 tic
[4] = (1 << 31) | width
;
208 tic
[5] = height
& 0xffff;
209 tic
[5] |= depth
<< 16;
210 tic
[5] |= mt
->base
.base
.last_level
<< 28;
212 /* sampling points: (?) */
213 if (flags
& NV50_TEXVIEW_ACCESS_RESOLVE
)
214 tic
[6] = (mt
->ms_x
> 1) ? 0x88000000 : 0x03000000;
218 tic
[7] = (view
->pipe
.u
.tex
.last_level
<< 4) | view
->pipe
.u
.tex
.first_level
;
219 tic
[7] |= mt
->ms_mode
<< 12;
225 nvc0_update_tic(struct nvc0_context
*nvc0
, struct nv50_tic_entry
*tic
,
226 struct nv04_resource
*res
)
228 uint64_t address
= res
->address
;
229 if (res
->base
.target
!= PIPE_BUFFER
)
231 address
+= tic
->pipe
.u
.buf
.first_element
*
232 util_format_get_blocksize(tic
->pipe
.format
);
233 if (tic
->tic
[1] == (uint32_t)address
&&
234 (tic
->tic
[2] & 0xff) == address
>> 32)
237 nvc0_screen_tic_unlock(nvc0
->screen
, tic
);
239 tic
->tic
[1] = address
;
240 tic
->tic
[2] &= 0xffffff00;
241 tic
->tic
[2] |= address
>> 32;
245 nvc0_validate_tic(struct nvc0_context
*nvc0
, int s
)
247 uint32_t commands
[32];
248 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
249 struct nouveau_bo
*txc
= nvc0
->screen
->txc
;
252 bool need_flush
= false;
254 for (i
= 0; i
< nvc0
->num_textures
[s
]; ++i
) {
255 struct nv50_tic_entry
*tic
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
256 struct nv04_resource
*res
;
257 const bool dirty
= !!(nvc0
->textures_dirty
[s
] & (1 << i
));
261 commands
[n
++] = (i
<< 1) | 0;
264 res
= nv04_resource(tic
->pipe
.texture
);
265 nvc0_update_tic(nvc0
, tic
, res
);
268 tic
->id
= nvc0_screen_tic_alloc(nvc0
->screen
, tic
);
270 PUSH_SPACE(push
, 17);
271 BEGIN_NVC0(push
, NVC0_M2MF(OFFSET_OUT_HIGH
), 2);
272 PUSH_DATAh(push
, txc
->offset
+ (tic
->id
* 32));
273 PUSH_DATA (push
, txc
->offset
+ (tic
->id
* 32));
274 BEGIN_NVC0(push
, NVC0_M2MF(LINE_LENGTH_IN
), 2);
275 PUSH_DATA (push
, 32);
277 BEGIN_NVC0(push
, NVC0_M2MF(EXEC
), 1);
278 PUSH_DATA (push
, 0x100111);
279 BEGIN_NIC0(push
, NVC0_M2MF(DATA
), 8);
280 PUSH_DATAp(push
, &tic
->tic
[0], 8);
284 if (res
->status
& NOUVEAU_BUFFER_STATUS_GPU_WRITING
) {
285 BEGIN_NVC0(push
, NVC0_3D(TEX_CACHE_CTL
), 1);
286 PUSH_DATA (push
, (tic
->id
<< 4) | 1);
287 NOUVEAU_DRV_STAT(&nvc0
->screen
->base
, tex_cache_flush_count
, 1);
289 nvc0
->screen
->tic
.lock
[tic
->id
/ 32] |= 1 << (tic
->id
% 32);
291 res
->status
&= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING
;
292 res
->status
|= NOUVEAU_BUFFER_STATUS_GPU_READING
;
296 commands
[n
++] = (tic
->id
<< 9) | (i
<< 1) | 1;
298 BCTX_REFN(nvc0
->bufctx_3d
, TEX(s
, i
), res
, RD
);
300 for (; i
< nvc0
->state
.num_textures
[s
]; ++i
)
301 commands
[n
++] = (i
<< 1) | 0;
303 nvc0
->state
.num_textures
[s
] = nvc0
->num_textures
[s
];
306 BEGIN_NIC0(push
, NVC0_3D(BIND_TIC(s
)), n
);
307 PUSH_DATAp(push
, commands
, n
);
309 nvc0
->textures_dirty
[s
] = 0;
315 nve4_validate_tic(struct nvc0_context
*nvc0
, unsigned s
)
317 struct nouveau_bo
*txc
= nvc0
->screen
->txc
;
318 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
320 bool need_flush
= false;
322 for (i
= 0; i
< nvc0
->num_textures
[s
]; ++i
) {
323 struct nv50_tic_entry
*tic
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
324 struct nv04_resource
*res
;
325 const bool dirty
= !!(nvc0
->textures_dirty
[s
] & (1 << i
));
328 nvc0
->tex_handles
[s
][i
] |= NVE4_TIC_ENTRY_INVALID
;
331 res
= nv04_resource(tic
->pipe
.texture
);
332 nvc0_update_tic(nvc0
, tic
, res
);
335 tic
->id
= nvc0_screen_tic_alloc(nvc0
->screen
, tic
);
337 PUSH_SPACE(push
, 16);
338 BEGIN_NVC0(push
, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH
), 2);
339 PUSH_DATAh(push
, txc
->offset
+ (tic
->id
* 32));
340 PUSH_DATA (push
, txc
->offset
+ (tic
->id
* 32));
341 BEGIN_NVC0(push
, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN
), 2);
342 PUSH_DATA (push
, 32);
344 BEGIN_1IC0(push
, NVE4_P2MF(UPLOAD_EXEC
), 9);
345 PUSH_DATA (push
, 0x1001);
346 PUSH_DATAp(push
, &tic
->tic
[0], 8);
350 if (res
->status
& NOUVEAU_BUFFER_STATUS_GPU_WRITING
) {
351 BEGIN_NVC0(push
, NVC0_3D(TEX_CACHE_CTL
), 1);
352 PUSH_DATA (push
, (tic
->id
<< 4) | 1);
354 nvc0
->screen
->tic
.lock
[tic
->id
/ 32] |= 1 << (tic
->id
% 32);
356 res
->status
&= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING
;
357 res
->status
|= NOUVEAU_BUFFER_STATUS_GPU_READING
;
359 nvc0
->tex_handles
[s
][i
] &= ~NVE4_TIC_ENTRY_INVALID
;
360 nvc0
->tex_handles
[s
][i
] |= tic
->id
;
362 BCTX_REFN(nvc0
->bufctx_3d
, TEX(s
, i
), res
, RD
);
364 for (; i
< nvc0
->state
.num_textures
[s
]; ++i
) {
365 nvc0
->tex_handles
[s
][i
] |= NVE4_TIC_ENTRY_INVALID
;
366 nvc0
->textures_dirty
[s
] |= 1 << i
;
369 nvc0
->state
.num_textures
[s
] = nvc0
->num_textures
[s
];
374 void nvc0_validate_textures(struct nvc0_context
*nvc0
)
376 bool need_flush
= false;
379 for (i
= 0; i
< 5; i
++) {
380 if (nvc0
->screen
->base
.class_3d
>= NVE4_3D_CLASS
)
381 need_flush
|= nve4_validate_tic(nvc0
, i
);
383 need_flush
|= nvc0_validate_tic(nvc0
, i
);
387 BEGIN_NVC0(nvc0
->base
.pushbuf
, NVC0_3D(TIC_FLUSH
), 1);
388 PUSH_DATA (nvc0
->base
.pushbuf
, 0);
393 nvc0_validate_tsc(struct nvc0_context
*nvc0
, int s
)
395 uint32_t commands
[16];
396 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
399 bool need_flush
= false;
401 for (i
= 0; i
< nvc0
->num_samplers
[s
]; ++i
) {
402 struct nv50_tsc_entry
*tsc
= nv50_tsc_entry(nvc0
->samplers
[s
][i
]);
404 if (!(nvc0
->samplers_dirty
[s
] & (1 << i
)))
407 commands
[n
++] = (i
<< 4) | 0;
411 tsc
->id
= nvc0_screen_tsc_alloc(nvc0
->screen
, tsc
);
413 nvc0_m2mf_push_linear(&nvc0
->base
, nvc0
->screen
->txc
,
414 65536 + tsc
->id
* 32, NV_VRAM_DOMAIN(&nvc0
->screen
->base
),
418 nvc0
->screen
->tsc
.lock
[tsc
->id
/ 32] |= 1 << (tsc
->id
% 32);
420 commands
[n
++] = (tsc
->id
<< 12) | (i
<< 4) | 1;
422 for (; i
< nvc0
->state
.num_samplers
[s
]; ++i
)
423 commands
[n
++] = (i
<< 4) | 0;
425 nvc0
->state
.num_samplers
[s
] = nvc0
->num_samplers
[s
];
428 BEGIN_NIC0(push
, NVC0_3D(BIND_TSC(s
)), n
);
429 PUSH_DATAp(push
, commands
, n
);
431 nvc0
->samplers_dirty
[s
] = 0;
437 nve4_validate_tsc(struct nvc0_context
*nvc0
, int s
)
439 struct nouveau_bo
*txc
= nvc0
->screen
->txc
;
440 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
442 bool need_flush
= false;
444 for (i
= 0; i
< nvc0
->num_samplers
[s
]; ++i
) {
445 struct nv50_tsc_entry
*tsc
= nv50_tsc_entry(nvc0
->samplers
[s
][i
]);
448 nvc0
->tex_handles
[s
][i
] |= NVE4_TSC_ENTRY_INVALID
;
452 tsc
->id
= nvc0_screen_tsc_alloc(nvc0
->screen
, tsc
);
454 PUSH_SPACE(push
, 16);
455 BEGIN_NVC0(push
, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH
), 2);
456 PUSH_DATAh(push
, txc
->offset
+ 65536 + (tsc
->id
* 32));
457 PUSH_DATA (push
, txc
->offset
+ 65536 + (tsc
->id
* 32));
458 BEGIN_NVC0(push
, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN
), 2);
459 PUSH_DATA (push
, 32);
461 BEGIN_1IC0(push
, NVE4_P2MF(UPLOAD_EXEC
), 9);
462 PUSH_DATA (push
, 0x1001);
463 PUSH_DATAp(push
, &tsc
->tsc
[0], 8);
467 nvc0
->screen
->tsc
.lock
[tsc
->id
/ 32] |= 1 << (tsc
->id
% 32);
469 nvc0
->tex_handles
[s
][i
] &= ~NVE4_TSC_ENTRY_INVALID
;
470 nvc0
->tex_handles
[s
][i
] |= tsc
->id
<< 20;
472 for (; i
< nvc0
->state
.num_samplers
[s
]; ++i
) {
473 nvc0
->tex_handles
[s
][i
] |= NVE4_TSC_ENTRY_INVALID
;
474 nvc0
->samplers_dirty
[s
] |= 1 << i
;
477 nvc0
->state
.num_samplers
[s
] = nvc0
->num_samplers
[s
];
482 void nvc0_validate_samplers(struct nvc0_context
*nvc0
)
484 bool need_flush
= false;
487 for (i
= 0; i
< 5; i
++) {
488 if (nvc0
->screen
->base
.class_3d
>= NVE4_3D_CLASS
)
489 need_flush
|= nve4_validate_tsc(nvc0
, i
);
491 need_flush
|= nvc0_validate_tsc(nvc0
, i
);
495 BEGIN_NVC0(nvc0
->base
.pushbuf
, NVC0_3D(TSC_FLUSH
), 1);
496 PUSH_DATA (nvc0
->base
.pushbuf
, 0);
500 /* Upload the "diagonal" entries for the possible texture sources ($t == $s).
501 * At some point we might want to get a list of the combinations used by a
502 * shader and fill in those entries instead of having it extract the handles.
505 nve4_set_tex_handles(struct nvc0_context
*nvc0
)
507 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
511 if (nvc0
->screen
->base
.class_3d
< NVE4_3D_CLASS
)
513 address
= nvc0
->screen
->uniform_bo
->offset
+ (5 << 16);
515 for (s
= 0; s
< 5; ++s
, address
+= (1 << 10)) {
516 uint32_t dirty
= nvc0
->textures_dirty
[s
] | nvc0
->samplers_dirty
[s
];
519 BEGIN_NVC0(push
, NVC0_3D(CB_SIZE
), 3);
520 PUSH_DATA (push
, 1024);
521 PUSH_DATAh(push
, address
);
522 PUSH_DATA (push
, address
);
524 int i
= ffs(dirty
) - 1;
527 BEGIN_NVC0(push
, NVC0_3D(CB_POS
), 2);
528 PUSH_DATA (push
, (8 + i
) * 4);
529 PUSH_DATA (push
, nvc0
->tex_handles
[s
][i
]);
532 nvc0
->textures_dirty
[s
] = 0;
533 nvc0
->samplers_dirty
[s
] = 0;
538 static const uint8_t nve4_su_format_map
[PIPE_FORMAT_COUNT
];
539 static const uint16_t nve4_su_format_aux_map
[PIPE_FORMAT_COUNT
];
540 static const uint16_t nve4_suldp_lib_offset
[PIPE_FORMAT_COUNT
];
543 nve4_set_surface_info(struct nouveau_pushbuf
*push
,
544 struct pipe_surface
*psf
,
545 struct nvc0_screen
*screen
)
547 struct nv50_surface
*sf
= nv50_surface(psf
);
548 struct nv04_resource
*res
;
550 uint32_t *const info
= push
->cur
;
553 if (psf
&& !nve4_su_format_map
[psf
->format
])
554 NOUVEAU_ERR("unsupported surface format, try is_format_supported() !\n");
558 if (!psf
|| !nve4_su_format_map
[psf
->format
]) {
559 memset(info
, 0, 16 * sizeof(*info
));
561 info
[0] = 0xbadf0000;
562 info
[1] = 0x80004000;
563 info
[12] = nve4_suldp_lib_offset
[PIPE_FORMAT_R32G32B32A32_UINT
] +
564 screen
->lib_code
->start
;
567 res
= nv04_resource(sf
->base
.texture
);
569 address
= res
->address
+ sf
->offset
;
572 info
[9] = sf
->height
;
573 info
[10] = sf
->depth
;
574 switch (res
->base
.target
) {
575 case PIPE_TEXTURE_1D_ARRAY
:
578 case PIPE_TEXTURE_2D
:
579 case PIPE_TEXTURE_RECT
:
582 case PIPE_TEXTURE_3D
:
585 case PIPE_TEXTURE_2D_ARRAY
:
586 case PIPE_TEXTURE_CUBE
:
587 case PIPE_TEXTURE_CUBE_ARRAY
:
594 log2cpp
= (0xf000 & nve4_su_format_aux_map
[sf
->base
.format
]) >> 12;
596 info
[12] = nve4_suldp_lib_offset
[sf
->base
.format
] + screen
->lib_code
->start
;
598 /* limit in bytes for raw access */
599 info
[13] = (0x06 << 22) | ((sf
->width
<< log2cpp
) - 1);
601 info
[1] = nve4_su_format_map
[sf
->base
.format
];
604 switch (util_format_get_blocksizebits(sf
->base
.format
)) {
605 case 16: info
[1] |= 1 << 16; break;
606 case 32: info
[1] |= 2 << 16; break;
607 case 64: info
[1] |= 3 << 16; break;
608 case 128: info
[1] |= 4 << 16; break;
613 info
[1] |= log2cpp
<< 16;
615 info
[1] |= (0x0f00 & nve4_su_format_aux_map
[sf
->base
.format
]);
618 if (res
->base
.target
== PIPE_BUFFER
) {
619 info
[0] = address
>> 8;
620 info
[2] = sf
->width
- 1;
621 info
[2] |= (0xff & nve4_su_format_aux_map
[sf
->base
.format
]) << 22;
630 struct nv50_miptree
*mt
= nv50_miptree(&res
->base
);
631 struct nv50_miptree_level
*lvl
= &mt
->level
[sf
->base
.u
.tex
.level
];
632 const unsigned z
= sf
->base
.u
.tex
.first_layer
;
636 address
+= nvc0_mt_zslice_offset(mt
, psf
->u
.tex
.level
, z
);
637 /* doesn't work if z passes z-tile boundary */
638 assert(sf
->depth
== 1);
640 address
+= mt
->layer_stride
* z
;
643 info
[0] = address
>> 8;
644 info
[2] = sf
->width
- 1;
645 /* NOTE: this is really important: */
646 info
[2] |= (0xff & nve4_su_format_aux_map
[sf
->base
.format
]) << 22;
647 info
[3] = (0x88 << 24) | (lvl
->pitch
/ 64);
648 info
[4] = sf
->height
- 1;
649 info
[4] |= (lvl
->tile_mode
& 0x0f0) << 25;
650 info
[4] |= NVC0_TILE_SHIFT_Y(lvl
->tile_mode
) << 22;
651 info
[5] = mt
->layer_stride
>> 8;
652 info
[6] = sf
->depth
- 1;
653 info
[6] |= (lvl
->tile_mode
& 0xf00) << 21;
654 info
[6] |= NVC0_TILE_SHIFT_Z(lvl
->tile_mode
) << 22;
/* Fermi surface-binding update.
 * NOTE(review): the body was absent from this chunk; upstream had an
 * unimplemented stub here — verify.
 */
static void
nvc0_update_surface_bindings(struct nvc0_context *nvc0)
{
   /* TODO */
}
/* Kepler surface-binding update.
 * NOTE(review): the body was absent from this chunk; upstream had an
 * unimplemented stub here — verify.
 */
static void
nve4_update_surface_bindings(struct nvc0_context *nvc0)
{
   /* TODO */
}
674 nvc0_validate_surfaces(struct nvc0_context
*nvc0
)
676 if (nvc0
->screen
->base
.class_3d
>= NVE4_3D_CLASS
) {
677 nve4_update_surface_bindings(nvc0
);
679 nvc0_update_surface_bindings(nvc0
);
684 static const uint8_t nve4_su_format_map
[PIPE_FORMAT_COUNT
] =
686 [PIPE_FORMAT_R32G32B32A32_FLOAT
] = GK104_IMAGE_FORMAT_RGBA32_FLOAT
,
687 [PIPE_FORMAT_R32G32B32A32_SINT
] = GK104_IMAGE_FORMAT_RGBA32_SINT
,
688 [PIPE_FORMAT_R32G32B32A32_UINT
] = GK104_IMAGE_FORMAT_RGBA32_UINT
,
689 [PIPE_FORMAT_R16G16B16A16_FLOAT
] = GK104_IMAGE_FORMAT_RGBA16_FLOAT
,
690 [PIPE_FORMAT_R16G16B16A16_UNORM
] = GK104_IMAGE_FORMAT_RGBA16_UNORM
,
691 [PIPE_FORMAT_R16G16B16A16_SNORM
] = GK104_IMAGE_FORMAT_RGBA16_SNORM
,
692 [PIPE_FORMAT_R16G16B16A16_SINT
] = GK104_IMAGE_FORMAT_RGBA16_SINT
,
693 [PIPE_FORMAT_R16G16B16A16_UINT
] = GK104_IMAGE_FORMAT_RGBA16_UINT
,
694 [PIPE_FORMAT_R8G8B8A8_UNORM
] = GK104_IMAGE_FORMAT_RGBA8_UNORM
,
695 [PIPE_FORMAT_R8G8B8A8_SNORM
] = GK104_IMAGE_FORMAT_RGBA8_SNORM
,
696 [PIPE_FORMAT_R8G8B8A8_SINT
] = GK104_IMAGE_FORMAT_RGBA8_SINT
,
697 [PIPE_FORMAT_R8G8B8A8_UINT
] = GK104_IMAGE_FORMAT_RGBA8_UINT
,
698 [PIPE_FORMAT_R11G11B10_FLOAT
] = GK104_IMAGE_FORMAT_R11G11B10_FLOAT
,
699 [PIPE_FORMAT_R10G10B10A2_UNORM
] = GK104_IMAGE_FORMAT_RGB10_A2_UNORM
,
700 /* [PIPE_FORMAT_R10G10B10A2_UINT] = GK104_IMAGE_FORMAT_RGB10_A2_UINT, */
701 [PIPE_FORMAT_R32G32_FLOAT
] = GK104_IMAGE_FORMAT_RG32_FLOAT
,
702 [PIPE_FORMAT_R32G32_SINT
] = GK104_IMAGE_FORMAT_RG32_SINT
,
703 [PIPE_FORMAT_R32G32_UINT
] = GK104_IMAGE_FORMAT_RG32_UINT
,
704 [PIPE_FORMAT_R16G16_FLOAT
] = GK104_IMAGE_FORMAT_RG16_FLOAT
,
705 [PIPE_FORMAT_R16G16_UNORM
] = GK104_IMAGE_FORMAT_RG16_UNORM
,
706 [PIPE_FORMAT_R16G16_SNORM
] = GK104_IMAGE_FORMAT_RG16_SNORM
,
707 [PIPE_FORMAT_R16G16_SINT
] = GK104_IMAGE_FORMAT_RG16_SINT
,
708 [PIPE_FORMAT_R16G16_UINT
] = GK104_IMAGE_FORMAT_RG16_UINT
,
709 [PIPE_FORMAT_R8G8_UNORM
] = GK104_IMAGE_FORMAT_RG8_UNORM
,
710 [PIPE_FORMAT_R8G8_SNORM
] = GK104_IMAGE_FORMAT_RG8_SNORM
,
711 [PIPE_FORMAT_R8G8_SINT
] = GK104_IMAGE_FORMAT_RG8_SINT
,
712 [PIPE_FORMAT_R8G8_UINT
] = GK104_IMAGE_FORMAT_RG8_UINT
,
713 [PIPE_FORMAT_R32_FLOAT
] = GK104_IMAGE_FORMAT_R32_FLOAT
,
714 [PIPE_FORMAT_R32_SINT
] = GK104_IMAGE_FORMAT_R32_SINT
,
715 [PIPE_FORMAT_R32_UINT
] = GK104_IMAGE_FORMAT_R32_UINT
,
716 [PIPE_FORMAT_R16_FLOAT
] = GK104_IMAGE_FORMAT_R16_FLOAT
,
717 [PIPE_FORMAT_R16_UNORM
] = GK104_IMAGE_FORMAT_R16_UNORM
,
718 [PIPE_FORMAT_R16_SNORM
] = GK104_IMAGE_FORMAT_R16_SNORM
,
719 [PIPE_FORMAT_R16_SINT
] = GK104_IMAGE_FORMAT_R16_SINT
,
720 [PIPE_FORMAT_R16_UINT
] = GK104_IMAGE_FORMAT_R16_UINT
,
721 [PIPE_FORMAT_R8_UNORM
] = GK104_IMAGE_FORMAT_R8_UNORM
,
722 [PIPE_FORMAT_R8_SNORM
] = GK104_IMAGE_FORMAT_R8_SNORM
,
723 [PIPE_FORMAT_R8_SINT
] = GK104_IMAGE_FORMAT_R8_SINT
,
724 [PIPE_FORMAT_R8_UINT
] = GK104_IMAGE_FORMAT_R8_UINT
,
727 /* Auxiliary format description values for surface instructions.
728 * (log2(bytes per pixel) << 12) | (unk8 << 8) | unk22
730 static const uint16_t nve4_su_format_aux_map
[PIPE_FORMAT_COUNT
] =
732 [PIPE_FORMAT_R32G32B32A32_FLOAT
] = 0x4842,
733 [PIPE_FORMAT_R32G32B32A32_SINT
] = 0x4842,
734 [PIPE_FORMAT_R32G32B32A32_UINT
] = 0x4842,
736 [PIPE_FORMAT_R16G16B16A16_UNORM
] = 0x3933,
737 [PIPE_FORMAT_R16G16B16A16_SNORM
] = 0x3933,
738 [PIPE_FORMAT_R16G16B16A16_SINT
] = 0x3933,
739 [PIPE_FORMAT_R16G16B16A16_UINT
] = 0x3933,
740 [PIPE_FORMAT_R16G16B16A16_FLOAT
] = 0x3933,
742 [PIPE_FORMAT_R32G32_FLOAT
] = 0x3433,
743 [PIPE_FORMAT_R32G32_SINT
] = 0x3433,
744 [PIPE_FORMAT_R32G32_UINT
] = 0x3433,
746 [PIPE_FORMAT_R10G10B10A2_UNORM
] = 0x2a24,
747 /* [PIPE_FORMAT_R10G10B10A2_UINT] = 0x2a24, */
748 [PIPE_FORMAT_R8G8B8A8_UNORM
] = 0x2a24,
749 [PIPE_FORMAT_R8G8B8A8_SNORM
] = 0x2a24,
750 [PIPE_FORMAT_R8G8B8A8_SINT
] = 0x2a24,
751 [PIPE_FORMAT_R8G8B8A8_UINT
] = 0x2a24,
752 [PIPE_FORMAT_R11G11B10_FLOAT
] = 0x2a24,
754 [PIPE_FORMAT_R16G16_UNORM
] = 0x2524,
755 [PIPE_FORMAT_R16G16_SNORM
] = 0x2524,
756 [PIPE_FORMAT_R16G16_SINT
] = 0x2524,
757 [PIPE_FORMAT_R16G16_UINT
] = 0x2524,
758 [PIPE_FORMAT_R16G16_FLOAT
] = 0x2524,
760 [PIPE_FORMAT_R32_SINT
] = 0x2024,
761 [PIPE_FORMAT_R32_UINT
] = 0x2024,
762 [PIPE_FORMAT_R32_FLOAT
] = 0x2024,
764 [PIPE_FORMAT_R8G8_UNORM
] = 0x1615,
765 [PIPE_FORMAT_R8G8_SNORM
] = 0x1615,
766 [PIPE_FORMAT_R8G8_SINT
] = 0x1615,
767 [PIPE_FORMAT_R8G8_UINT
] = 0x1615,
769 [PIPE_FORMAT_R16_UNORM
] = 0x1115,
770 [PIPE_FORMAT_R16_SNORM
] = 0x1115,
771 [PIPE_FORMAT_R16_SINT
] = 0x1115,
772 [PIPE_FORMAT_R16_UINT
] = 0x1115,
773 [PIPE_FORMAT_R16_FLOAT
] = 0x1115,
775 [PIPE_FORMAT_R8_UNORM
] = 0x0206,
776 [PIPE_FORMAT_R8_SNORM
] = 0x0206,
777 [PIPE_FORMAT_R8_SINT
] = 0x0206,
778 [PIPE_FORMAT_R8_UINT
] = 0x0206
781 /* NOTE: These are hardcoded offsets for the shader library.
782 * TODO: Automate them.
784 static const uint16_t nve4_suldp_lib_offset
[PIPE_FORMAT_COUNT
] =
786 [PIPE_FORMAT_R32G32B32A32_FLOAT
] = 0x218,
787 [PIPE_FORMAT_R32G32B32A32_SINT
] = 0x218,
788 [PIPE_FORMAT_R32G32B32A32_UINT
] = 0x218,
789 [PIPE_FORMAT_R16G16B16A16_UNORM
] = 0x248,
790 [PIPE_FORMAT_R16G16B16A16_SNORM
] = 0x2b8,
791 [PIPE_FORMAT_R16G16B16A16_SINT
] = 0x330,
792 [PIPE_FORMAT_R16G16B16A16_UINT
] = 0x388,
793 [PIPE_FORMAT_R16G16B16A16_FLOAT
] = 0x3d8,
794 [PIPE_FORMAT_R32G32_FLOAT
] = 0x428,
795 [PIPE_FORMAT_R32G32_SINT
] = 0x468,
796 [PIPE_FORMAT_R32G32_UINT
] = 0x468,
797 [PIPE_FORMAT_R10G10B10A2_UNORM
] = 0x4a8,
798 /* [PIPE_FORMAT_R10G10B10A2_UINT] = 0x530, */
799 [PIPE_FORMAT_R8G8B8A8_UNORM
] = 0x588,
800 [PIPE_FORMAT_R8G8B8A8_SNORM
] = 0x5f8,
801 [PIPE_FORMAT_R8G8B8A8_SINT
] = 0x670,
802 [PIPE_FORMAT_R8G8B8A8_UINT
] = 0x6c8,
803 [PIPE_FORMAT_B5G6R5_UNORM
] = 0x718,
804 [PIPE_FORMAT_B5G5R5X1_UNORM
] = 0x7a0,
805 [PIPE_FORMAT_R16G16_UNORM
] = 0x828,
806 [PIPE_FORMAT_R16G16_SNORM
] = 0x890,
807 [PIPE_FORMAT_R16G16_SINT
] = 0x8f0,
808 [PIPE_FORMAT_R16G16_UINT
] = 0x948,
809 [PIPE_FORMAT_R16G16_FLOAT
] = 0x998,
810 [PIPE_FORMAT_R32_FLOAT
] = 0x9e8,
811 [PIPE_FORMAT_R32_SINT
] = 0xa30,
812 [PIPE_FORMAT_R32_UINT
] = 0xa30,
813 [PIPE_FORMAT_R8G8_UNORM
] = 0xa78,
814 [PIPE_FORMAT_R8G8_SNORM
] = 0xae0,
815 [PIPE_FORMAT_R8G8_UINT
] = 0xb48,
816 [PIPE_FORMAT_R8G8_SINT
] = 0xb98,
817 [PIPE_FORMAT_R16_UNORM
] = 0xbe8,
818 [PIPE_FORMAT_R16_SNORM
] = 0xc48,
819 [PIPE_FORMAT_R16_SINT
] = 0xca0,
820 [PIPE_FORMAT_R16_UINT
] = 0xce8,
821 [PIPE_FORMAT_R16_FLOAT
] = 0xd30,
822 [PIPE_FORMAT_R8_UNORM
] = 0xd88,
823 [PIPE_FORMAT_R8_SNORM
] = 0xde0,
824 [PIPE_FORMAT_R8_SINT
] = 0xe38,
825 [PIPE_FORMAT_R8_UINT
] = 0xe88,
826 [PIPE_FORMAT_R11G11B10_FLOAT
] = 0xed0