/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "nvc0_context.h"
#include "nvc0_resource.h"
#include "nv50_texture.xml.h"

#include "util/u_format.h"

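/* Translate a pipe swizzle selector into the hardware component map value:
 * for R/G/B/A, pick up the format's default mapping from TIC word 0;
 * ONE and ZERO map to the corresponding constant codes.
 */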
static INLINE uint32_t
nv50_tic_swizzle(uint32_t tc, unsigned swz)
{
   switch (swz) {
   case PIPE_SWIZZLE_RED:
      return (tc & NV50_TIC_0_MAPR__MASK) >> NV50_TIC_0_MAPR__SHIFT;
   case PIPE_SWIZZLE_GREEN:
      return (tc & NV50_TIC_0_MAPG__MASK) >> NV50_TIC_0_MAPG__SHIFT;
   case PIPE_SWIZZLE_BLUE:
      return (tc & NV50_TIC_0_MAPB__MASK) >> NV50_TIC_0_MAPB__SHIFT;
   case PIPE_SWIZZLE_ALPHA:
      return (tc & NV50_TIC_0_MAPA__MASK) >> NV50_TIC_0_MAPA__SHIFT;
   case PIPE_SWIZZLE_ONE:
      return NV50_TIC_MAP_ONE;
   case PIPE_SWIZZLE_ZERO:
   default:
      return NV50_TIC_MAP_ZERO;
   }
}

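/* Create a sampler view: fill in the 8 32-bit words of the texture image
 * control (TIC) entry. The GPU virtual address words (low bits in tic[1],
 * high bits in tic[2]) are left for the validation code to patch in via
 * relocations.
 */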
struct pipe_sampler_view *
nvc0_create_sampler_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ)
{
   const struct util_format_description *desc;
   uint32_t *tic;
   uint32_t swz[4];
   uint32_t depth;
   struct nvc0_tic_entry *view;
   struct nvc0_miptree *mt = nvc0_miptree(texture);

   view = MALLOC_STRUCT(nvc0_tic_entry);
   if (!view)
      return NULL;

   view->pipe = *templ;
   view->pipe.reference.count = 1;
   view->pipe.texture = NULL;
   view->pipe.context = pipe;

   view->id = -1;

   pipe_resource_reference(&view->pipe.texture, texture);

   tic = &view->tic[0];

   desc = util_format_description(mt->base.base.format);

   /* TIC[0] */

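   /* Start from the canonical TIC word 0 for the view's format and replace
    * its component mapping with the swizzle requested by the state tracker.
    */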
   tic[0] = nvc0_format_table[view->pipe.format].tic;

   swz[0] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_r);
   swz[1] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_g);
   swz[2] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_b);
   swz[3] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_a);
   tic[0] = (tic[0] & ~NV50_TIC_0_SWIZZLE__MASK) |
            (swz[0] << NV50_TIC_0_MAPR__SHIFT) |
            (swz[1] << NV50_TIC_0_MAPG__SHIFT) |
            (swz[2] << NV50_TIC_0_MAPB__SHIFT) |
            (swz[3] << NV50_TIC_0_MAPA__SHIFT);

   tic[1] = /* mt->base.bo->offset; */ 0;
   tic[2] = /* mt->base.bo->offset >> 32 */ 0;

   tic[2] |= 0x10001000 | /* NV50_TIC_2_NO_BORDER */ 0x40000000;

   if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
      tic[2] |= NV50_TIC_2_COLORSPACE_SRGB;

   if (mt->base.base.target != PIPE_TEXTURE_RECT)
      tic[2] |= NV50_TIC_2_NORMALIZED_COORDS;

   tic[2] |=
      ((mt->base.bo->tile_mode & 0x0f0) << (22 - 4)) |
      ((mt->base.bo->tile_mode & 0xf00) << (25 - 8));

   depth = MAX2(mt->base.base.array_size, mt->base.base.depth0);

   if (mt->base.base.target == PIPE_TEXTURE_1D_ARRAY ||
       mt->base.base.target == PIPE_TEXTURE_2D_ARRAY) {
      /* there doesn't seem to be a base layer field in TIC */
      tic[1] = view->pipe.u.tex.first_layer * mt->layer_stride;
      depth = view->pipe.u.tex.last_layer - view->pipe.u.tex.first_layer + 1;
   }

   switch (mt->base.base.target) {
   case PIPE_TEXTURE_1D:
      tic[2] |= NV50_TIC_2_TARGET_1D;
      break;
   case PIPE_TEXTURE_2D:
      tic[2] |= NV50_TIC_2_TARGET_2D;
      break;
   case PIPE_TEXTURE_RECT:
      tic[2] |= NV50_TIC_2_TARGET_RECT;
      break;
   case PIPE_TEXTURE_3D:
      tic[2] |= NV50_TIC_2_TARGET_3D;
      break;
   case PIPE_TEXTURE_CUBE:
      depth /= 6;
      if (depth > 1)
         tic[2] |= NV50_TIC_2_TARGET_CUBE_ARRAY;
      else
         tic[2] |= NV50_TIC_2_TARGET_CUBE;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_1D_ARRAY;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_2D_ARRAY;
      break;
   case PIPE_BUFFER:
      tic[2] |= NV50_TIC_2_TARGET_BUFFER | /* NV50_TIC_2_LINEAR */ (1 << 18);
      break;
   default:
      NOUVEAU_ERR("invalid texture target: %d\n", mt->base.base.target);
      pipe_resource_reference(&view->pipe.texture, NULL);
      FREE(view);
      return NULL;
   }

   if (mt->base.base.target == PIPE_BUFFER)
      tic[3] = mt->base.base.width0;
   else
      tic[3] = 0x00300000;

   tic[4] = (1 << 31) | mt->base.base.width0;

   tic[5] = mt->base.base.height0 & 0xffff;
   tic[5] |= depth << 16;
   tic[5] |= mt->base.base.last_level << 28;

   tic[6] = 0x03000000;

   tic[7] = (view->pipe.u.tex.last_level << 4) | view->pipe.u.tex.first_level;

   return &view->pipe;
}

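/* Validate the texture (TIC) bindings of shader stage s: upload descriptors
 * for views that do not have a TIC slot yet, flush the texture cache for
 * resources the GPU may have written, and emit the BIND_TIC methods.
 * Returns TRUE if a TIC_FLUSH is required afterwards.
 */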
static boolean
nvc0_validate_tic(struct nvc0_context *nvc0, int s)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nouveau_bo *txc = nvc0->screen->txc;
   unsigned i;
   boolean need_flush = FALSE;

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nvc0_tic_entry *tic = nvc0_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;

      if (!tic) {
         BEGIN_RING(chan, RING_3D(BIND_TIC(s)), 1);
         OUT_RING (chan, (i << 1) | 0);
         continue;
      }
      res = &nvc0_miptree(tic->pipe.texture)->base;

      if (tic->id < 0) {
         uint32_t offset = tic->tic[1];

         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         MARK_RING (chan, 9 + 8, 4);
         BEGIN_RING(chan, RING_MF(OFFSET_OUT_HIGH), 2);
         OUT_RELOCh(chan, txc, tic->id * 32, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
         OUT_RELOCl(chan, txc, tic->id * 32, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
         BEGIN_RING(chan, RING_MF(LINE_LENGTH_IN), 2);
         OUT_RING (chan, 32);
         OUT_RING (chan, 1);
         BEGIN_RING(chan, RING_MF(EXEC), 1);
         OUT_RING (chan, 0x100111);
         BEGIN_RING_NI(chan, RING_MF(DATA), 8);
         OUT_RING (chan, tic->tic[0]);
         OUT_RELOCl(chan, res->bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
         OUT_RELOC (chan, res->bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD |
                    NOUVEAU_BO_HIGH | NOUVEAU_BO_OR, tic->tic[2], tic->tic[2]);
         OUT_RINGp (chan, &tic->tic[3], 5);

         need_flush = TRUE;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         BEGIN_RING(chan, RING_3D(TEX_CACHE_CTL), 1);
         OUT_RING (chan, (tic->id << 4) | 1);
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_TEXTURES, res,
                               NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);

      BEGIN_RING(chan, RING_3D(BIND_TIC(s)), 1);
      OUT_RING (chan, (tic->id << 9) | (i << 1) | 1);
   }
   for (; i < nvc0->state.num_textures[s]; ++i) {
      BEGIN_RING(chan, RING_3D(BIND_TIC(s)), 1);
      OUT_RING (chan, (i << 1) | 0);
   }
   nvc0->state.num_textures[s] = nvc0->num_textures[s];

   return need_flush;
}

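/* Validate texture bindings for stages 0 and 4 (the vertex and fragment
 * programs in this driver's stage numbering) and flush the TIC cache if any
 * new descriptors were uploaded.
 */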
void nvc0_validate_textures(struct nvc0_context *nvc0)
{
   boolean need_flush;

   need_flush = nvc0_validate_tic(nvc0, 0);
   need_flush |= nvc0_validate_tic(nvc0, 4);

   if (need_flush) {
      BEGIN_RING(nvc0->screen->base.channel, RING_3D(TIC_FLUSH), 1);
      OUT_RING (nvc0->screen->base.channel, 0);
   }
}

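/* Validate the sampler state (TSC) bindings of shader stage s: new entries
 * are copied into the TSC half of the txc buffer (at offset 65536) with the
 * M2MF push helper. Returns TRUE if a TSC_FLUSH is required afterwards.
 */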
static boolean
nvc0_validate_tsc(struct nvc0_context *nvc0, int s)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   unsigned i;
   boolean need_flush = FALSE;

   for (i = 0; i < nvc0->num_samplers[s]; ++i) {
      struct nvc0_tsc_entry *tsc = nvc0_tsc_entry(nvc0->samplers[s][i]);

      if (!tsc) {
         BEGIN_RING(chan, RING_3D(BIND_TSC(s)), 1);
         OUT_RING (chan, (i << 4) | 0);
         continue;
      }
      if (tsc->id < 0) {
         tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);

         nvc0_m2mf_push_linear(&nvc0->base, nvc0->screen->txc,
                               65536 + tsc->id * 32, NOUVEAU_BO_VRAM,
                               32, tsc->tsc);
         need_flush = TRUE;
      }
      nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);

      BEGIN_RING(chan, RING_3D(BIND_TSC(s)), 1);
      OUT_RING (chan, (tsc->id << 12) | (i << 4) | 1);
   }
   for (; i < nvc0->state.num_samplers[s]; ++i) {
      BEGIN_RING(chan, RING_3D(BIND_TSC(s)), 1);
      OUT_RING (chan, (i << 4) | 0);
   }
   nvc0->state.num_samplers[s] = nvc0->num_samplers[s];

   return need_flush;
}

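/* Validate sampler bindings for the same stages as the textures above and
 * flush the TSC cache if any new entries were uploaded.
 */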
void nvc0_validate_samplers(struct nvc0_context *nvc0)
{
   boolean need_flush;

   need_flush = nvc0_validate_tsc(nvc0, 0);
   need_flush |= nvc0_validate_tsc(nvc0, 4);

   if (need_flush) {
      BEGIN_RING(nvc0->screen->base.channel, RING_3D(TSC_FLUSH), 1);
      OUT_RING (nvc0->screen->base.channel, 0);
   }
}