nouveau: Factor out common winsys bits into libnouveaudrm.a
[mesa.git] src/gallium/winsys/drm/nouveau/common/nv04_surface.c
#include "pipe/p_context.h"
#include "pipe/p_format.h"

#include "nouveau_context.h"

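/* Integer floor(log2(i)); used below to encode the power-of-two width and
 * height of a swizzled surface. */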
static INLINE int log2i(int i)
{
	int r = 0;

	if (i & 0xffff0000) {
		i >>= 16;
		r += 16;
	}
	if (i & 0x0000ff00) {
		i >>= 8;
		r += 8;
	}
	if (i & 0x000000f0) {
		i >>= 4;
		r += 4;
	}
	if (i & 0x0000000c) {
		i >>= 2;
		r += 2;
	}
	if (i & 0x00000002) {
		r += 1;
	}
	return r;
}

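/* Map a gallium pipe_format onto an NV04_CONTEXT_SURFACES_2D colour format,
 * or return -1 if the 2D surface object has no matching format. */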
static INLINE int
nv04_surface_format(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_A8_UNORM:
		return NV04_CONTEXT_SURFACES_2D_FORMAT_Y8;
	case PIPE_FORMAT_R16_SNORM:
	case PIPE_FORMAT_R5G6B5_UNORM:
		return NV04_CONTEXT_SURFACES_2D_FORMAT_R5G6B5;
	case PIPE_FORMAT_X8R8G8B8_UNORM:
	case PIPE_FORMAT_A8R8G8B8_UNORM:
		return NV04_CONTEXT_SURFACES_2D_FORMAT_A8R8G8B8;
	case PIPE_FORMAT_Z24S8_UNORM:
		return NV04_CONTEXT_SURFACES_2D_FORMAT_Y32;
	default:
		return -1;
	}
}

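/* Map a pipe_format onto an NV04_GDI_RECTANGLE_TEXT colour format for solid
 * fills, or return -1 if unsupported. */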
static INLINE int
nv04_rect_format(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_A8_UNORM:
		return NV04_GDI_RECTANGLE_TEXT_COLOR_FORMAT_A8R8G8B8;
	case PIPE_FORMAT_R5G6B5_UNORM:
		return NV04_GDI_RECTANGLE_TEXT_COLOR_FORMAT_A16R5G6B5;
	case PIPE_FORMAT_A8R8G8B8_UNORM:
	case PIPE_FORMAT_Z24S8_UNORM:
		return NV04_GDI_RECTANGLE_TEXT_COLOR_FORMAT_A8R8G8B8;
	default:
		return -1;
	}
}

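/* Map a pipe_format onto an NV04_SCALED_IMAGE_FROM_MEMORY colour format, or
 * return -1 if unsupported. */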
static INLINE int
nv04_scaled_image_format(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_A1R5G5B5_UNORM:
		return NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_FORMAT_A1R5G5B5;
	case PIPE_FORMAT_A8R8G8B8_UNORM:
		return NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_FORMAT_A8R8G8B8;
	case PIPE_FORMAT_X8R8G8B8_UNORM:
		return NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_FORMAT_X8R8G8B8;
	case PIPE_FORMAT_R5G6B5_UNORM:
	case PIPE_FORMAT_R16_SNORM:
		return NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_FORMAT_R5G6B5;
	default:
		return -1;
	}
}

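/* Interleave the low 12 bits of the x and y coordinates (Morton/Z-order) to
 * get the texel index inside a swizzled surface. */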
static INLINE unsigned
nv04_swizzle_bits(unsigned x, unsigned y)
{
	unsigned u = (x & 0x001) << 0 |
		     (x & 0x002) << 1 |
		     (x & 0x004) << 2 |
		     (x & 0x008) << 3 |
		     (x & 0x010) << 4 |
		     (x & 0x020) << 5 |
		     (x & 0x040) << 6 |
		     (x & 0x080) << 7 |
		     (x & 0x100) << 8 |
		     (x & 0x200) << 9 |
		     (x & 0x400) << 10 |
		     (x & 0x800) << 11;

	unsigned v = (y & 0x001) << 1 |
		     (y & 0x002) << 2 |
		     (y & 0x004) << 3 |
		     (y & 0x008) << 4 |
		     (y & 0x010) << 5 |
		     (y & 0x020) << 6 |
		     (y & 0x040) << 7 |
		     (y & 0x080) << 8 |
		     (y & 0x100) << 9 |
		     (y & 0x200) << 10 |
		     (y & 0x400) << 11 |
		     (y & 0x800) << 12;
	return v | u;
}

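/* Copy by streaming the source through the scaled-image (SIFM) object into
 * the destination, which is bound as a swizzled surface.  The copy is split
 * into sub-rectangles of at most 1024x1024 texels, and the destination
 * offset of each tile is derived with nv04_swizzle_bits(). */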
static void
nv04_surface_copy_swizzle(struct nouveau_context *nv, unsigned dx, unsigned dy,
			  unsigned sx, unsigned sy, unsigned w, unsigned h)
{
	struct nouveau_channel *chan = nv->nvc->channel;
	struct pipe_surface *dst = nv->surf_dst;
	struct pipe_surface *src = nv->surf_src;

	const unsigned max_w = 1024;
	const unsigned max_h = 1024;
	const unsigned sub_w = w > max_w ? max_w : w;
	const unsigned sub_h = h > max_h ? max_h : h;
	unsigned cx = 0;
	unsigned cy = 0;

	/* POT or GTFO */
	assert(!(w & (w - 1)) && !(h & (h - 1)));

	BEGIN_RING(chan, nv->nvc->NvSwzSurf, NV04_SWIZZLED_SURFACE_DMA_IMAGE, 1);
	OUT_RELOCo(chan, nouveau_buffer(dst->buffer)->bo,
		   NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
	BEGIN_RING(chan, nv->nvc->NvSwzSurf, NV04_SWIZZLED_SURFACE_FORMAT, 1);
	OUT_RING (chan, nv04_surface_format(dst->format) |
		  log2i(w) << NV04_SWIZZLED_SURFACE_FORMAT_BASE_SIZE_U_SHIFT |
		  log2i(h) << NV04_SWIZZLED_SURFACE_FORMAT_BASE_SIZE_V_SHIFT);

	BEGIN_RING(chan, nv->nvc->NvSIFM, NV04_SCALED_IMAGE_FROM_MEMORY_DMA_IMAGE, 1);
	OUT_RELOCo(chan, nouveau_buffer(src->buffer)->bo,
		   NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
	BEGIN_RING(chan, nv->nvc->NvSIFM, NV04_SCALED_IMAGE_FROM_MEMORY_SURFACE, 1);
	OUT_RING (chan, nv->nvc->NvSwzSurf->handle);

	for (cy = 0; cy < h; cy += sub_h) {
		for (cx = 0; cx < w; cx += sub_w) {
			BEGIN_RING(chan, nv->nvc->NvSwzSurf, NV04_SWIZZLED_SURFACE_OFFSET, 1);
			OUT_RELOCl(chan, nouveau_buffer(dst->buffer)->bo,
				   dst->offset + nv04_swizzle_bits(cx, cy) * dst->block.size,
				   NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);

			BEGIN_RING(chan, nv->nvc->NvSIFM, NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_CONVERSION, 9);
			OUT_RING (chan, NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_CONVERSION_TRUNCATE);
			OUT_RING (chan, nv04_scaled_image_format(src->format));
			OUT_RING (chan, NV04_SCALED_IMAGE_FROM_MEMORY_OPERATION_SRCCOPY);
			OUT_RING (chan, 0);
			OUT_RING (chan, sub_h << 16 | sub_w);
			OUT_RING (chan, 0);
			OUT_RING (chan, sub_h << 16 | sub_w);
			OUT_RING (chan, 1 << 20);
			OUT_RING (chan, 1 << 20);

			BEGIN_RING(chan, nv->nvc->NvSIFM, NV04_SCALED_IMAGE_FROM_MEMORY_SIZE, 4);
			OUT_RING (chan, sub_h << 16 | sub_w);
			OUT_RING (chan, src->stride |
				  NV04_SCALED_IMAGE_FROM_MEMORY_FORMAT_ORIGIN_CENTER |
				  NV04_SCALED_IMAGE_FROM_MEMORY_FORMAT_FILTER_POINT_SAMPLE);
			OUT_RELOCl(chan, nouveau_buffer(src->buffer)->bo,
				   src->offset + cy * src->stride + cx * src->block.size,
				   NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
			OUT_RING (chan, 0);
		}
	}
}

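/* Line-by-line copy through the memory-to-memory-format (M2MF) object; each
 * transfer moves at most 2047 lines, so larger copies loop. */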
static void
nv04_surface_copy_m2mf(struct nouveau_context *nv, unsigned dx, unsigned dy,
		       unsigned sx, unsigned sy, unsigned w, unsigned h)
{
	struct nouveau_channel *chan = nv->nvc->channel;
	struct pipe_surface *dst = nv->surf_dst;
	struct pipe_surface *src = nv->surf_src;
	unsigned dst_offset, src_offset;

	dst_offset = dst->offset + (dy * dst->stride) + (dx * dst->block.size);
	src_offset = src->offset + (sy * src->stride) + (sx * src->block.size);

	while (h) {
		int count = (h > 2047) ? 2047 : h;

		BEGIN_RING(chan, nv->nvc->NvM2MF,
			   NV04_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RELOCl(chan, nouveau_buffer(src->buffer)->bo, src_offset,
			   NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
		OUT_RELOCl(chan, nouveau_buffer(dst->buffer)->bo, dst_offset,
			   NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_WR);
		OUT_RING (chan, src->stride);
		OUT_RING (chan, dst->stride);
		OUT_RING (chan, w * src->block.size);
		OUT_RING (chan, count);
		OUT_RING (chan, 0x0101);
		OUT_RING (chan, 0);

		h -= count;
		src_offset += src->stride * count;
		dst_offset += dst->stride * count;
	}
}

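/* Blit w x h texels from (sx,sy) to (dx,dy) through the image blit object;
 * the surfaces were bound to NvCtxSurf2D during the prep step. */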
static void
nv04_surface_copy_blit(struct nouveau_context *nv, unsigned dx, unsigned dy,
		       unsigned sx, unsigned sy, unsigned w, unsigned h)
{
	struct nouveau_channel *chan = nv->nvc->channel;

	BEGIN_RING(chan, nv->nvc->NvImageBlit, 0x0300, 3);
	OUT_RING (chan, (sy << 16) | sx);
	OUT_RING (chan, (dy << 16) | dx);
	OUT_RING (chan, ( h << 16) | w);
}

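/* Pick a copy path for a dst/src surface pair: SIFM for linear<->swizzled
 * copies, M2MF when the surface offsets are not 64-byte aligned, and the
 * NV_IMAGE_BLIT/NV_CONTEXT_SURFACES_2D pair otherwise. */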
static int
nv04_surface_copy_prep(struct nouveau_context *nv, struct pipe_surface *dst,
		       struct pipe_surface *src)
{
	struct nouveau_channel *chan = nv->nvc->channel;
	int format;

	if (src->format != dst->format)
		return 1;

	/* Setup transfer to swizzle the texture to vram if needed */
	/* FIXME/TODO: check proper limits of this operation */
	if (src->texture && dst->texture) {
		unsigned int src_linear = src->texture->tex_usage &
			NOUVEAU_TEXTURE_USAGE_LINEAR;
		unsigned int dst_linear = dst->texture->tex_usage &
			NOUVEAU_TEXTURE_USAGE_LINEAR;
		if (src_linear ^ dst_linear) {
			nv->surface_copy = nv04_surface_copy_swizzle;
			nv->surf_dst = dst;
			nv->surf_src = src;
			return 0;
		}
	}

	/* NV_CONTEXT_SURFACES_2D has buffer alignment restrictions, fallback
	 * to NV_MEMORY_TO_MEMORY_FORMAT in this case.
	 */
	if ((src->offset & 63) || (dst->offset & 63)) {
		BEGIN_RING(nv->nvc->channel, nv->nvc->NvM2MF,
			   NV04_MEMORY_TO_MEMORY_FORMAT_DMA_BUFFER_IN, 2);
		OUT_RELOCo(chan, nouveau_buffer(src->buffer)->bo,
			   NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
		OUT_RELOCo(chan, nouveau_buffer(dst->buffer)->bo,
			   NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);

		nv->surface_copy = nv04_surface_copy_m2mf;
		nv->surf_dst = dst;
		nv->surf_src = src;
		return 0;
	}

	if ((format = nv04_surface_format(dst->format)) < 0) {
		NOUVEAU_ERR("Bad surface format 0x%x\n", dst->format);
		return 1;
	}
	nv->surface_copy = nv04_surface_copy_blit;

	BEGIN_RING(chan, nv->nvc->NvCtxSurf2D,
		   NV04_CONTEXT_SURFACES_2D_DMA_IMAGE_SOURCE, 2);
	OUT_RELOCo(chan, nouveau_buffer(src->buffer)->bo,
		   NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
	OUT_RELOCo(chan, nouveau_buffer(dst->buffer)->bo,
		   NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);

	BEGIN_RING(chan, nv->nvc->NvCtxSurf2D,
		   NV04_CONTEXT_SURFACES_2D_FORMAT, 4);
	OUT_RING (chan, format);
	OUT_RING (chan, (dst->stride << 16) | src->stride);
	OUT_RELOCl(chan, nouveau_buffer(src->buffer)->bo, src->offset,
		   NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
	OUT_RELOCl(chan, nouveau_buffer(dst->buffer)->bo, dst->offset,
		   NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);

	return 0;
}

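/* Flush the pushbuffer once a copy sequence has been emitted. */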
static void
nv04_surface_copy_done(struct nouveau_context *nv)
{
	FIRE_RING(nv->nvc->channel);
}

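/* Solid-fill a rectangle with GDI_RECTANGLE_TEXT, binding the destination as
 * both source and destination of the 2D surface object. */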
static int
nv04_surface_fill(struct nouveau_context *nv, struct pipe_surface *dst,
		  unsigned dx, unsigned dy, unsigned w, unsigned h,
		  unsigned value)
{
	struct nouveau_channel *chan = nv->nvc->channel;
	struct nouveau_grobj *surf2d = nv->nvc->NvCtxSurf2D;
	struct nouveau_grobj *rect = nv->nvc->NvGdiRect;
	int cs2d_format, gdirect_format;

	if ((cs2d_format = nv04_surface_format(dst->format)) < 0) {
		NOUVEAU_ERR("Bad format = %d\n", dst->format);
		return 1;
	}

	if ((gdirect_format = nv04_rect_format(dst->format)) < 0) {
		NOUVEAU_ERR("Bad format = %d\n", dst->format);
		return 1;
	}

	BEGIN_RING(chan, surf2d, NV04_CONTEXT_SURFACES_2D_DMA_IMAGE_SOURCE, 2);
	OUT_RELOCo(chan, nouveau_buffer(dst->buffer)->bo,
		   NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
	OUT_RELOCo(chan, nouveau_buffer(dst->buffer)->bo,
		   NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
	BEGIN_RING(chan, surf2d, NV04_CONTEXT_SURFACES_2D_FORMAT, 4);
	OUT_RING (chan, cs2d_format);
	OUT_RING (chan, (dst->stride << 16) | dst->stride);
	OUT_RELOCl(chan, nouveau_buffer(dst->buffer)->bo, dst->offset,
		   NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
	OUT_RELOCl(chan, nouveau_buffer(dst->buffer)->bo, dst->offset,
		   NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);

	BEGIN_RING(chan, rect, NV04_GDI_RECTANGLE_TEXT_COLOR_FORMAT, 1);
	OUT_RING (chan, gdirect_format);
	BEGIN_RING(chan, rect, NV04_GDI_RECTANGLE_TEXT_COLOR1_A, 1);
	OUT_RING (chan, value);
	BEGIN_RING(chan, rect,
		   NV04_GDI_RECTANGLE_TEXT_UNCLIPPED_RECTANGLE_POINT(0), 2);
	OUT_RING (chan, (dx << 16) | dy);
	OUT_RING (chan, ( w << 16) | h);

	FIRE_RING(chan);
	return 0;
}

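/* Allocate and bind the per-channel 2D objects (M2MF, 2D surfaces, image
 * blit, GDI rect, swizzled surface, SIFM), choosing object classes by
 * chipset generation. */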
int
nouveau_surface_channel_create_nv04(struct nouveau_channel_context *nvc)
{
	struct nouveau_channel *chan = nvc->channel;
	unsigned chipset = nvc->channel->device->chipset, class;
	int ret;

	if ((ret = nouveau_grobj_alloc(chan, nvc->next_handle++, 0x39,
				       &nvc->NvM2MF))) {
		NOUVEAU_ERR("Error creating m2mf object: %d\n", ret);
		return 1;
	}
	BIND_RING (chan, nvc->NvM2MF, nvc->next_subchannel++);
	BEGIN_RING(chan, nvc->NvM2MF,
		   NV04_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
	OUT_RING (chan, nvc->sync_notifier->handle);

	class = chipset < 0x10 ? NV04_CONTEXT_SURFACES_2D :
				 NV10_CONTEXT_SURFACES_2D;
	if ((ret = nouveau_grobj_alloc(chan, nvc->next_handle++, class,
				       &nvc->NvCtxSurf2D))) {
		NOUVEAU_ERR("Error creating 2D surface object: %d\n", ret);
		return 1;
	}
	BIND_RING (chan, nvc->NvCtxSurf2D, nvc->next_subchannel++);
	BEGIN_RING(chan, nvc->NvCtxSurf2D,
		   NV04_CONTEXT_SURFACES_2D_DMA_IMAGE_SOURCE, 2);
	OUT_RING (chan, nvc->channel->vram->handle);
	OUT_RING (chan, nvc->channel->vram->handle);

	class = chipset < 0x10 ? NV04_IMAGE_BLIT : NV12_IMAGE_BLIT;
	if ((ret = nouveau_grobj_alloc(chan, nvc->next_handle++, class,
				       &nvc->NvImageBlit))) {
		NOUVEAU_ERR("Error creating blit object: %d\n", ret);
		return 1;
	}
	BIND_RING (chan, nvc->NvImageBlit, nvc->next_subchannel++);
	BEGIN_RING(chan, nvc->NvImageBlit, NV04_IMAGE_BLIT_DMA_NOTIFY, 1);
	OUT_RING (chan, nvc->sync_notifier->handle);
	BEGIN_RING(chan, nvc->NvImageBlit, NV04_IMAGE_BLIT_SURFACE, 1);
	OUT_RING (chan, nvc->NvCtxSurf2D->handle);
	BEGIN_RING(chan, nvc->NvImageBlit, NV04_IMAGE_BLIT_OPERATION, 1);
	OUT_RING (chan, NV04_IMAGE_BLIT_OPERATION_SRCCOPY);

	class = NV04_GDI_RECTANGLE_TEXT;
	if ((ret = nouveau_grobj_alloc(chan, nvc->next_handle++, class,
				       &nvc->NvGdiRect))) {
		NOUVEAU_ERR("Error creating rect object: %d\n", ret);
		return 1;
	}
	BIND_RING (chan, nvc->NvGdiRect, nvc->next_subchannel++);
	BEGIN_RING(chan, nvc->NvGdiRect, NV04_GDI_RECTANGLE_TEXT_DMA_NOTIFY, 1);
	OUT_RING (chan, nvc->sync_notifier->handle);
	BEGIN_RING(chan, nvc->NvGdiRect, NV04_GDI_RECTANGLE_TEXT_SURFACE, 1);
	OUT_RING (chan, nvc->NvCtxSurf2D->handle);
	BEGIN_RING(chan, nvc->NvGdiRect, NV04_GDI_RECTANGLE_TEXT_OPERATION, 1);
	OUT_RING (chan, NV04_GDI_RECTANGLE_TEXT_OPERATION_SRCCOPY);
	BEGIN_RING(chan, nvc->NvGdiRect,
		   NV04_GDI_RECTANGLE_TEXT_MONOCHROME_FORMAT, 1);
	OUT_RING (chan, NV04_GDI_RECTANGLE_TEXT_MONOCHROME_FORMAT_LE);

	switch (chipset & 0xf0) {
	case 0x00:
	case 0x10:
		class = NV04_SWIZZLED_SURFACE;
		break;
	case 0x20:
		class = NV20_SWIZZLED_SURFACE;
		break;
	case 0x30:
		class = NV30_SWIZZLED_SURFACE;
		break;
	case 0x40:
	case 0x60:
		class = NV40_SWIZZLED_SURFACE;
		break;
	default:
		/* Famous last words: this really can't happen.. */
		assert(0);
		break;
	}

	ret = nouveau_grobj_alloc(chan, nvc->next_handle++, class,
				  &nvc->NvSwzSurf);
	if (ret) {
		NOUVEAU_ERR("Error creating swizzled surface: %d\n", ret);
		return 1;
	}

	BIND_RING (chan, nvc->NvSwzSurf, nvc->next_subchannel++);

	if (chipset < 0x10) {
		class = NV04_SCALED_IMAGE_FROM_MEMORY;
	} else
	if (chipset < 0x40) {
		class = NV10_SCALED_IMAGE_FROM_MEMORY;
	} else {
		class = NV40_SCALED_IMAGE_FROM_MEMORY;
	}

	ret = nouveau_grobj_alloc(chan, nvc->next_handle++, class,
				  &nvc->NvSIFM);
	if (ret) {
		NOUVEAU_ERR("Error creating scaled image object: %d\n", ret);
		return 1;
	}

	BIND_RING (chan, nvc->NvSIFM, nvc->next_subchannel++);

	return 0;
}

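/* Hook the NV04 2D paths into the context's surface function pointers. */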
int
nouveau_surface_init_nv04(struct nouveau_context *nv)
{
	nv->surface_copy_prep = nv04_surface_copy_prep;
	nv->surface_copy = nv04_surface_copy_blit;
	nv->surface_copy_done = nv04_surface_copy_done;
	nv->surface_fill = nv04_surface_fill;
	return 0;
}