nouveau: send back a debug message when waiting for a fence to complete
[mesa.git] / src / gallium / drivers / nouveau / nvc0 / nvc0_transfer.c
1
2 #include "util/u_format.h"
3
4 #include "nvc0/nvc0_context.h"
5
6 #include "nv50/nv50_defs.xml.h"
7
/* Per-map transfer state.
 * rect[0] describes the miptree region being transferred, rect[1] the
 * linear staging bo used when the resource cannot be mapped directly
 * (rect[1].bo stays NULL for direct maps). */
struct nvc0_transfer {
   struct pipe_transfer base;
   struct nv50_m2mf_rect rect[2];
   uint32_t nblocksx; /* width of the box in format blocks (incl. MS scaling) */
   uint16_t nblocksy; /* height of the box in format blocks */
   uint16_t nlayers;  /* box->depth: number of layers/slices to copy */
};
15
/* Copy a rectangle of format blocks between two surfaces with the Fermi
 * M2MF engine.  Either side may be tiled (bo has a memtype) or linear;
 * for linear surfaces the (x, y) origin is folded into the byte offset
 * and the corresponding LINEAR flag is set in the EXEC word.
 * The copy is issued in passes of at most 2047 lines per EXEC.
 */
static void
nvc0_m2mf_transfer_rect(struct nvc0_context *nvc0,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bufctx *bctx = nvc0->bufctx;
   const int cpp = dst->cpp;
   uint32_t src_ofst = src->base;
   uint32_t dst_ofst = dst->base;
   uint32_t height = nblocksy;
   uint32_t sy = src->y;
   uint32_t dy = dst->y;
   /* NOTE(review): presumably the EXEC increment field at bit 20 — confirm
    * against the class headers. */
   uint32_t exec = (1 << 20);

   assert(dst->cpp == src->cpp);

   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   /* Describe the source layout: tiling parameters for tiled bos, or a
    * pitch (with origin folded into the offset) for linear ones. */
   if (nouveau_bo_memtype(src->bo)) {
      BEGIN_NVC0(push, NVC0_M2MF(TILING_MODE_IN), 5);
      PUSH_DATA (push, src->tile_mode);
      PUSH_DATA (push, src->width * cpp);
      PUSH_DATA (push, src->height);
      PUSH_DATA (push, src->depth);
      PUSH_DATA (push, src->z);
   } else {
      src_ofst += src->y * src->pitch + src->x * cpp;

      BEGIN_NVC0(push, NVC0_M2MF(PITCH_IN), 1);
      PUSH_DATA (push, src->width * cpp);

      exec |= NVC0_M2MF_EXEC_LINEAR_IN;
   }

   /* Same for the destination layout. */
   if (nouveau_bo_memtype(dst->bo)) {
      BEGIN_NVC0(push, NVC0_M2MF(TILING_MODE_OUT), 5);
      PUSH_DATA (push, dst->tile_mode);
      PUSH_DATA (push, dst->width * cpp);
      PUSH_DATA (push, dst->height);
      PUSH_DATA (push, dst->depth);
      PUSH_DATA (push, dst->z);
   } else {
      dst_ofst += dst->y * dst->pitch + dst->x * cpp;

      BEGIN_NVC0(push, NVC0_M2MF(PITCH_OUT), 1);
      PUSH_DATA (push, dst->width * cpp);

      exec |= NVC0_M2MF_EXEC_LINEAR_OUT;
   }

   while (height) {
      /* 2047 is the maximum line count accepted per pass. */
      int line_count = height > 2047 ? 2047 : height;

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->bo->offset + src_ofst);
      PUSH_DATA (push, src->bo->offset + src_ofst);

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->bo->offset + dst_ofst);
      PUSH_DATA (push, dst->bo->offset + dst_ofst);

      /* Tiled side: advance by updating the (x, y) position.
       * Linear side: advance the byte offset instead. */
      if (!(exec & NVC0_M2MF_EXEC_LINEAR_IN)) {
         BEGIN_NVC0(push, NVC0_M2MF(TILING_POSITION_IN_X), 2);
         PUSH_DATA (push, src->x * cpp);
         PUSH_DATA (push, sy);
      } else {
         src_ofst += line_count * src->pitch;
      }
      if (!(exec & NVC0_M2MF_EXEC_LINEAR_OUT)) {
         BEGIN_NVC0(push, NVC0_M2MF(TILING_POSITION_OUT_X), 2);
         PUSH_DATA (push, dst->x * cpp);
         PUSH_DATA (push, dy);
      } else {
         dst_ofst += line_count * dst->pitch;
      }

      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, nblocksx * cpp);
      PUSH_DATA (push, line_count);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, exec);

      height -= line_count;
      sy += line_count;
      dy += line_count;
   }

   nouveau_bufctx_reset(bctx, 0);
}
110
/* Copy a rectangle of format blocks using the Kepler (NVE4+) dedicated
 * COPY engine instead of M2MF.  Unlike the Fermi path this issues the
 * whole rectangle in a single EXEC; linear surfaces fold their (x, y)
 * origin into the base offset and set the corresponding LINEAR mode bit.
 * The COPY engine methods are raw offsets (SUBC_COPY); the bit meanings
 * noted in the comments follow the existing in-line annotations.
 */
static void
nve4_m2mf_transfer_rect(struct nvc0_context *nvc0,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bufctx *bctx = nvc0->bufctx;
   uint32_t exec;
   uint32_t src_base = src->base;
   uint32_t dst_base = dst->base;
   const int cpp = dst->cpp;

   assert(dst->cpp == src->cpp);

   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   exec = 0x200 /* 2D_ENABLE */ | 0x6 /* UNK */;

   /* Linear surfaces can't address z; fold (x, y) into the base offset. */
   if (!nouveau_bo_memtype(dst->bo)) {
      assert(!dst->z);
      dst_base += dst->y * dst->pitch + dst->x * cpp;
      exec |= 0x100; /* DST_MODE_2D_LINEAR */
   }
   if (!nouveau_bo_memtype(src->bo)) {
      assert(!src->z);
      src_base += src->y * src->pitch + src->x * cpp;
      exec |= 0x080; /* SRC_MODE_2D_LINEAR */
   }

   /* Destination surface description (tile mode, dims, position). */
   BEGIN_NVC0(push, SUBC_COPY(0x070c), 6);
   PUSH_DATA (push, 0x1000 | dst->tile_mode);
   PUSH_DATA (push, dst->pitch);
   PUSH_DATA (push, dst->height);
   PUSH_DATA (push, dst->depth);
   PUSH_DATA (push, dst->z);
   PUSH_DATA (push, (dst->y << 16) | (dst->x * cpp));

   /* Source surface description. */
   BEGIN_NVC0(push, SUBC_COPY(0x0728), 6);
   PUSH_DATA (push, 0x1000 | src->tile_mode);
   PUSH_DATA (push, src->pitch);
   PUSH_DATA (push, src->height);
   PUSH_DATA (push, src->depth);
   PUSH_DATA (push, src->z);
   PUSH_DATA (push, (src->y << 16) | (src->x * cpp));

   /* Addresses, pitches, and copy extent in bytes x lines. */
   BEGIN_NVC0(push, SUBC_COPY(0x0400), 8);
   PUSH_DATAh(push, src->bo->offset + src_base);
   PUSH_DATA (push, src->bo->offset + src_base);
   PUSH_DATAh(push, dst->bo->offset + dst_base);
   PUSH_DATA (push, dst->bo->offset + dst_base);
   PUSH_DATA (push, src->pitch);
   PUSH_DATA (push, dst->pitch);
   PUSH_DATA (push, nblocksx * cpp);
   PUSH_DATA (push, nblocksy);

   /* Kick off the copy. */
   BEGIN_NVC0(push, SUBC_COPY(0x0300), 1);
   PUSH_DATA (push, exec);

   nouveau_bufctx_reset(bctx, 0);
}
175
/* Upload CPU data into a linear range of a bo by streaming it through the
 * M2MF inline DATA method.  Data is pushed in chunks of at most
 * NV04_PFIFO_MAX_PACKET_LEN dwords; the final chunk's LINE_LENGTH_IN is
 * clamped to the remaining byte size so trailing bytes of a non-multiple-
 * of-4 size are not written.  Stops early if pushbuf space runs out.
 */
void
nvc0_m2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4; /* round up to whole dwords */

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      /* nr data dwords plus 9 dwords of method headers/arguments */
      if (!PUSH_SPACE(push, nr + 9))
         break;

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4));
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      /* NOTE(review): presumably LINEAR_IN/OUT + inline-data mode bits —
       * confirm against the M2MF class header. */
      PUSH_DATA (push, 0x100111);

      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_NIC0(push, NVC0_M2MF(DATA), nr);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}
217
/* Kepler (NVE4+) variant of the inline upload: streams CPU data through
 * the P2MF UPLOAD methods.  The EXEC method takes the data inline via
 * BEGIN_1IC0, so each chunk is limited to NV04_PFIFO_MAX_PACKET_LEN - 1
 * data dwords.  As in the M2MF path, the last chunk's line length is
 * clamped to the remaining byte size.
 */
void
nve4_p2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4; /* round up to whole dwords */

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      unsigned nr = MIN2(count, (NV04_PFIFO_MAX_PACKET_LEN - 1));

      /* nr data dwords plus 10 dwords of method headers/arguments */
      if (!PUSH_SPACE(push, nr + 10))
         break;

      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4));
      PUSH_DATA (push, 1);
      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), nr + 1);
      PUSH_DATA (push, 0x1001);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}
257
/* GPU-side linear buffer-to-buffer copy via M2MF, split into chunks of at
 * most 128 KiB (1 << 17 bytes) per EXEC.  Both ranges are treated as
 * linear regardless of the bos' memtypes.
 */
static void
nvc0_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   while (size) {
      unsigned bytes = MIN2(size, 1 << 17);

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + dstoff);
      PUSH_DATA (push, dst->offset + dstoff);
      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->offset + srcoff);
      PUSH_DATA (push, src->offset + srcoff);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, bytes);
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, NVC0_M2MF_EXEC_QUERY_SHORT |
                 NVC0_M2MF_EXEC_LINEAR_IN | NVC0_M2MF_EXEC_LINEAR_OUT);

      srcoff += bytes;
      dstoff += bytes;
      size -= bytes;
   }

   nouveau_bufctx_reset(bctx, 0);
}
295
/* Kepler (NVE4+) linear buffer-to-buffer copy using the COPY engine.
 * The whole range is copied with a single EXEC; no chunking is needed.
 * Method offsets are raw (SUBC_COPY): 0x0400 = src/dst addresses,
 * 0x0418 = byte count, 0x0300 = exec.
 */
static void
nve4_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   BEGIN_NVC0(push, SUBC_COPY(0x0400), 4);
   PUSH_DATAh(push, src->offset + srcoff);
   PUSH_DATA (push, src->offset + srcoff);
   PUSH_DATAh(push, dst->offset + dstoff);
   PUSH_DATA (push, dst->offset + dstoff);
   BEGIN_NVC0(push, SUBC_COPY(0x0418), 1);
   PUSH_DATA (push, size);
   BEGIN_NVC0(push, SUBC_COPY(0x0300), 1);
   /* NOTE(review): 0x186 presumably selects 1D/linear copy mode — confirm
    * against the COPY class documentation. */
   PUSH_DATA (push, 0x186);

   nouveau_bufctx_reset(bctx, 0);
}
322
323
324 static inline bool
325 nvc0_mt_transfer_can_map_directly(struct nv50_miptree *mt)
326 {
327 if (mt->base.domain == NOUVEAU_BO_VRAM)
328 return false;
329 if (mt->base.base.usage != PIPE_USAGE_STAGING)
330 return false;
331 return !nouveau_bo_memtype(mt->base.bo);
332 }
333
334 static inline bool
335 nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage)
336 {
337 if (!mt->base.mm) {
338 uint32_t access = (usage & PIPE_TRANSFER_WRITE) ?
339 NOUVEAU_BO_WR : NOUVEAU_BO_RD;
340 return !nouveau_bo_wait(mt->base.bo, access, nvc0->base.client);
341 }
342 if (usage & PIPE_TRANSFER_WRITE)
343 return !mt->base.fence || nouveau_fence_wait(mt->base.fence, &nvc0->base.debug);
344 return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
345 }
346
/* pipe_context::transfer_map for miptrees.
 * Fast path: linear staging resources outside VRAM are mapped directly
 * (after syncing).  Otherwise a GART staging bo (rect[1]) is allocated;
 * for reads the requested box is copied layer by layer from the miptree
 * (rect[0]) into the staging bo before mapping it.
 * Returns the CPU pointer and fills *ptransfer, or NULL on failure.
 */
void *
nvc0_miptree_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *res,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nouveau_device *dev = nvc0->screen->base.device;
   struct nv50_miptree *mt = nv50_miptree(res);
   struct nvc0_transfer *tx;
   uint32_t size;
   int ret;
   unsigned flags = 0;

   if (nvc0_mt_transfer_can_map_directly(mt)) {
      /* Try the direct path; fall back to staging unless the caller
       * demanded a direct map. */
      ret = !nvc0_mt_sync(nvc0, mt, usage);
      if (!ret)
         ret = nouveau_bo_map(mt->base.bo, 0, NULL);
      if (ret &&
          (usage & PIPE_TRANSFER_MAP_DIRECTLY))
         return NULL;
      if (!ret)
         usage |= PIPE_TRANSFER_MAP_DIRECTLY;
   } else
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return NULL;

   tx = CALLOC_STRUCT(nvc0_transfer);
   if (!tx)
      return NULL;

   pipe_resource_reference(&tx->base.resource, res);

   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;

   /* Plain formats scale by the multisample block factors; compressed
    * formats count blocks. */
   if (util_format_is_plain(res->format)) {
      tx->nblocksx = box->width << mt->ms_x;
      tx->nblocksy = box->height << mt->ms_y;
   } else {
      tx->nblocksx = util_format_get_nblocksx(res->format, box->width);
      tx->nblocksy = util_format_get_nblocksy(res->format, box->height);
   }
   tx->nlayers = box->depth;

   tx->base.stride = tx->nblocksx * util_format_get_blocksize(res->format);
   tx->base.layer_stride = tx->nblocksy * tx->base.stride;

   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      tx->base.stride = align(tx->base.stride, 128);
      *ptransfer = &tx->base;
      return mt->base.bo->map + mt->base.offset;
   }

   /* rect[0] addresses the requested box within the miptree. */
   nv50_m2mf_rect_setup(&tx->rect[0], res, level, box->x, box->y, box->z);

   size = tx->base.layer_stride;

   /* Allocate a mappable GART staging bo big enough for all layers. */
   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        size * tx->nlayers, NULL, &tx->rect[1].bo);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   tx->rect[1].cpp = tx->rect[0].cpp;
   tx->rect[1].width = tx->nblocksx;
   tx->rect[1].height = tx->nblocksy;
   tx->rect[1].depth = 1;
   tx->rect[1].pitch = tx->base.stride;
   tx->rect[1].domain = NOUVEAU_BO_GART;

   if (usage & PIPE_TRANSFER_READ) {
      /* Copy each layer into the staging bo, then restore rect state for
       * the write-back done at unmap time. */
      unsigned base = tx->rect[0].base;
      unsigned z = tx->rect[0].z;
      unsigned i;
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[1], &tx->rect[0],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += size;
      }
      tx->rect[0].z = z;
      tx->rect[0].base = base;
      tx->rect[1].base = 0;
   }

   /* If the bo is already mapped (persistent map), reuse that pointer. */
   if (tx->rect[1].bo->map) {
      *ptransfer = &tx->base;
      return tx->rect[1].bo->map;
   }

   if (usage & PIPE_TRANSFER_READ)
      flags = NOUVEAU_BO_RD;
   if (usage & PIPE_TRANSFER_WRITE)
      flags |= NOUVEAU_BO_WR;

   ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->screen->base.client);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
      FREE(tx);
      return NULL;
   }

   *ptransfer = &tx->base;
   return tx->rect[1].bo->map;
}
462
/* pipe_context::transfer_unmap for miptrees.
 * Direct maps just drop the reference.  For staged writes, each layer is
 * copied back from the staging bo into the miptree; the staging bo is
 * then freed via fence work so the GPU copies complete first.
 */
void
nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *transfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;
   struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
   unsigned i;

   if (tx->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      pipe_resource_reference(&transfer->resource, NULL);

      FREE(tx);
      return;
   }

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      /* Write back each layer from staging (rect[1]) to the miptree
       * (rect[0]). */
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[0], &tx->rect[1],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += tx->nblocksy * tx->base.stride;
      }
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_wr, 1);

      /* Allow the copies above to finish executing before freeing the source */
      nouveau_fence_work(nvc0->screen->base.fence.current,
                         nouveau_fence_unref_bo, tx->rect[1].bo);
   } else {
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
   }
   if (tx->base.usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_rd, 1);

   pipe_resource_reference(&transfer->resource, NULL);

   FREE(tx);
}
504
/* This happens rather often with DTD9/st. */
/* Push an update to a buffer that may be bound as a constant buffer.
 * If some constbuf binding of this resource fully covers the region,
 * update it through the 3D engine's CB_POS path (nvc0_cb_bo_push) so the
 * bound view stays coherent; otherwise fall back to a plain data push.
 */
static void
nvc0_cb_push(struct nouveau_context *nv,
             struct nv04_resource *res,
             unsigned offset, unsigned words, const uint32_t *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nvc0_constbuf *cb = NULL;
   int s;

   /* Go through all the constbuf binding points of this buffer and try to
    * find one which contains the region to be updated.
    */
   for (s = 0; s < 6 && !cb; s++) {
      uint16_t bindings = res->cb_bindings[s];
      while (bindings) {
         int i = ffs(bindings) - 1;
         uint32_t cb_offset = nvc0->constbuf[s][i].offset;

         bindings &= ~(1 << i);
         if (cb_offset <= offset &&
             cb_offset + nvc0->constbuf[s][i].size >= offset + words * 4) {
            cb = &nvc0->constbuf[s][i];
            break;
         }
      }
   }

   if (cb) {
      nvc0_cb_bo_push(nv, res->bo, res->domain,
                      res->offset + cb->offset, cb->size,
                      offset - cb->offset, words, data);
   } else {
      nv->push_data(nv, res->bo, res->offset + offset, res->domain,
                    words * 4, data);
   }
}
542
/* Upload constant data into a bound constant buffer through the 3D
 * engine: bind the cb range via CB_SIZE/CB_ADDRESS, then stream the data
 * inline through CB_POS in chunks of at most NV04_PFIFO_MAX_PACKET_LEN-1
 * dwords.  offset is in bytes relative to `base` and must be dword-
 * aligned; `size` is aligned up to 256 bytes as required by CB_SIZE.
 */
void
nvc0_cb_bo_push(struct nouveau_context *nv,
                struct nouveau_bo *bo, unsigned domain,
                unsigned base, unsigned size,
                unsigned offset, unsigned words, const uint32_t *data)
{
   struct nouveau_pushbuf *push = nv->pushbuf;

   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_count, 1);
   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_bytes, words * 4);

   assert(!(offset & 3));
   size = align(size, 0x100);

   assert(offset < size);
   assert(offset + words * 4 <= size);

   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, size);
   PUSH_DATAh(push, bo->offset + base);
   PUSH_DATA (push, bo->offset + base);

   while (words) {
      unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN - 1);

      /* nr data dwords + method header + CB_POS argument */
      PUSH_SPACE(push, nr + 2);
      PUSH_REFN (push, bo, NOUVEAU_BO_WR | domain);
      BEGIN_1IC0(push, NVC0_3D(CB_POS), nr + 1);
      PUSH_DATA (push, offset);
      PUSH_DATAp(push, data, nr);

      words -= nr;
      data += nr;
      offset += nr * 4;
   }
}
579
580 void
581 nvc0_init_transfer_functions(struct nvc0_context *nvc0)
582 {
583 if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
584 nvc0->m2mf_copy_rect = nve4_m2mf_transfer_rect;
585 nvc0->base.copy_data = nve4_m2mf_copy_linear;
586 nvc0->base.push_data = nve4_p2mf_push_linear;
587 } else {
588 nvc0->m2mf_copy_rect = nvc0_m2mf_transfer_rect;
589 nvc0->base.copy_data = nvc0_m2mf_copy_linear;
590 nvc0->base.push_data = nvc0_m2mf_push_linear;
591 }
592 nvc0->base.push_cb = nvc0_cb_push;
593 }