nvc0: when mapping directly, provide accurate xfer info + start
[mesa.git] src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c
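/* Transfer (map/unmap) and copy helpers for nvc0: M2MF-based paths for
 * Fermi, and copy-engine/P2MF-based paths for Kepler (nve4 and up). */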
#include "util/u_format.h"

#include "nvc0/nvc0_context.h"

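/* Per-transfer bookkeeping: rect[0] describes the miptree being mapped,
 * rect[1] the linear staging bo (unused for direct maps). The block counts
 * are in units of format blocks, scaled up for multisampled surfaces. */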
struct nvc0_transfer {
   struct pipe_transfer base;
   struct nv50_m2mf_rect rect[2];
   uint32_t nblocksx;
   uint16_t nblocksy;
   uint16_t nlayers;
};

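/* Fermi path: copy a rect of blocks between two surfaces with the M2MF
 * engine. Linear surfaces fold x/y into the byte offset and use PITCH_*;
 * tiled surfaces are addressed via TILING_MODE_* and TILING_POSITION_*. */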
static void
nvc0_m2mf_transfer_rect(struct nvc0_context *nvc0,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bufctx *bctx = nvc0->bufctx;
   const int cpp = dst->cpp;
   uint32_t src_ofst = src->base;
   uint32_t dst_ofst = dst->base;
   uint32_t height = nblocksy;
   uint32_t sy = src->y;
   uint32_t dy = dst->y;
   uint32_t exec = (1 << 20);

   assert(dst->cpp == src->cpp);

   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   if (nouveau_bo_memtype(src->bo)) {
      BEGIN_NVC0(push, NVC0_M2MF(TILING_MODE_IN), 5);
      PUSH_DATA (push, src->tile_mode);
      PUSH_DATA (push, src->width * cpp);
      PUSH_DATA (push, src->height);
      PUSH_DATA (push, src->depth);
      PUSH_DATA (push, src->z);
   } else {
      src_ofst += src->y * src->pitch + src->x * cpp;

      BEGIN_NVC0(push, NVC0_M2MF(PITCH_IN), 1);
      PUSH_DATA (push, src->width * cpp);

      exec |= NVC0_M2MF_EXEC_LINEAR_IN;
   }

   if (nouveau_bo_memtype(dst->bo)) {
      BEGIN_NVC0(push, NVC0_M2MF(TILING_MODE_OUT), 5);
      PUSH_DATA (push, dst->tile_mode);
      PUSH_DATA (push, dst->width * cpp);
      PUSH_DATA (push, dst->height);
      PUSH_DATA (push, dst->depth);
      PUSH_DATA (push, dst->z);
   } else {
      dst_ofst += dst->y * dst->pitch + dst->x * cpp;

      BEGIN_NVC0(push, NVC0_M2MF(PITCH_OUT), 1);
      PUSH_DATA (push, dst->width * cpp);

      exec |= NVC0_M2MF_EXEC_LINEAR_OUT;
   }

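   /* M2MF appears to be limited to 2047 lines per EXEC, so emit the copy
    * in chunks, advancing the linear offsets or the tiled y positions. */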
   while (height) {
      int line_count = height > 2047 ? 2047 : height;

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->bo->offset + src_ofst);
      PUSH_DATA (push, src->bo->offset + src_ofst);

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->bo->offset + dst_ofst);
      PUSH_DATA (push, dst->bo->offset + dst_ofst);

      if (!(exec & NVC0_M2MF_EXEC_LINEAR_IN)) {
         BEGIN_NVC0(push, NVC0_M2MF(TILING_POSITION_IN_X), 2);
         PUSH_DATA (push, src->x * cpp);
         PUSH_DATA (push, sy);
      } else {
         src_ofst += line_count * src->pitch;
      }
      if (!(exec & NVC0_M2MF_EXEC_LINEAR_OUT)) {
         BEGIN_NVC0(push, NVC0_M2MF(TILING_POSITION_OUT_X), 2);
         PUSH_DATA (push, dst->x * cpp);
         PUSH_DATA (push, dy);
      } else {
         dst_ofst += line_count * dst->pitch;
      }

      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, nblocksx * cpp);
      PUSH_DATA (push, line_count);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, exec);

      height -= line_count;
      sy += line_count;
      dy += line_count;
   }

   nouveau_bufctx_reset(bctx, 0);
}

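/* Kepler path: the same rect copy on the dedicated copy engine. The raw
 * SUBC_COPY() method offsets and exec bits have no header definitions;
 * the names in the comments appear to come from reverse engineering. */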
static void
nve4_m2mf_transfer_rect(struct nvc0_context *nvc0,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bufctx *bctx = nvc0->bufctx;
   uint32_t exec;
   uint32_t src_base = src->base;
   uint32_t dst_base = dst->base;
   const int cpp = dst->cpp;

   assert(dst->cpp == src->cpp);

   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   exec = 0x200 /* 2D_ENABLE */ | 0x6 /* UNK */;

   if (!nouveau_bo_memtype(dst->bo)) {
      assert(!dst->z);
      dst_base += dst->y * dst->pitch + dst->x * cpp;
      exec |= 0x100; /* DST_MODE_2D_LINEAR */
   }
   if (!nouveau_bo_memtype(src->bo)) {
      assert(!src->z);
      src_base += src->y * src->pitch + src->x * cpp;
      exec |= 0x080; /* SRC_MODE_2D_LINEAR */
   }

   BEGIN_NVC0(push, SUBC_COPY(0x070c), 6);
   PUSH_DATA (push, 0x1000 | dst->tile_mode);
   PUSH_DATA (push, dst->pitch);
   PUSH_DATA (push, dst->height);
   PUSH_DATA (push, dst->depth);
   PUSH_DATA (push, dst->z);
   PUSH_DATA (push, (dst->y << 16) | (dst->x * cpp));

   BEGIN_NVC0(push, SUBC_COPY(0x0728), 6);
   PUSH_DATA (push, 0x1000 | src->tile_mode);
   PUSH_DATA (push, src->pitch);
   PUSH_DATA (push, src->height);
   PUSH_DATA (push, src->depth);
   PUSH_DATA (push, src->z);
   PUSH_DATA (push, (src->y << 16) | (src->x * cpp));

   BEGIN_NVC0(push, SUBC_COPY(0x0400), 8);
   PUSH_DATAh(push, src->bo->offset + src_base);
   PUSH_DATA (push, src->bo->offset + src_base);
   PUSH_DATAh(push, dst->bo->offset + dst_base);
   PUSH_DATA (push, dst->bo->offset + dst_base);
   PUSH_DATA (push, src->pitch);
   PUSH_DATA (push, dst->pitch);
   PUSH_DATA (push, nblocksx * cpp);
   PUSH_DATA (push, nblocksy);

   BEGIN_NVC0(push, SUBC_COPY(0x0300), 1);
   PUSH_DATA (push, exec);

   nouveau_bufctx_reset(bctx, 0);
}

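/* Upload a small linear range by writing the data inline into the push
 * buffer (M2MF DATA method), at most NV04_PFIFO_MAX_PACKET_LEN dwords
 * per packet. */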
void
nvc0_m2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4;

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      if (!PUSH_SPACE(push, nr + 9))
         break;

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4));
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, 0x100111);

      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_NIC0(push, NVC0_M2MF(DATA), nr);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}

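/* Kepler equivalent of the inline upload, using the P2MF UPLOAD methods;
 * the first dword of the 1IC0 packet is the EXEC trigger (0x1001), the
 * rest is the payload. */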
void
nve4_p2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4;

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      unsigned nr = MIN2(count, (NV04_PFIFO_MAX_PACKET_LEN - 1));

      if (!PUSH_SPACE(push, nr + 10))
         break;

      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4));
      PUSH_DATA (push, 1);
      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), nr + 1);
      PUSH_DATA (push, 0x1001);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}

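/* Linear buffer-to-buffer copy via M2MF, split into chunks of up to
 * 128 KiB per EXEC. */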
static void
nvc0_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   while (size) {
      unsigned bytes = MIN2(size, 1 << 17);

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + dstoff);
      PUSH_DATA (push, dst->offset + dstoff);
      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->offset + srcoff);
      PUSH_DATA (push, src->offset + srcoff);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, bytes);
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, NVC0_M2MF_EXEC_QUERY_SHORT |
                 NVC0_M2MF_EXEC_LINEAR_IN | NVC0_M2MF_EXEC_LINEAR_OUT);

      srcoff += bytes;
      dstoff += bytes;
      size -= bytes;
   }

   nouveau_bufctx_reset(bctx, 0);
}

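/* Kepler linear copy: a single kick on the copy engine. 0x186 combines
 * the 0x6 launch bits with the linear src/dst modes used above, without
 * 2D_ENABLE. */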
static void
nve4_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   BEGIN_NVC0(push, SUBC_COPY(0x0400), 4);
   PUSH_DATAh(push, src->offset + srcoff);
   PUSH_DATA (push, src->offset + srcoff);
   PUSH_DATAh(push, dst->offset + dstoff);
   PUSH_DATA (push, dst->offset + dstoff);
   BEGIN_NVC0(push, SUBC_COPY(0x0418), 1);
   PUSH_DATA (push, size);
   BEGIN_NVC0(push, SUBC_COPY(0x0300), 1);
   PUSH_DATA (push, 0x186);

   nouveau_bufctx_reset(bctx, 0);
}

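/* Direct CPU mapping only makes sense for linear staging surfaces that
 * do not live in VRAM. */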
static inline bool
nvc0_mt_transfer_can_map_directly(struct nv50_miptree *mt)
{
   if (mt->base.domain == NOUVEAU_BO_VRAM)
      return false;
   if (mt->base.base.usage != PIPE_USAGE_STAGING)
      return false;
   return !nouveau_bo_memtype(mt->base.bo);
}

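/* Wait for outstanding GPU work before a direct map. Writers must wait
 * for all prior access, readers only for prior writes. Returns true on
 * success. */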
static inline bool
nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage)
{
   if (!mt->base.mm) {
      uint32_t access = (usage & PIPE_TRANSFER_WRITE) ?
         NOUVEAU_BO_WR : NOUVEAU_BO_RD;
      return !nouveau_bo_wait(mt->base.bo, access, nvc0->base.client);
   }
   if (usage & PIPE_TRANSFER_WRITE)
      return !mt->base.fence || nouveau_fence_wait(mt->base.fence, &nvc0->base.debug);
   return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
}

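/* Map a texture for CPU access. Staging-class linear textures are mapped
 * in place, with the stride, layer_stride and start offset derived from
 * the requested box; all others go through a linear bo in GART that is
 * populated (for reads) and flushed back (for writes) per layer with
 * m2mf_copy_rect. */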
void *
nvc0_miptree_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *res,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nouveau_device *dev = nvc0->screen->base.device;
   struct nv50_miptree *mt = nv50_miptree(res);
   struct nvc0_transfer *tx;
   uint32_t size;
   int ret;
   unsigned flags = 0;

   if (nvc0_mt_transfer_can_map_directly(mt)) {
      ret = !nvc0_mt_sync(nvc0, mt, usage);
      if (!ret)
         ret = nouveau_bo_map(mt->base.bo, 0, NULL);
      if (ret &&
          (usage & PIPE_TRANSFER_MAP_DIRECTLY))
         return NULL;
      if (!ret)
         usage |= PIPE_TRANSFER_MAP_DIRECTLY;
   } else
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return NULL;

   tx = CALLOC_STRUCT(nvc0_transfer);
   if (!tx)
      return NULL;

   pipe_resource_reference(&tx->base.resource, res);

   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;

   if (util_format_is_plain(res->format)) {
      tx->nblocksx = box->width << mt->ms_x;
      tx->nblocksy = box->height << mt->ms_y;
   } else {
      tx->nblocksx = util_format_get_nblocksx(res->format, box->width);
      tx->nblocksy = util_format_get_nblocksy(res->format, box->height);
   }
   tx->nlayers = box->depth;

   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      tx->base.stride = mt->level[level].pitch;
      tx->base.layer_stride = mt->layer_stride;
      uint32_t offset = box->y * tx->base.stride +
         util_format_get_stride(res->format, box->x);
      if (!mt->layout_3d)
         offset += mt->layer_stride * box->z;
      else
         offset += nvc0_mt_zslice_offset(mt, level, box->z);
      *ptransfer = &tx->base;
      return mt->base.bo->map + mt->base.offset + offset;
   }

   tx->base.stride = tx->nblocksx * util_format_get_blocksize(res->format);
   tx->base.layer_stride = tx->nblocksy * tx->base.stride;

   nv50_m2mf_rect_setup(&tx->rect[0], res, level, box->x, box->y, box->z);

   size = tx->base.layer_stride;

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        size * tx->nlayers, NULL, &tx->rect[1].bo);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   tx->rect[1].cpp = tx->rect[0].cpp;
   tx->rect[1].width = tx->nblocksx;
   tx->rect[1].height = tx->nblocksy;
   tx->rect[1].depth = 1;
   tx->rect[1].pitch = tx->base.stride;
   tx->rect[1].domain = NOUVEAU_BO_GART;

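   /* For read access, download each layer into the staging bo up front. */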
   if (usage & PIPE_TRANSFER_READ) {
      unsigned base = tx->rect[0].base;
      unsigned z = tx->rect[0].z;
      unsigned i;
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[1], &tx->rect[0],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += size;
      }
      tx->rect[0].z = z;
      tx->rect[0].base = base;
      tx->rect[1].base = 0;
   }

   if (tx->rect[1].bo->map) {
      *ptransfer = &tx->base;
      return tx->rect[1].bo->map;
   }

   if (usage & PIPE_TRANSFER_READ)
      flags = NOUVEAU_BO_RD;
   if (usage & PIPE_TRANSFER_WRITE)
      flags |= NOUVEAU_BO_WR;

   ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->screen->base.client);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
      FREE(tx);
      return NULL;
   }

   *ptransfer = &tx->base;
   return tx->rect[1].bo->map;
}

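/* Unmap: write transfers copy the staging bo back into the texture; the
 * bo is unreferenced from a fence callback so it stays alive until the
 * copies have actually executed. */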
void
nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *transfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;
   struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
   unsigned i;

   if (tx->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      pipe_resource_reference(&transfer->resource, NULL);

      FREE(tx);
      return;
   }

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[0], &tx->rect[1],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += tx->nblocksy * tx->base.stride;
      }
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_wr, 1);

      /* Allow the copies above to finish executing before freeing the source */
      nouveau_fence_work(nvc0->screen->base.fence.current,
                         nouveau_fence_unref_bo, tx->rect[1].bo);
   } else {
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
   }
   if (tx->base.usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_rd, 1);

   pipe_resource_reference(&transfer->resource, NULL);

   FREE(tx);
}

/* This happens rather often with D3D9/st. */
static void
nvc0_cb_push(struct nouveau_context *nv,
             struct nv04_resource *res,
             unsigned offset, unsigned words, const uint32_t *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nvc0_constbuf *cb = NULL;
   int s;

   /* Go through all the constbuf binding points of this buffer and try to
    * find one which contains the region to be updated.
    */
   for (s = 0; s < 6 && !cb; s++) {
      uint16_t bindings = res->cb_bindings[s];
      while (bindings) {
         int i = ffs(bindings) - 1;
         uint32_t cb_offset = nvc0->constbuf[s][i].offset;

         bindings &= ~(1 << i);
         if (cb_offset <= offset &&
             cb_offset + nvc0->constbuf[s][i].size >= offset + words * 4) {
            cb = &nvc0->constbuf[s][i];
            break;
         }
      }
   }

   if (cb) {
      nvc0_cb_bo_push(nv, res->bo, res->domain,
                      res->offset + cb->offset, cb->size,
                      offset - cb->offset, words, data);
   } else {
      nv->push_data(nv, res->bo, res->offset + offset, res->domain,
                    words * 4, data);
   }
}

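/* Write words into a constbuf through the 3D engine's CB_POS upload path,
 * so the update stays ordered with the 3D commands around it. */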
void
nvc0_cb_bo_push(struct nouveau_context *nv,
                struct nouveau_bo *bo, unsigned domain,
                unsigned base, unsigned size,
                unsigned offset, unsigned words, const uint32_t *data)
{
   struct nouveau_pushbuf *push = nv->pushbuf;

   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_count, 1);
   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_bytes, words * 4);

   assert(!(offset & 3));
   size = align(size, 0x100);

   assert(offset < size);
   assert(offset + words * 4 <= size);

   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, size);
   PUSH_DATAh(push, bo->offset + base);
   PUSH_DATA (push, bo->offset + base);

   while (words) {
      unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN - 1);

      PUSH_SPACE(push, nr + 2);
      PUSH_REFN (push, bo, NOUVEAU_BO_WR | domain);
      BEGIN_1IC0(push, NVC0_3D(CB_POS), nr + 1);
      PUSH_DATA (push, offset);
      PUSH_DATAp(push, data, nr);

      words -= nr;
      data += nr;
      offset += nr * 4;
   }
}

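/* Fermi uses M2MF for everything; Kepler and up switch to the dedicated
 * copy engine and P2MF uploads. */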
void
nvc0_init_transfer_functions(struct nvc0_context *nvc0)
{
   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
      nvc0->m2mf_copy_rect = nve4_m2mf_transfer_rect;
      nvc0->base.copy_data = nve4_m2mf_copy_linear;
      nvc0->base.push_data = nve4_p2mf_push_linear;
   } else {
      nvc0->m2mf_copy_rect = nvc0_m2mf_transfer_rect;
      nvc0->base.copy_data = nvc0_m2mf_copy_linear;
      nvc0->base.push_data = nvc0_m2mf_push_linear;
   }
   nvc0->base.push_cb = nvc0_cb_push;
}