tegra: Initial support
[mesa.git] / src / gallium / drivers / tegra / tegra_context.c
1 /*
2 * Copyright © 2014-2018 NVIDIA Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25 #include <stdlib.h>
26
27 #include "util/u_debug.h"
28 #include "util/u_inlines.h"
29 #include "util/u_upload_mgr.h"
30
31 #include "tegra_context.h"
32 #include "tegra_resource.h"
33 #include "tegra_screen.h"
34
35 static void
36 tegra_destroy(struct pipe_context *pcontext)
37 {
38 struct tegra_context *context = to_tegra_context(pcontext);
39
40 if (context->base.stream_uploader)
41 u_upload_destroy(context->base.stream_uploader);
42
43 context->gpu->destroy(context->gpu);
44 free(context);
45 }
46
47 static void
48 tegra_draw_vbo(struct pipe_context *pcontext,
49 const struct pipe_draw_info *pinfo)
50 {
51 struct tegra_context *context = to_tegra_context(pcontext);
52 struct pipe_draw_indirect_info indirect;
53 struct pipe_draw_info info;
54
55 if (pinfo && (pinfo->indirect || pinfo->index_size)) {
56 memcpy(&info, pinfo, sizeof(info));
57
58 if (pinfo->indirect) {
59 memcpy(&indirect, pinfo->indirect, sizeof(indirect));
60 indirect.buffer = tegra_resource_unwrap(info.indirect->buffer);
61 info.indirect = &indirect;
62 }
63
64 if (pinfo->index_size && !pinfo->has_user_indices)
65 info.index.resource = tegra_resource_unwrap(info.index.resource);
66
67 pinfo = &info;
68 }
69
70 context->gpu->draw_vbo(context->gpu, pinfo);
71 }
72
73 static void
74 tegra_render_condition(struct pipe_context *pcontext,
75 struct pipe_query *query,
76 boolean condition,
77 unsigned int mode)
78 {
79 struct tegra_context *context = to_tegra_context(pcontext);
80
81 context->gpu->render_condition(context->gpu, query, condition, mode);
82 }
83
84 static struct pipe_query *
85 tegra_create_query(struct pipe_context *pcontext, unsigned int query_type,
86 unsigned int index)
87 {
88 struct tegra_context *context = to_tegra_context(pcontext);
89
90 return context->gpu->create_query(context->gpu, query_type, index);
91 }
92
93 static struct pipe_query *
94 tegra_create_batch_query(struct pipe_context *pcontext,
95 unsigned int num_queries,
96 unsigned int *queries)
97 {
98 struct tegra_context *context = to_tegra_context(pcontext);
99
100 return context->gpu->create_batch_query(context->gpu, num_queries,
101 queries);
102 }
103
104 static void
105 tegra_destroy_query(struct pipe_context *pcontext, struct pipe_query *query)
106 {
107 struct tegra_context *context = to_tegra_context(pcontext);
108
109 context->gpu->destroy_query(context->gpu, query);
110 }
111
112 static boolean
113 tegra_begin_query(struct pipe_context *pcontext, struct pipe_query *query)
114 {
115 struct tegra_context *context = to_tegra_context(pcontext);
116
117 return context->gpu->begin_query(context->gpu, query);
118 }
119
120 static bool
121 tegra_end_query(struct pipe_context *pcontext, struct pipe_query *query)
122 {
123 struct tegra_context *context = to_tegra_context(pcontext);
124
125 return context->gpu->end_query(context->gpu, query);
126 }
127
128 static boolean
129 tegra_get_query_result(struct pipe_context *pcontext,
130 struct pipe_query *query,
131 boolean wait,
132 union pipe_query_result *result)
133 {
134 struct tegra_context *context = to_tegra_context(pcontext);
135
136 return context->gpu->get_query_result(context->gpu, query, wait,
137 result);
138 }
139
140 static void
141 tegra_get_query_result_resource(struct pipe_context *pcontext,
142 struct pipe_query *query,
143 boolean wait,
144 enum pipe_query_value_type result_type,
145 int index,
146 struct pipe_resource *resource,
147 unsigned int offset)
148 {
149 struct tegra_context *context = to_tegra_context(pcontext);
150
151 context->gpu->get_query_result_resource(context->gpu, query, wait,
152 result_type, index, resource,
153 offset);
154 }
155
156 static void
157 tegra_set_active_query_state(struct pipe_context *pcontext, boolean enable)
158 {
159 struct tegra_context *context = to_tegra_context(pcontext);
160
161 context->gpu->set_active_query_state(context->gpu, enable);
162 }
163
164 static void *
165 tegra_create_blend_state(struct pipe_context *pcontext,
166 const struct pipe_blend_state *cso)
167 {
168 struct tegra_context *context = to_tegra_context(pcontext);
169
170 return context->gpu->create_blend_state(context->gpu, cso);
171 }
172
173 static void
174 tegra_bind_blend_state(struct pipe_context *pcontext, void *so)
175 {
176 struct tegra_context *context = to_tegra_context(pcontext);
177
178 context->gpu->bind_blend_state(context->gpu, so);
179 }
180
181 static void
182 tegra_delete_blend_state(struct pipe_context *pcontext, void *so)
183 {
184 struct tegra_context *context = to_tegra_context(pcontext);
185
186 context->gpu->delete_blend_state(context->gpu, so);
187 }
188
189 static void *
190 tegra_create_sampler_state(struct pipe_context *pcontext,
191 const struct pipe_sampler_state *cso)
192 {
193 struct tegra_context *context = to_tegra_context(pcontext);
194
195 return context->gpu->create_sampler_state(context->gpu, cso);
196 }
197
198 static void
199 tegra_bind_sampler_states(struct pipe_context *pcontext, unsigned shader,
200 unsigned start_slot, unsigned num_samplers,
201 void **samplers)
202 {
203 struct tegra_context *context = to_tegra_context(pcontext);
204
205 context->gpu->bind_sampler_states(context->gpu, shader, start_slot,
206 num_samplers, samplers);
207 }
208
209 static void
210 tegra_delete_sampler_state(struct pipe_context *pcontext, void *so)
211 {
212 struct tegra_context *context = to_tegra_context(pcontext);
213
214 context->gpu->delete_sampler_state(context->gpu, so);
215 }
216
217 static void *
218 tegra_create_rasterizer_state(struct pipe_context *pcontext,
219 const struct pipe_rasterizer_state *cso)
220 {
221 struct tegra_context *context = to_tegra_context(pcontext);
222
223 return context->gpu->create_rasterizer_state(context->gpu, cso);
224 }
225
226 static void
227 tegra_bind_rasterizer_state(struct pipe_context *pcontext, void *so)
228 {
229 struct tegra_context *context = to_tegra_context(pcontext);
230
231 context->gpu->bind_rasterizer_state(context->gpu, so);
232 }
233
234 static void
235 tegra_delete_rasterizer_state(struct pipe_context *pcontext, void *so)
236 {
237 struct tegra_context *context = to_tegra_context(pcontext);
238
239 context->gpu->delete_rasterizer_state(context->gpu, so);
240 }
241
242 static void *
243 tegra_create_depth_stencil_alpha_state(struct pipe_context *pcontext,
244 const struct pipe_depth_stencil_alpha_state *cso)
245 {
246 struct tegra_context *context = to_tegra_context(pcontext);
247
248 return context->gpu->create_depth_stencil_alpha_state(context->gpu, cso);
249 }
250
251 static void
252 tegra_bind_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
253 {
254 struct tegra_context *context = to_tegra_context(pcontext);
255
256 context->gpu->bind_depth_stencil_alpha_state(context->gpu, so);
257 }
258
259 static void
260 tegra_delete_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
261 {
262 struct tegra_context *context = to_tegra_context(pcontext);
263
264 context->gpu->delete_depth_stencil_alpha_state(context->gpu, so);
265 }
266
267 static void *
268 tegra_create_fs_state(struct pipe_context *pcontext,
269 const struct pipe_shader_state *cso)
270 {
271 struct tegra_context *context = to_tegra_context(pcontext);
272
273 return context->gpu->create_fs_state(context->gpu, cso);
274 }
275
276 static void
277 tegra_bind_fs_state(struct pipe_context *pcontext, void *so)
278 {
279 struct tegra_context *context = to_tegra_context(pcontext);
280
281 context->gpu->bind_fs_state(context->gpu, so);
282 }
283
284 static void
285 tegra_delete_fs_state(struct pipe_context *pcontext, void *so)
286 {
287 struct tegra_context *context = to_tegra_context(pcontext);
288
289 context->gpu->delete_fs_state(context->gpu, so);
290 }
291
292 static void *
293 tegra_create_vs_state(struct pipe_context *pcontext,
294 const struct pipe_shader_state *cso)
295 {
296 struct tegra_context *context = to_tegra_context(pcontext);
297
298 return context->gpu->create_vs_state(context->gpu, cso);
299 }
300
301 static void
302 tegra_bind_vs_state(struct pipe_context *pcontext, void *so)
303 {
304 struct tegra_context *context = to_tegra_context(pcontext);
305
306 context->gpu->bind_vs_state(context->gpu, so);
307 }
308
309 static void
310 tegra_delete_vs_state(struct pipe_context *pcontext, void *so)
311 {
312 struct tegra_context *context = to_tegra_context(pcontext);
313
314 context->gpu->delete_vs_state(context->gpu, so);
315 }
316
317 static void *
318 tegra_create_gs_state(struct pipe_context *pcontext,
319 const struct pipe_shader_state *cso)
320 {
321 struct tegra_context *context = to_tegra_context(pcontext);
322
323 return context->gpu->create_gs_state(context->gpu, cso);
324 }
325
326 static void
327 tegra_bind_gs_state(struct pipe_context *pcontext, void *so)
328 {
329 struct tegra_context *context = to_tegra_context(pcontext);
330
331 context->gpu->bind_gs_state(context->gpu, so);
332 }
333
334 static void
335 tegra_delete_gs_state(struct pipe_context *pcontext, void *so)
336 {
337 struct tegra_context *context = to_tegra_context(pcontext);
338
339 context->gpu->delete_gs_state(context->gpu, so);
340 }
341
342 static void *
343 tegra_create_tcs_state(struct pipe_context *pcontext,
344 const struct pipe_shader_state *cso)
345 {
346 struct tegra_context *context = to_tegra_context(pcontext);
347
348 return context->gpu->create_tcs_state(context->gpu, cso);
349 }
350
351 static void
352 tegra_bind_tcs_state(struct pipe_context *pcontext, void *so)
353 {
354 struct tegra_context *context = to_tegra_context(pcontext);
355
356 context->gpu->bind_tcs_state(context->gpu, so);
357 }
358
359 static void
360 tegra_delete_tcs_state(struct pipe_context *pcontext, void *so)
361 {
362 struct tegra_context *context = to_tegra_context(pcontext);
363
364 context->gpu->delete_tcs_state(context->gpu, so);
365 }
366
367 static void *
368 tegra_create_tes_state(struct pipe_context *pcontext,
369 const struct pipe_shader_state *cso)
370 {
371 struct tegra_context *context = to_tegra_context(pcontext);
372
373 return context->gpu->create_tes_state(context->gpu, cso);
374 }
375
376 static void
377 tegra_bind_tes_state(struct pipe_context *pcontext, void *so)
378 {
379 struct tegra_context *context = to_tegra_context(pcontext);
380
381 context->gpu->bind_tes_state(context->gpu, so);
382 }
383
384 static void
385 tegra_delete_tes_state(struct pipe_context *pcontext, void *so)
386 {
387 struct tegra_context *context = to_tegra_context(pcontext);
388
389 context->gpu->delete_tes_state(context->gpu, so);
390 }
391
392 static void *
393 tegra_create_vertex_elements_state(struct pipe_context *pcontext,
394 unsigned num_elements,
395 const struct pipe_vertex_element *elements)
396 {
397 struct tegra_context *context = to_tegra_context(pcontext);
398
399 return context->gpu->create_vertex_elements_state(context->gpu,
400 num_elements,
401 elements);
402 }
403
404 static void
405 tegra_bind_vertex_elements_state(struct pipe_context *pcontext, void *so)
406 {
407 struct tegra_context *context = to_tegra_context(pcontext);
408
409 context->gpu->bind_vertex_elements_state(context->gpu, so);
410 }
411
412 static void
413 tegra_delete_vertex_elements_state(struct pipe_context *pcontext, void *so)
414 {
415 struct tegra_context *context = to_tegra_context(pcontext);
416
417 context->gpu->delete_vertex_elements_state(context->gpu, so);
418 }
419
420 static void
421 tegra_set_blend_color(struct pipe_context *pcontext,
422 const struct pipe_blend_color *color)
423 {
424 struct tegra_context *context = to_tegra_context(pcontext);
425
426 context->gpu->set_blend_color(context->gpu, color);
427 }
428
429 static void
430 tegra_set_stencil_ref(struct pipe_context *pcontext,
431 const struct pipe_stencil_ref *ref)
432 {
433 struct tegra_context *context = to_tegra_context(pcontext);
434
435 context->gpu->set_stencil_ref(context->gpu, ref);
436 }
437
438 static void
439 tegra_set_sample_mask(struct pipe_context *pcontext, unsigned int mask)
440 {
441 struct tegra_context *context = to_tegra_context(pcontext);
442
443 context->gpu->set_sample_mask(context->gpu, mask);
444 }
445
446 static void
447 tegra_set_min_samples(struct pipe_context *pcontext, unsigned int samples)
448 {
449 struct tegra_context *context = to_tegra_context(pcontext);
450
451 context->gpu->set_min_samples(context->gpu, samples);
452 }
453
454 static void
455 tegra_set_clip_state(struct pipe_context *pcontext,
456 const struct pipe_clip_state *state)
457 {
458 struct tegra_context *context = to_tegra_context(pcontext);
459
460 context->gpu->set_clip_state(context->gpu, state);
461 }
462
/*
 * Forward a constant buffer binding to the wrapped GPU context. If the
 * binding references a (wrapped) resource, substitute the GPU-internal
 * resource before forwarding; user buffers (buf->buffer == NULL) and
 * unbind calls (buf == NULL) pass straight through.
 */
static void
tegra_set_constant_buffer(struct pipe_context *pcontext, unsigned int shader,
                          unsigned int index,
                          const struct pipe_constant_buffer *buf)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_constant_buffer buffer;

   if (buf && buf->buffer) {
      /* copy so the caller's binding is not modified by the unwrap */
      memcpy(&buffer, buf, sizeof(buffer));
      buffer.buffer = tegra_resource_unwrap(buffer.buffer);
      buf = &buffer;
   }

   context->gpu->set_constant_buffer(context->gpu, shader, index, buf);
}
479
/*
 * Forward framebuffer state to the wrapped GPU context, substituting each
 * wrapped colour/depth-stencil surface with its GPU-internal counterpart.
 * A local copy is used so the caller's state is never modified.
 */
static void
tegra_set_framebuffer_state(struct pipe_context *pcontext,
                            const struct pipe_framebuffer_state *fb)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_framebuffer_state state;
   unsigned i;

   if (fb) {
      memcpy(&state, fb, sizeof(state));

      for (i = 0; i < fb->nr_cbufs; i++)
         state.cbufs[i] = tegra_surface_unwrap(fb->cbufs[i]);

      /* clear the remaining colour buffer slots so the GPU driver never
       * sees stale pointers copied from the caller's structure */
      while (i < PIPE_MAX_COLOR_BUFS)
         state.cbufs[i++] = NULL;

      state.zsbuf = tegra_surface_unwrap(fb->zsbuf);

      fb = &state;
   }

   context->gpu->set_framebuffer_state(context->gpu, fb);
}
504
505 static void
506 tegra_set_polygon_stipple(struct pipe_context *pcontext,
507 const struct pipe_poly_stipple *stipple)
508 {
509 struct tegra_context *context = to_tegra_context(pcontext);
510
511 context->gpu->set_polygon_stipple(context->gpu, stipple);
512 }
513
514 static void
515 tegra_set_scissor_states(struct pipe_context *pcontext, unsigned start_slot,
516 unsigned num_scissors,
517 const struct pipe_scissor_state *scissors)
518 {
519 struct tegra_context *context = to_tegra_context(pcontext);
520
521 context->gpu->set_scissor_states(context->gpu, start_slot, num_scissors,
522 scissors);
523 }
524
525 static void
526 tegra_set_window_rectangles(struct pipe_context *pcontext, boolean include,
527 unsigned int num_rectangles,
528 const struct pipe_scissor_state *rectangles)
529 {
530 struct tegra_context *context = to_tegra_context(pcontext);
531
532 context->gpu->set_window_rectangles(context->gpu, include, num_rectangles,
533 rectangles);
534 }
535
536 static void
537 tegra_set_viewport_states(struct pipe_context *pcontext, unsigned start_slot,
538 unsigned num_viewports,
539 const struct pipe_viewport_state *viewports)
540 {
541 struct tegra_context *context = to_tegra_context(pcontext);
542
543 context->gpu->set_viewport_states(context->gpu, start_slot, num_viewports,
544 viewports);
545 }
546
/*
 * Forward sampler view bindings to the wrapped GPU context, substituting
 * each wrapper view with the GPU-internal view it holds.
 *
 * NOTE(review): this assumes pviews is a valid array whenever
 * num_views > 0 — confirm against the state-tracker callers that unbinds
 * never pass a NULL array with a non-zero count.
 */
static void
tegra_set_sampler_views(struct pipe_context *pcontext, unsigned shader,
                        unsigned start_slot, unsigned num_views,
                        struct pipe_sampler_view **pviews)
{
   struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   struct tegra_context *context = to_tegra_context(pcontext);
   unsigned i;

   for (i = 0; i < num_views; i++)
      views[i] = tegra_sampler_view_unwrap(pviews[i]);

   context->gpu->set_sampler_views(context->gpu, shader, start_slot,
                                   num_views, views);
}
562
563 static void
564 tegra_set_tess_state(struct pipe_context *pcontext,
565 const float default_outer_level[4],
566 const float default_inner_level[2])
567 {
568 struct tegra_context *context = to_tegra_context(pcontext);
569
570 context->gpu->set_tess_state(context->gpu, default_outer_level,
571 default_inner_level);
572 }
573
574 static void
575 tegra_set_debug_callback(struct pipe_context *pcontext,
576 const struct pipe_debug_callback *callback)
577 {
578 struct tegra_context *context = to_tegra_context(pcontext);
579
580 context->gpu->set_debug_callback(context->gpu, callback);
581 }
582
/*
 * Forward shader storage buffer bindings to the wrapped GPU context.
 *
 * NOTE(review): each pipe_shader_buffer carries a pipe_resource pointer
 * that is forwarded here without unwrapping, unlike constant/vertex
 * buffers above — confirm whether wrapped resources can reach this path.
 */
static void
tegra_set_shader_buffers(struct pipe_context *pcontext, unsigned int shader,
                         unsigned start, unsigned count,
                         const struct pipe_shader_buffer *buffers)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_shader_buffers(context->gpu, shader, start, count,
                                    buffers);
}
593
/*
 * Forward shader image bindings to the wrapped GPU context.
 *
 * NOTE(review): pipe_image_view references a pipe_resource that is
 * forwarded without unwrapping here — confirm whether wrapped resources
 * can reach this path.
 */
static void
tegra_set_shader_images(struct pipe_context *pcontext, unsigned int shader,
                        unsigned start, unsigned count,
                        const struct pipe_image_view *images)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_shader_images(context->gpu, shader, start, count,
                                   images);
}
604
/*
 * Forward vertex buffer bindings to the wrapped GPU context. Bindings
 * that reference wrapped resources are copied and unwrapped first; user
 * buffers (CPU pointers) are passed through untouched.
 */
static void
tegra_set_vertex_buffers(struct pipe_context *pcontext, unsigned start_slot,
                         unsigned num_buffers,
                         const struct pipe_vertex_buffer *buffers)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_vertex_buffer buf[PIPE_MAX_SHADER_INPUTS];
   unsigned i;

   if (num_buffers && buffers) {
      /* copy so the caller's bindings are not modified by the unwrap */
      memcpy(buf, buffers, num_buffers * sizeof(struct pipe_vertex_buffer));

      for (i = 0; i < num_buffers; i++) {
         if (!buf[i].is_user_buffer)
            buf[i].buffer.resource = tegra_resource_unwrap(buf[i].buffer.resource);
      }

      buffers = buf;
   }

   context->gpu->set_vertex_buffers(context->gpu, start_slot, num_buffers,
                                    buffers);
}
628
629 static struct pipe_stream_output_target *
630 tegra_create_stream_output_target(struct pipe_context *pcontext,
631 struct pipe_resource *presource,
632 unsigned buffer_offset,
633 unsigned buffer_size)
634 {
635 struct tegra_resource *resource = to_tegra_resource(presource);
636 struct tegra_context *context = to_tegra_context(pcontext);
637
638 return context->gpu->create_stream_output_target(context->gpu,
639 resource->gpu,
640 buffer_offset,
641 buffer_size);
642 }
643
644 static void
645 tegra_stream_output_target_destroy(struct pipe_context *pcontext,
646 struct pipe_stream_output_target *target)
647 {
648 struct tegra_context *context = to_tegra_context(pcontext);
649
650 context->gpu->stream_output_target_destroy(context->gpu, target);
651 }
652
653 static void
654 tegra_set_stream_output_targets(struct pipe_context *pcontext,
655 unsigned num_targets,
656 struct pipe_stream_output_target **targets,
657 const unsigned *offsets)
658 {
659 struct tegra_context *context = to_tegra_context(pcontext);
660
661 context->gpu->set_stream_output_targets(context->gpu, num_targets,
662 targets, offsets);
663 }
664
665 static void
666 tegra_resource_copy_region(struct pipe_context *pcontext,
667 struct pipe_resource *pdst,
668 unsigned int dst_level,
669 unsigned int dstx,
670 unsigned int dsty,
671 unsigned int dstz,
672 struct pipe_resource *psrc,
673 unsigned int src_level,
674 const struct pipe_box *src_box)
675 {
676 struct tegra_context *context = to_tegra_context(pcontext);
677 struct tegra_resource *dst = to_tegra_resource(pdst);
678 struct tegra_resource *src = to_tegra_resource(psrc);
679
680 context->gpu->resource_copy_region(context->gpu, dst->gpu, dst_level, dstx,
681 dsty, dstz, src->gpu, src_level,
682 src_box);
683 }
684
685 static void
686 tegra_blit(struct pipe_context *pcontext, const struct pipe_blit_info *pinfo)
687 {
688 struct tegra_context *context = to_tegra_context(pcontext);
689 struct pipe_blit_info info;
690
691 if (pinfo) {
692 memcpy(&info, pinfo, sizeof(info));
693 info.dst.resource = tegra_resource_unwrap(info.dst.resource);
694 info.src.resource = tegra_resource_unwrap(info.src.resource);
695 pinfo = &info;
696 }
697
698 context->gpu->blit(context->gpu, pinfo);
699 }
700
701 static void
702 tegra_clear(struct pipe_context *pcontext, unsigned buffers,
703 const union pipe_color_union *color, double depth,
704 unsigned stencil)
705 {
706 struct tegra_context *context = to_tegra_context(pcontext);
707
708 context->gpu->clear(context->gpu, buffers, color, depth, stencil);
709 }
710
711 static void
712 tegra_clear_render_target(struct pipe_context *pcontext,
713 struct pipe_surface *pdst,
714 const union pipe_color_union *color,
715 unsigned int dstx,
716 unsigned int dsty,
717 unsigned int width,
718 unsigned int height,
719 bool render_condition)
720 {
721 struct tegra_context *context = to_tegra_context(pcontext);
722 struct tegra_surface *dst = to_tegra_surface(pdst);
723
724 context->gpu->clear_render_target(context->gpu, dst->gpu, color, dstx,
725 dsty, width, height, render_condition);
726 }
727
728 static void
729 tegra_clear_depth_stencil(struct pipe_context *pcontext,
730 struct pipe_surface *pdst,
731 unsigned int flags,
732 double depth,
733 unsigned int stencil,
734 unsigned int dstx,
735 unsigned int dsty,
736 unsigned int width,
737 unsigned int height,
738 bool render_condition)
739 {
740 struct tegra_context *context = to_tegra_context(pcontext);
741 struct tegra_surface *dst = to_tegra_surface(pdst);
742
743 context->gpu->clear_depth_stencil(context->gpu, dst->gpu, flags, depth,
744 stencil, dstx, dsty, width, height,
745 render_condition);
746 }
747
748 static void
749 tegra_clear_texture(struct pipe_context *pcontext,
750 struct pipe_resource *presource,
751 unsigned int level,
752 const struct pipe_box *box,
753 const void *data)
754 {
755 struct tegra_resource *resource = to_tegra_resource(presource);
756 struct tegra_context *context = to_tegra_context(pcontext);
757
758 context->gpu->clear_texture(context->gpu, resource->gpu, level, box, data);
759 }
760
761 static void
762 tegra_clear_buffer(struct pipe_context *pcontext,
763 struct pipe_resource *presource,
764 unsigned int offset,
765 unsigned int size,
766 const void *value,
767 int value_size)
768 {
769 struct tegra_resource *resource = to_tegra_resource(presource);
770 struct tegra_context *context = to_tegra_context(pcontext);
771
772 context->gpu->clear_buffer(context->gpu, resource->gpu, offset, size,
773 value, value_size);
774 }
775
776 static void
777 tegra_flush(struct pipe_context *pcontext, struct pipe_fence_handle **fence,
778 unsigned flags)
779 {
780 struct tegra_context *context = to_tegra_context(pcontext);
781
782 context->gpu->flush(context->gpu, fence, flags);
783 }
784
785 static void
786 tegra_create_fence_fd(struct pipe_context *pcontext,
787 struct pipe_fence_handle **fence,
788 int fd, enum pipe_fd_type type)
789 {
790 struct tegra_context *context = to_tegra_context(pcontext);
791
792 assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
793 context->gpu->create_fence_fd(context->gpu, fence, fd, type);
794 }
795
796 static void
797 tegra_fence_server_sync(struct pipe_context *pcontext,
798 struct pipe_fence_handle *fence)
799 {
800 struct tegra_context *context = to_tegra_context(pcontext);
801
802 context->gpu->fence_server_sync(context->gpu, fence);
803 }
804
805 static struct pipe_sampler_view *
806 tegra_create_sampler_view(struct pipe_context *pcontext,
807 struct pipe_resource *presource,
808 const struct pipe_sampler_view *template)
809 {
810 struct tegra_resource *resource = to_tegra_resource(presource);
811 struct tegra_context *context = to_tegra_context(pcontext);
812 struct tegra_sampler_view *view;
813
814 view = calloc(1, sizeof(*view));
815 if (!view)
816 return NULL;
817
818 view->gpu = context->gpu->create_sampler_view(context->gpu, resource->gpu,
819 template);
820 memcpy(&view->base, view->gpu, sizeof(*view->gpu));
821 /* overwrite to prevent reference from being released */
822 view->base.texture = NULL;
823
824 pipe_reference_init(&view->base.reference, 1);
825 pipe_resource_reference(&view->base.texture, presource);
826 view->base.context = pcontext;
827
828 return &view->base;
829 }
830
831 static void
832 tegra_sampler_view_destroy(struct pipe_context *pcontext,
833 struct pipe_sampler_view *pview)
834 {
835 struct tegra_sampler_view *view = to_tegra_sampler_view(pview);
836
837 pipe_resource_reference(&view->base.texture, NULL);
838 pipe_sampler_view_reference(&view->gpu, NULL);
839 free(view);
840 }
841
842 static struct pipe_surface *
843 tegra_create_surface(struct pipe_context *pcontext,
844 struct pipe_resource *presource,
845 const struct pipe_surface *template)
846 {
847 struct tegra_resource *resource = to_tegra_resource(presource);
848 struct tegra_context *context = to_tegra_context(pcontext);
849 struct tegra_surface *surface;
850
851 surface = calloc(1, sizeof(*surface));
852 if (!surface)
853 return NULL;
854
855 surface->gpu = context->gpu->create_surface(context->gpu, resource->gpu,
856 template);
857 if (!surface->gpu) {
858 free(surface);
859 return NULL;
860 }
861
862 memcpy(&surface->base, surface->gpu, sizeof(*surface->gpu));
863 /* overwrite to prevent reference from being released */
864 surface->base.texture = NULL;
865
866 pipe_reference_init(&surface->base.reference, 1);
867 pipe_resource_reference(&surface->base.texture, presource);
868 surface->base.context = &context->base;
869
870 return &surface->base;
871 }
872
873 static void
874 tegra_surface_destroy(struct pipe_context *pcontext,
875 struct pipe_surface *psurface)
876 {
877 struct tegra_surface *surface = to_tegra_surface(psurface);
878
879 pipe_resource_reference(&surface->base.texture, NULL);
880 pipe_surface_reference(&surface->gpu, NULL);
881 free(surface);
882 }
883
884 static void *
885 tegra_transfer_map(struct pipe_context *pcontext,
886 struct pipe_resource *presource,
887 unsigned level, unsigned usage,
888 const struct pipe_box *box,
889 struct pipe_transfer **ptransfer)
890 {
891 struct tegra_resource *resource = to_tegra_resource(presource);
892 struct tegra_context *context = to_tegra_context(pcontext);
893 struct tegra_transfer *transfer;
894
895 transfer = calloc(1, sizeof(*transfer));
896 if (!transfer)
897 return NULL;
898
899 transfer->map = context->gpu->transfer_map(context->gpu, resource->gpu,
900 level, usage, box,
901 &transfer->gpu);
902 memcpy(&transfer->base, transfer->gpu, sizeof(*transfer->gpu));
903 transfer->base.resource = NULL;
904 pipe_resource_reference(&transfer->base.resource, presource);
905
906 *ptransfer = &transfer->base;
907
908 return transfer->map;
909 }
910
911 static void
912 tegra_transfer_flush_region(struct pipe_context *pcontext,
913 struct pipe_transfer *ptransfer,
914 const struct pipe_box *box)
915 {
916 struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
917 struct tegra_context *context = to_tegra_context(pcontext);
918
919 context->gpu->transfer_flush_region(context->gpu, transfer->gpu, box);
920 }
921
922 static void
923 tegra_transfer_unmap(struct pipe_context *pcontext,
924 struct pipe_transfer *ptransfer)
925 {
926 struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
927 struct tegra_context *context = to_tegra_context(pcontext);
928
929 context->gpu->transfer_unmap(context->gpu, transfer->gpu);
930 pipe_resource_reference(&transfer->base.resource, NULL);
931 free(transfer);
932 }
933
934 static void
935 tegra_buffer_subdata(struct pipe_context *pcontext,
936 struct pipe_resource *presource,
937 unsigned usage, unsigned offset,
938 unsigned size, const void *data)
939 {
940 struct tegra_resource *resource = to_tegra_resource(presource);
941 struct tegra_context *context = to_tegra_context(pcontext);
942
943 context->gpu->buffer_subdata(context->gpu, resource->gpu, usage, offset,
944 size, data);
945 }
946
947 static void
948 tegra_texture_subdata(struct pipe_context *pcontext,
949 struct pipe_resource *presource,
950 unsigned level,
951 unsigned usage,
952 const struct pipe_box *box,
953 const void *data,
954 unsigned stride,
955 unsigned layer_stride)
956 {
957 struct tegra_resource *resource = to_tegra_resource(presource);
958 struct tegra_context *context = to_tegra_context(pcontext);
959
960 context->gpu->texture_subdata(context->gpu, resource->gpu, level, usage,
961 box, data, stride, layer_stride);
962 }
963
964 static void
965 tegra_texture_barrier(struct pipe_context *pcontext, unsigned int flags)
966 {
967 struct tegra_context *context = to_tegra_context(pcontext);
968
969 context->gpu->texture_barrier(context->gpu, flags);
970 }
971
972 static void
973 tegra_memory_barrier(struct pipe_context *pcontext, unsigned int flags)
974 {
975 struct tegra_context *context = to_tegra_context(pcontext);
976
977 context->gpu->memory_barrier(context->gpu, flags);
978 }
979
980 static struct pipe_video_codec *
981 tegra_create_video_codec(struct pipe_context *pcontext,
982 const struct pipe_video_codec *template)
983 {
984 struct tegra_context *context = to_tegra_context(pcontext);
985
986 return context->gpu->create_video_codec(context->gpu, template);
987 }
988
989 static struct pipe_video_buffer *
990 tegra_create_video_buffer(struct pipe_context *pcontext,
991 const struct pipe_video_buffer *template)
992 {
993 struct tegra_context *context = to_tegra_context(pcontext);
994
995 return context->gpu->create_video_buffer(context->gpu, template);
996 }
997
998 static void *
999 tegra_create_compute_state(struct pipe_context *pcontext,
1000 const struct pipe_compute_state *template)
1001 {
1002 struct tegra_context *context = to_tegra_context(pcontext);
1003
1004 return context->gpu->create_compute_state(context->gpu, template);
1005 }
1006
1007 static void
1008 tegra_bind_compute_state(struct pipe_context *pcontext, void *so)
1009 {
1010 struct tegra_context *context = to_tegra_context(pcontext);
1011
1012 context->gpu->bind_compute_state(context->gpu, so);
1013 }
1014
1015 static void
1016 tegra_delete_compute_state(struct pipe_context *pcontext, void *so)
1017 {
1018 struct tegra_context *context = to_tegra_context(pcontext);
1019
1020 context->gpu->delete_compute_state(context->gpu, so);
1021 }
1022
/*
 * Forward compute resource bindings to the wrapped GPU context.
 *
 * XXX unwrap resources — the surfaces passed in may wrap GPU-internal
 * surfaces and are currently forwarded as-is (known gap, see the
 * analogous unwrapping in tegra_set_framebuffer_state).
 */
static void
tegra_set_compute_resources(struct pipe_context *pcontext,
                            unsigned int start, unsigned int count,
                            struct pipe_surface **resources)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_compute_resources(context->gpu, start, count, resources);
}
1034
/*
 * Forward global compute buffer bindings to the wrapped GPU context.
 *
 * XXX unwrap resources — the pipe_resource pointers may be wrappers and
 * are currently forwarded as-is (known gap).
 */
static void
tegra_set_global_binding(struct pipe_context *pcontext, unsigned int first,
                         unsigned int count, struct pipe_resource **resources,
                         uint32_t **handles)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_global_binding(context->gpu, first, count, resources,
                                    handles);
}
1047
/*
 * Forward a compute dispatch to the wrapped GPU context.
 *
 * XXX unwrap info->indirect? — if the grid info carries an indirect
 * dispatch buffer it may be a wrapped resource and is currently
 * forwarded as-is (known gap).
 */
static void
tegra_launch_grid(struct pipe_context *pcontext,
                  const struct pipe_grid_info *info)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->launch_grid(context->gpu, info);
}
1058
1059 static void
1060 tegra_get_sample_position(struct pipe_context *pcontext, unsigned int count,
1061 unsigned int index, float *value)
1062 {
1063 struct tegra_context *context = to_tegra_context(pcontext);
1064
1065 context->gpu->get_sample_position(context->gpu, count, index, value);
1066 }
1067
1068 static uint64_t
1069 tegra_get_timestamp(struct pipe_context *pcontext)
1070 {
1071 struct tegra_context *context = to_tegra_context(pcontext);
1072
1073 return context->gpu->get_timestamp(context->gpu);
1074 }
1075
1076 static void
1077 tegra_flush_resource(struct pipe_context *pcontext,
1078 struct pipe_resource *presource)
1079 {
1080 struct tegra_resource *resource = to_tegra_resource(presource);
1081 struct tegra_context *context = to_tegra_context(pcontext);
1082
1083 context->gpu->flush_resource(context->gpu, resource->gpu);
1084 }
1085
1086 static void
1087 tegra_invalidate_resource(struct pipe_context *pcontext,
1088 struct pipe_resource *presource)
1089 {
1090 struct tegra_resource *resource = to_tegra_resource(presource);
1091 struct tegra_context *context = to_tegra_context(pcontext);
1092
1093 context->gpu->invalidate_resource(context->gpu, resource->gpu);
1094 }
1095
1096 static enum pipe_reset_status
1097 tegra_get_device_reset_status(struct pipe_context *pcontext)
1098 {
1099 struct tegra_context *context = to_tegra_context(pcontext);
1100
1101 return context->gpu->get_device_reset_status(context->gpu);
1102 }
1103
1104 static void
1105 tegra_set_device_reset_callback(struct pipe_context *pcontext,
1106 const struct pipe_device_reset_callback *cb)
1107 {
1108 struct tegra_context *context = to_tegra_context(pcontext);
1109
1110 context->gpu->set_device_reset_callback(context->gpu, cb);
1111 }
1112
1113 static void
1114 tegra_dump_debug_state(struct pipe_context *pcontext, FILE *stream,
1115 unsigned int flags)
1116 {
1117 struct tegra_context *context = to_tegra_context(pcontext);
1118
1119 context->gpu->dump_debug_state(context->gpu, stream, flags);
1120 }
1121
1122 static void
1123 tegra_emit_string_marker(struct pipe_context *pcontext, const char *string,
1124 int length)
1125 {
1126 struct tegra_context *context = to_tegra_context(pcontext);
1127
1128 context->gpu->emit_string_marker(context->gpu, string, length);
1129 }
1130
1131 static boolean
1132 tegra_generate_mipmap(struct pipe_context *pcontext,
1133 struct pipe_resource *presource,
1134 enum pipe_format format,
1135 unsigned int base_level,
1136 unsigned int last_level,
1137 unsigned int first_layer,
1138 unsigned int last_layer)
1139 {
1140 struct tegra_resource *resource = to_tegra_resource(presource);
1141 struct tegra_context *context = to_tegra_context(pcontext);
1142
1143 return context->gpu->generate_mipmap(context->gpu, resource->gpu, format,
1144 base_level, last_level, first_layer,
1145 last_layer);
1146 }
1147
1148 static uint64_t
1149 tegra_create_texture_handle(struct pipe_context *pcontext,
1150 struct pipe_sampler_view *view,
1151 const struct pipe_sampler_state *state)
1152 {
1153 struct tegra_context *context = to_tegra_context(pcontext);
1154
1155 return context->gpu->create_texture_handle(context->gpu, view, state);
1156 }
1157
1158 static void tegra_delete_texture_handle(struct pipe_context *pcontext,
1159 uint64_t handle)
1160 {
1161 struct tegra_context *context = to_tegra_context(pcontext);
1162
1163 context->gpu->delete_texture_handle(context->gpu, handle);
1164 }
1165
1166 static void tegra_make_texture_handle_resident(struct pipe_context *pcontext,
1167 uint64_t handle, bool resident)
1168 {
1169 struct tegra_context *context = to_tegra_context(pcontext);
1170
1171 context->gpu->make_texture_handle_resident(context->gpu, handle, resident);
1172 }
1173
1174 static uint64_t tegra_create_image_handle(struct pipe_context *pcontext,
1175 const struct pipe_image_view *image)
1176 {
1177 struct tegra_context *context = to_tegra_context(pcontext);
1178
1179 return context->gpu->create_image_handle(context->gpu, image);
1180 }
1181
1182 static void tegra_delete_image_handle(struct pipe_context *pcontext,
1183 uint64_t handle)
1184 {
1185 struct tegra_context *context = to_tegra_context(pcontext);
1186
1187 context->gpu->delete_image_handle(context->gpu, handle);
1188 }
1189
1190 static void tegra_make_image_handle_resident(struct pipe_context *pcontext,
1191 uint64_t handle, unsigned access,
1192 bool resident)
1193 {
1194 struct tegra_context *context = to_tegra_context(pcontext);
1195
1196 context->gpu->make_image_handle_resident(context->gpu, handle, access,
1197 resident);
1198 }
1199
/*
 * Create a Tegra context: wraps a context created on the renderer-only GPU
 * screen and forwards every pipe_context entry point to it (unwrapping
 * Tegra-wrapped resources where necessary). Returns NULL on allocation or
 * GPU context creation failure.
 */
struct pipe_context *
tegra_screen_context_create(struct pipe_screen *pscreen, void *priv,
                            unsigned int flags)
{
   struct tegra_screen *screen = to_tegra_screen(pscreen);
   struct tegra_context *context;

   context = calloc(1, sizeof(*context));
   if (!context)
      return NULL;

   /* the wrapped GPU (renderer) context does all the real work */
   context->gpu = screen->gpu->context_create(screen->gpu, priv, flags);
   if (!context->gpu) {
      debug_error("failed to create GPU context\n");
      goto free;
   }

   context->base.screen = &screen->base;
   context->base.priv = priv;

   /*
    * Create custom stream and const uploaders. Note that technically nouveau
    * already creates uploaders that could be reused, but that would make the
    * resource unwrapping rather complicated. The reason for that is that both
    * uploaders create resources based on the context that they were created
    * from, which means that nouveau's uploader will use the nouveau context
    * which means that those resources must not be unwrapped. So before each
    * resource is unwrapped, the code would need to check that it does not
    * correspond to the uploaders' buffers.
    *
    * However, duplicating the uploaders here sounds worse than it is. The
    * default implementation that nouveau uses allocates buffers lazily, and
    * since it is never used, no buffers will ever be allocated and the only
    * memory wasted is that occupied by the nouveau uploader itself.
    */
   context->base.stream_uploader = u_upload_create_default(&context->base);
   if (!context->base.stream_uploader)
      goto destroy;

   /* share one uploader for both stream and constant data */
   context->base.const_uploader = context->base.stream_uploader;

   context->base.destroy = tegra_destroy;

   /* drawing */
   context->base.draw_vbo = tegra_draw_vbo;

   context->base.render_condition = tegra_render_condition;

   /* queries */
   context->base.create_query = tegra_create_query;
   context->base.create_batch_query = tegra_create_batch_query;
   context->base.destroy_query = tegra_destroy_query;
   context->base.begin_query = tegra_begin_query;
   context->base.end_query = tegra_end_query;
   context->base.get_query_result = tegra_get_query_result;
   context->base.get_query_result_resource = tegra_get_query_result_resource;
   context->base.set_active_query_state = tegra_set_active_query_state;

   /* CSO state objects */
   context->base.create_blend_state = tegra_create_blend_state;
   context->base.bind_blend_state = tegra_bind_blend_state;
   context->base.delete_blend_state = tegra_delete_blend_state;

   context->base.create_sampler_state = tegra_create_sampler_state;
   context->base.bind_sampler_states = tegra_bind_sampler_states;
   context->base.delete_sampler_state = tegra_delete_sampler_state;

   context->base.create_rasterizer_state = tegra_create_rasterizer_state;
   context->base.bind_rasterizer_state = tegra_bind_rasterizer_state;
   context->base.delete_rasterizer_state = tegra_delete_rasterizer_state;

   context->base.create_depth_stencil_alpha_state = tegra_create_depth_stencil_alpha_state;
   context->base.bind_depth_stencil_alpha_state = tegra_bind_depth_stencil_alpha_state;
   context->base.delete_depth_stencil_alpha_state = tegra_delete_depth_stencil_alpha_state;

   /* shader stages */
   context->base.create_fs_state = tegra_create_fs_state;
   context->base.bind_fs_state = tegra_bind_fs_state;
   context->base.delete_fs_state = tegra_delete_fs_state;

   context->base.create_vs_state = tegra_create_vs_state;
   context->base.bind_vs_state = tegra_bind_vs_state;
   context->base.delete_vs_state = tegra_delete_vs_state;

   context->base.create_gs_state = tegra_create_gs_state;
   context->base.bind_gs_state = tegra_bind_gs_state;
   context->base.delete_gs_state = tegra_delete_gs_state;

   context->base.create_tcs_state = tegra_create_tcs_state;
   context->base.bind_tcs_state = tegra_bind_tcs_state;
   context->base.delete_tcs_state = tegra_delete_tcs_state;

   context->base.create_tes_state = tegra_create_tes_state;
   context->base.bind_tes_state = tegra_bind_tes_state;
   context->base.delete_tes_state = tegra_delete_tes_state;

   context->base.create_vertex_elements_state = tegra_create_vertex_elements_state;
   context->base.bind_vertex_elements_state = tegra_bind_vertex_elements_state;
   context->base.delete_vertex_elements_state = tegra_delete_vertex_elements_state;

   /* parameter state */
   context->base.set_blend_color = tegra_set_blend_color;
   context->base.set_stencil_ref = tegra_set_stencil_ref;
   context->base.set_sample_mask = tegra_set_sample_mask;
   context->base.set_min_samples = tegra_set_min_samples;
   context->base.set_clip_state = tegra_set_clip_state;

   context->base.set_constant_buffer = tegra_set_constant_buffer;
   context->base.set_framebuffer_state = tegra_set_framebuffer_state;
   context->base.set_polygon_stipple = tegra_set_polygon_stipple;
   context->base.set_scissor_states = tegra_set_scissor_states;
   context->base.set_window_rectangles = tegra_set_window_rectangles;
   context->base.set_viewport_states = tegra_set_viewport_states;
   context->base.set_sampler_views = tegra_set_sampler_views;
   context->base.set_tess_state = tegra_set_tess_state;

   context->base.set_debug_callback = tegra_set_debug_callback;

   context->base.set_shader_buffers = tegra_set_shader_buffers;
   context->base.set_shader_images = tegra_set_shader_images;
   context->base.set_vertex_buffers = tegra_set_vertex_buffers;

   /* stream output */
   context->base.create_stream_output_target = tegra_create_stream_output_target;
   context->base.stream_output_target_destroy = tegra_stream_output_target_destroy;
   context->base.set_stream_output_targets = tegra_set_stream_output_targets;

   /* blits and clears */
   context->base.resource_copy_region = tegra_resource_copy_region;
   context->base.blit = tegra_blit;
   context->base.clear = tegra_clear;
   context->base.clear_render_target = tegra_clear_render_target;
   context->base.clear_depth_stencil = tegra_clear_depth_stencil;
   context->base.clear_texture = tegra_clear_texture;
   context->base.clear_buffer = tegra_clear_buffer;
   context->base.flush = tegra_flush;

   /* fences */
   context->base.create_fence_fd = tegra_create_fence_fd;
   context->base.fence_server_sync = tegra_fence_server_sync;

   /* views and surfaces */
   context->base.create_sampler_view = tegra_create_sampler_view;
   context->base.sampler_view_destroy = tegra_sampler_view_destroy;

   context->base.create_surface = tegra_create_surface;
   context->base.surface_destroy = tegra_surface_destroy;

   /* resource transfers */
   context->base.transfer_map = tegra_transfer_map;
   context->base.transfer_flush_region = tegra_transfer_flush_region;
   context->base.transfer_unmap = tegra_transfer_unmap;
   context->base.buffer_subdata = tegra_buffer_subdata;
   context->base.texture_subdata = tegra_texture_subdata;

   context->base.texture_barrier = tegra_texture_barrier;
   context->base.memory_barrier = tegra_memory_barrier;

   /* video */
   context->base.create_video_codec = tegra_create_video_codec;
   context->base.create_video_buffer = tegra_create_video_buffer;

   /* compute */
   context->base.create_compute_state = tegra_create_compute_state;
   context->base.bind_compute_state = tegra_bind_compute_state;
   context->base.delete_compute_state = tegra_delete_compute_state;
   context->base.set_compute_resources = tegra_set_compute_resources;
   context->base.set_global_binding = tegra_set_global_binding;
   context->base.launch_grid = tegra_launch_grid;
   context->base.get_sample_position = tegra_get_sample_position;
   context->base.get_timestamp = tegra_get_timestamp;

   context->base.flush_resource = tegra_flush_resource;
   context->base.invalidate_resource = tegra_invalidate_resource;

   /* debug and robustness */
   context->base.get_device_reset_status = tegra_get_device_reset_status;
   context->base.set_device_reset_callback = tegra_set_device_reset_callback;
   context->base.dump_debug_state = tegra_dump_debug_state;
   context->base.emit_string_marker = tegra_emit_string_marker;

   context->base.generate_mipmap = tegra_generate_mipmap;

   /* bindless handles */
   context->base.create_texture_handle = tegra_create_texture_handle;
   context->base.delete_texture_handle = tegra_delete_texture_handle;
   context->base.make_texture_handle_resident = tegra_make_texture_handle_resident;
   context->base.create_image_handle = tegra_create_image_handle;
   context->base.delete_image_handle = tegra_delete_image_handle;
   context->base.make_image_handle_resident = tegra_make_image_handle_resident;

   return &context->base;

destroy:
   context->gpu->destroy(context->gpu);
free:
   free(context);
   return NULL;
}