/*
 * Copyright © 2014-2018 NVIDIA Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
27 #include "util/u_debug.h"
28 #include "util/u_inlines.h"
29 #include "util/u_upload_mgr.h"
31 #include "tegra_context.h"
32 #include "tegra_resource.h"
33 #include "tegra_screen.h"
36 tegra_destroy(struct pipe_context
*pcontext
)
38 struct tegra_context
*context
= to_tegra_context(pcontext
);
40 if (context
->base
.stream_uploader
)
41 u_upload_destroy(context
->base
.stream_uploader
);
43 context
->gpu
->destroy(context
->gpu
);
48 tegra_draw_vbo(struct pipe_context
*pcontext
,
49 const struct pipe_draw_info
*pinfo
)
51 struct tegra_context
*context
= to_tegra_context(pcontext
);
52 struct pipe_draw_indirect_info indirect
;
53 struct pipe_draw_info info
;
55 if (pinfo
&& (pinfo
->indirect
|| pinfo
->index_size
)) {
56 memcpy(&info
, pinfo
, sizeof(info
));
58 if (pinfo
->indirect
) {
59 memcpy(&indirect
, pinfo
->indirect
, sizeof(indirect
));
60 indirect
.buffer
= tegra_resource_unwrap(info
.indirect
->buffer
);
61 info
.indirect
= &indirect
;
64 if (pinfo
->index_size
&& !pinfo
->has_user_indices
)
65 info
.index
.resource
= tegra_resource_unwrap(info
.index
.resource
);
70 context
->gpu
->draw_vbo(context
->gpu
, pinfo
);
74 tegra_render_condition(struct pipe_context
*pcontext
,
75 struct pipe_query
*query
,
79 struct tegra_context
*context
= to_tegra_context(pcontext
);
81 context
->gpu
->render_condition(context
->gpu
, query
, condition
, mode
);
84 static struct pipe_query
*
85 tegra_create_query(struct pipe_context
*pcontext
, unsigned int query_type
,
88 struct tegra_context
*context
= to_tegra_context(pcontext
);
90 return context
->gpu
->create_query(context
->gpu
, query_type
, index
);
93 static struct pipe_query
*
94 tegra_create_batch_query(struct pipe_context
*pcontext
,
95 unsigned int num_queries
,
96 unsigned int *queries
)
98 struct tegra_context
*context
= to_tegra_context(pcontext
);
100 return context
->gpu
->create_batch_query(context
->gpu
, num_queries
,
105 tegra_destroy_query(struct pipe_context
*pcontext
, struct pipe_query
*query
)
107 struct tegra_context
*context
= to_tegra_context(pcontext
);
109 context
->gpu
->destroy_query(context
->gpu
, query
);
113 tegra_begin_query(struct pipe_context
*pcontext
, struct pipe_query
*query
)
115 struct tegra_context
*context
= to_tegra_context(pcontext
);
117 return context
->gpu
->begin_query(context
->gpu
, query
);
121 tegra_end_query(struct pipe_context
*pcontext
, struct pipe_query
*query
)
123 struct tegra_context
*context
= to_tegra_context(pcontext
);
125 return context
->gpu
->end_query(context
->gpu
, query
);
129 tegra_get_query_result(struct pipe_context
*pcontext
,
130 struct pipe_query
*query
,
132 union pipe_query_result
*result
)
134 struct tegra_context
*context
= to_tegra_context(pcontext
);
136 return context
->gpu
->get_query_result(context
->gpu
, query
, wait
,
141 tegra_get_query_result_resource(struct pipe_context
*pcontext
,
142 struct pipe_query
*query
,
144 enum pipe_query_value_type result_type
,
146 struct pipe_resource
*resource
,
149 struct tegra_context
*context
= to_tegra_context(pcontext
);
151 context
->gpu
->get_query_result_resource(context
->gpu
, query
, wait
,
152 result_type
, index
, resource
,
157 tegra_set_active_query_state(struct pipe_context
*pcontext
, boolean enable
)
159 struct tegra_context
*context
= to_tegra_context(pcontext
);
161 context
->gpu
->set_active_query_state(context
->gpu
, enable
);
165 tegra_create_blend_state(struct pipe_context
*pcontext
,
166 const struct pipe_blend_state
*cso
)
168 struct tegra_context
*context
= to_tegra_context(pcontext
);
170 return context
->gpu
->create_blend_state(context
->gpu
, cso
);
174 tegra_bind_blend_state(struct pipe_context
*pcontext
, void *so
)
176 struct tegra_context
*context
= to_tegra_context(pcontext
);
178 context
->gpu
->bind_blend_state(context
->gpu
, so
);
182 tegra_delete_blend_state(struct pipe_context
*pcontext
, void *so
)
184 struct tegra_context
*context
= to_tegra_context(pcontext
);
186 context
->gpu
->delete_blend_state(context
->gpu
, so
);
190 tegra_create_sampler_state(struct pipe_context
*pcontext
,
191 const struct pipe_sampler_state
*cso
)
193 struct tegra_context
*context
= to_tegra_context(pcontext
);
195 return context
->gpu
->create_sampler_state(context
->gpu
, cso
);
199 tegra_bind_sampler_states(struct pipe_context
*pcontext
, unsigned shader
,
200 unsigned start_slot
, unsigned num_samplers
,
203 struct tegra_context
*context
= to_tegra_context(pcontext
);
205 context
->gpu
->bind_sampler_states(context
->gpu
, shader
, start_slot
,
206 num_samplers
, samplers
);
210 tegra_delete_sampler_state(struct pipe_context
*pcontext
, void *so
)
212 struct tegra_context
*context
= to_tegra_context(pcontext
);
214 context
->gpu
->delete_sampler_state(context
->gpu
, so
);
218 tegra_create_rasterizer_state(struct pipe_context
*pcontext
,
219 const struct pipe_rasterizer_state
*cso
)
221 struct tegra_context
*context
= to_tegra_context(pcontext
);
223 return context
->gpu
->create_rasterizer_state(context
->gpu
, cso
);
227 tegra_bind_rasterizer_state(struct pipe_context
*pcontext
, void *so
)
229 struct tegra_context
*context
= to_tegra_context(pcontext
);
231 context
->gpu
->bind_rasterizer_state(context
->gpu
, so
);
235 tegra_delete_rasterizer_state(struct pipe_context
*pcontext
, void *so
)
237 struct tegra_context
*context
= to_tegra_context(pcontext
);
239 context
->gpu
->delete_rasterizer_state(context
->gpu
, so
);
243 tegra_create_depth_stencil_alpha_state(struct pipe_context
*pcontext
,
244 const struct pipe_depth_stencil_alpha_state
*cso
)
246 struct tegra_context
*context
= to_tegra_context(pcontext
);
248 return context
->gpu
->create_depth_stencil_alpha_state(context
->gpu
, cso
);
252 tegra_bind_depth_stencil_alpha_state(struct pipe_context
*pcontext
, void *so
)
254 struct tegra_context
*context
= to_tegra_context(pcontext
);
256 context
->gpu
->bind_depth_stencil_alpha_state(context
->gpu
, so
);
260 tegra_delete_depth_stencil_alpha_state(struct pipe_context
*pcontext
, void *so
)
262 struct tegra_context
*context
= to_tegra_context(pcontext
);
264 context
->gpu
->delete_depth_stencil_alpha_state(context
->gpu
, so
);
268 tegra_create_fs_state(struct pipe_context
*pcontext
,
269 const struct pipe_shader_state
*cso
)
271 struct tegra_context
*context
= to_tegra_context(pcontext
);
273 return context
->gpu
->create_fs_state(context
->gpu
, cso
);
277 tegra_bind_fs_state(struct pipe_context
*pcontext
, void *so
)
279 struct tegra_context
*context
= to_tegra_context(pcontext
);
281 context
->gpu
->bind_fs_state(context
->gpu
, so
);
285 tegra_delete_fs_state(struct pipe_context
*pcontext
, void *so
)
287 struct tegra_context
*context
= to_tegra_context(pcontext
);
289 context
->gpu
->delete_fs_state(context
->gpu
, so
);
293 tegra_create_vs_state(struct pipe_context
*pcontext
,
294 const struct pipe_shader_state
*cso
)
296 struct tegra_context
*context
= to_tegra_context(pcontext
);
298 return context
->gpu
->create_vs_state(context
->gpu
, cso
);
302 tegra_bind_vs_state(struct pipe_context
*pcontext
, void *so
)
304 struct tegra_context
*context
= to_tegra_context(pcontext
);
306 context
->gpu
->bind_vs_state(context
->gpu
, so
);
310 tegra_delete_vs_state(struct pipe_context
*pcontext
, void *so
)
312 struct tegra_context
*context
= to_tegra_context(pcontext
);
314 context
->gpu
->delete_vs_state(context
->gpu
, so
);
318 tegra_create_gs_state(struct pipe_context
*pcontext
,
319 const struct pipe_shader_state
*cso
)
321 struct tegra_context
*context
= to_tegra_context(pcontext
);
323 return context
->gpu
->create_gs_state(context
->gpu
, cso
);
327 tegra_bind_gs_state(struct pipe_context
*pcontext
, void *so
)
329 struct tegra_context
*context
= to_tegra_context(pcontext
);
331 context
->gpu
->bind_gs_state(context
->gpu
, so
);
335 tegra_delete_gs_state(struct pipe_context
*pcontext
, void *so
)
337 struct tegra_context
*context
= to_tegra_context(pcontext
);
339 context
->gpu
->delete_gs_state(context
->gpu
, so
);
343 tegra_create_tcs_state(struct pipe_context
*pcontext
,
344 const struct pipe_shader_state
*cso
)
346 struct tegra_context
*context
= to_tegra_context(pcontext
);
348 return context
->gpu
->create_tcs_state(context
->gpu
, cso
);
352 tegra_bind_tcs_state(struct pipe_context
*pcontext
, void *so
)
354 struct tegra_context
*context
= to_tegra_context(pcontext
);
356 context
->gpu
->bind_tcs_state(context
->gpu
, so
);
360 tegra_delete_tcs_state(struct pipe_context
*pcontext
, void *so
)
362 struct tegra_context
*context
= to_tegra_context(pcontext
);
364 context
->gpu
->delete_tcs_state(context
->gpu
, so
);
368 tegra_create_tes_state(struct pipe_context
*pcontext
,
369 const struct pipe_shader_state
*cso
)
371 struct tegra_context
*context
= to_tegra_context(pcontext
);
373 return context
->gpu
->create_tes_state(context
->gpu
, cso
);
377 tegra_bind_tes_state(struct pipe_context
*pcontext
, void *so
)
379 struct tegra_context
*context
= to_tegra_context(pcontext
);
381 context
->gpu
->bind_tes_state(context
->gpu
, so
);
385 tegra_delete_tes_state(struct pipe_context
*pcontext
, void *so
)
387 struct tegra_context
*context
= to_tegra_context(pcontext
);
389 context
->gpu
->delete_tes_state(context
->gpu
, so
);
393 tegra_create_vertex_elements_state(struct pipe_context
*pcontext
,
394 unsigned num_elements
,
395 const struct pipe_vertex_element
*elements
)
397 struct tegra_context
*context
= to_tegra_context(pcontext
);
399 return context
->gpu
->create_vertex_elements_state(context
->gpu
,
405 tegra_bind_vertex_elements_state(struct pipe_context
*pcontext
, void *so
)
407 struct tegra_context
*context
= to_tegra_context(pcontext
);
409 context
->gpu
->bind_vertex_elements_state(context
->gpu
, so
);
413 tegra_delete_vertex_elements_state(struct pipe_context
*pcontext
, void *so
)
415 struct tegra_context
*context
= to_tegra_context(pcontext
);
417 context
->gpu
->delete_vertex_elements_state(context
->gpu
, so
);
421 tegra_set_blend_color(struct pipe_context
*pcontext
,
422 const struct pipe_blend_color
*color
)
424 struct tegra_context
*context
= to_tegra_context(pcontext
);
426 context
->gpu
->set_blend_color(context
->gpu
, color
);
430 tegra_set_stencil_ref(struct pipe_context
*pcontext
,
431 const struct pipe_stencil_ref
*ref
)
433 struct tegra_context
*context
= to_tegra_context(pcontext
);
435 context
->gpu
->set_stencil_ref(context
->gpu
, ref
);
439 tegra_set_sample_mask(struct pipe_context
*pcontext
, unsigned int mask
)
441 struct tegra_context
*context
= to_tegra_context(pcontext
);
443 context
->gpu
->set_sample_mask(context
->gpu
, mask
);
447 tegra_set_min_samples(struct pipe_context
*pcontext
, unsigned int samples
)
449 struct tegra_context
*context
= to_tegra_context(pcontext
);
451 context
->gpu
->set_min_samples(context
->gpu
, samples
);
455 tegra_set_clip_state(struct pipe_context
*pcontext
,
456 const struct pipe_clip_state
*state
)
458 struct tegra_context
*context
= to_tegra_context(pcontext
);
460 context
->gpu
->set_clip_state(context
->gpu
, state
);
464 tegra_set_constant_buffer(struct pipe_context
*pcontext
, unsigned int shader
,
466 const struct pipe_constant_buffer
*buf
)
468 struct tegra_context
*context
= to_tegra_context(pcontext
);
469 struct pipe_constant_buffer buffer
;
471 if (buf
&& buf
->buffer
) {
472 memcpy(&buffer
, buf
, sizeof(buffer
));
473 buffer
.buffer
= tegra_resource_unwrap(buffer
.buffer
);
477 context
->gpu
->set_constant_buffer(context
->gpu
, shader
, index
, buf
);
481 tegra_set_framebuffer_state(struct pipe_context
*pcontext
,
482 const struct pipe_framebuffer_state
*fb
)
484 struct tegra_context
*context
= to_tegra_context(pcontext
);
485 struct pipe_framebuffer_state state
;
489 memcpy(&state
, fb
, sizeof(state
));
491 for (i
= 0; i
< fb
->nr_cbufs
; i
++)
492 state
.cbufs
[i
] = tegra_surface_unwrap(fb
->cbufs
[i
]);
494 while (i
< PIPE_MAX_COLOR_BUFS
)
495 state
.cbufs
[i
++] = NULL
;
497 state
.zsbuf
= tegra_surface_unwrap(fb
->zsbuf
);
502 context
->gpu
->set_framebuffer_state(context
->gpu
, fb
);
506 tegra_set_polygon_stipple(struct pipe_context
*pcontext
,
507 const struct pipe_poly_stipple
*stipple
)
509 struct tegra_context
*context
= to_tegra_context(pcontext
);
511 context
->gpu
->set_polygon_stipple(context
->gpu
, stipple
);
515 tegra_set_scissor_states(struct pipe_context
*pcontext
, unsigned start_slot
,
516 unsigned num_scissors
,
517 const struct pipe_scissor_state
*scissors
)
519 struct tegra_context
*context
= to_tegra_context(pcontext
);
521 context
->gpu
->set_scissor_states(context
->gpu
, start_slot
, num_scissors
,
526 tegra_set_window_rectangles(struct pipe_context
*pcontext
, boolean include
,
527 unsigned int num_rectangles
,
528 const struct pipe_scissor_state
*rectangles
)
530 struct tegra_context
*context
= to_tegra_context(pcontext
);
532 context
->gpu
->set_window_rectangles(context
->gpu
, include
, num_rectangles
,
537 tegra_set_viewport_states(struct pipe_context
*pcontext
, unsigned start_slot
,
538 unsigned num_viewports
,
539 const struct pipe_viewport_state
*viewports
)
541 struct tegra_context
*context
= to_tegra_context(pcontext
);
543 context
->gpu
->set_viewport_states(context
->gpu
, start_slot
, num_viewports
,
548 tegra_set_sampler_views(struct pipe_context
*pcontext
, unsigned shader
,
549 unsigned start_slot
, unsigned num_views
,
550 struct pipe_sampler_view
**pviews
)
552 struct pipe_sampler_view
*views
[PIPE_MAX_SHADER_SAMPLER_VIEWS
];
553 struct tegra_context
*context
= to_tegra_context(pcontext
);
556 for (i
= 0; i
< num_views
; i
++)
557 views
[i
] = tegra_sampler_view_unwrap(pviews
[i
]);
559 context
->gpu
->set_sampler_views(context
->gpu
, shader
, start_slot
,
564 tegra_set_tess_state(struct pipe_context
*pcontext
,
565 const float default_outer_level
[4],
566 const float default_inner_level
[2])
568 struct tegra_context
*context
= to_tegra_context(pcontext
);
570 context
->gpu
->set_tess_state(context
->gpu
, default_outer_level
,
571 default_inner_level
);
575 tegra_set_debug_callback(struct pipe_context
*pcontext
,
576 const struct pipe_debug_callback
*callback
)
578 struct tegra_context
*context
= to_tegra_context(pcontext
);
580 context
->gpu
->set_debug_callback(context
->gpu
, callback
);
584 tegra_set_shader_buffers(struct pipe_context
*pcontext
, unsigned int shader
,
585 unsigned start
, unsigned count
,
586 const struct pipe_shader_buffer
*buffers
)
588 struct tegra_context
*context
= to_tegra_context(pcontext
);
590 context
->gpu
->set_shader_buffers(context
->gpu
, shader
, start
, count
,
595 tegra_set_shader_images(struct pipe_context
*pcontext
, unsigned int shader
,
596 unsigned start
, unsigned count
,
597 const struct pipe_image_view
*images
)
599 struct tegra_context
*context
= to_tegra_context(pcontext
);
601 context
->gpu
->set_shader_images(context
->gpu
, shader
, start
, count
,
606 tegra_set_vertex_buffers(struct pipe_context
*pcontext
, unsigned start_slot
,
607 unsigned num_buffers
,
608 const struct pipe_vertex_buffer
*buffers
)
610 struct tegra_context
*context
= to_tegra_context(pcontext
);
611 struct pipe_vertex_buffer buf
[PIPE_MAX_SHADER_INPUTS
];
614 if (num_buffers
&& buffers
) {
615 memcpy(buf
, buffers
, num_buffers
* sizeof(struct pipe_vertex_buffer
));
617 for (i
= 0; i
< num_buffers
; i
++) {
618 if (!buf
[i
].is_user_buffer
)
619 buf
[i
].buffer
.resource
= tegra_resource_unwrap(buf
[i
].buffer
.resource
);
625 context
->gpu
->set_vertex_buffers(context
->gpu
, start_slot
, num_buffers
,
629 static struct pipe_stream_output_target
*
630 tegra_create_stream_output_target(struct pipe_context
*pcontext
,
631 struct pipe_resource
*presource
,
632 unsigned buffer_offset
,
633 unsigned buffer_size
)
635 struct tegra_resource
*resource
= to_tegra_resource(presource
);
636 struct tegra_context
*context
= to_tegra_context(pcontext
);
638 return context
->gpu
->create_stream_output_target(context
->gpu
,
645 tegra_stream_output_target_destroy(struct pipe_context
*pcontext
,
646 struct pipe_stream_output_target
*target
)
648 struct tegra_context
*context
= to_tegra_context(pcontext
);
650 context
->gpu
->stream_output_target_destroy(context
->gpu
, target
);
654 tegra_set_stream_output_targets(struct pipe_context
*pcontext
,
655 unsigned num_targets
,
656 struct pipe_stream_output_target
**targets
,
657 const unsigned *offsets
)
659 struct tegra_context
*context
= to_tegra_context(pcontext
);
661 context
->gpu
->set_stream_output_targets(context
->gpu
, num_targets
,
666 tegra_resource_copy_region(struct pipe_context
*pcontext
,
667 struct pipe_resource
*pdst
,
668 unsigned int dst_level
,
672 struct pipe_resource
*psrc
,
673 unsigned int src_level
,
674 const struct pipe_box
*src_box
)
676 struct tegra_context
*context
= to_tegra_context(pcontext
);
677 struct tegra_resource
*dst
= to_tegra_resource(pdst
);
678 struct tegra_resource
*src
= to_tegra_resource(psrc
);
680 context
->gpu
->resource_copy_region(context
->gpu
, dst
->gpu
, dst_level
, dstx
,
681 dsty
, dstz
, src
->gpu
, src_level
,
686 tegra_blit(struct pipe_context
*pcontext
, const struct pipe_blit_info
*pinfo
)
688 struct tegra_context
*context
= to_tegra_context(pcontext
);
689 struct pipe_blit_info info
;
692 memcpy(&info
, pinfo
, sizeof(info
));
693 info
.dst
.resource
= tegra_resource_unwrap(info
.dst
.resource
);
694 info
.src
.resource
= tegra_resource_unwrap(info
.src
.resource
);
698 context
->gpu
->blit(context
->gpu
, pinfo
);
702 tegra_clear(struct pipe_context
*pcontext
, unsigned buffers
,
703 const union pipe_color_union
*color
, double depth
,
706 struct tegra_context
*context
= to_tegra_context(pcontext
);
708 context
->gpu
->clear(context
->gpu
, buffers
, color
, depth
, stencil
);
712 tegra_clear_render_target(struct pipe_context
*pcontext
,
713 struct pipe_surface
*pdst
,
714 const union pipe_color_union
*color
,
719 bool render_condition
)
721 struct tegra_context
*context
= to_tegra_context(pcontext
);
722 struct tegra_surface
*dst
= to_tegra_surface(pdst
);
724 context
->gpu
->clear_render_target(context
->gpu
, dst
->gpu
, color
, dstx
,
725 dsty
, width
, height
, render_condition
);
729 tegra_clear_depth_stencil(struct pipe_context
*pcontext
,
730 struct pipe_surface
*pdst
,
733 unsigned int stencil
,
738 bool render_condition
)
740 struct tegra_context
*context
= to_tegra_context(pcontext
);
741 struct tegra_surface
*dst
= to_tegra_surface(pdst
);
743 context
->gpu
->clear_depth_stencil(context
->gpu
, dst
->gpu
, flags
, depth
,
744 stencil
, dstx
, dsty
, width
, height
,
749 tegra_clear_texture(struct pipe_context
*pcontext
,
750 struct pipe_resource
*presource
,
752 const struct pipe_box
*box
,
755 struct tegra_resource
*resource
= to_tegra_resource(presource
);
756 struct tegra_context
*context
= to_tegra_context(pcontext
);
758 context
->gpu
->clear_texture(context
->gpu
, resource
->gpu
, level
, box
, data
);
762 tegra_clear_buffer(struct pipe_context
*pcontext
,
763 struct pipe_resource
*presource
,
769 struct tegra_resource
*resource
= to_tegra_resource(presource
);
770 struct tegra_context
*context
= to_tegra_context(pcontext
);
772 context
->gpu
->clear_buffer(context
->gpu
, resource
->gpu
, offset
, size
,
777 tegra_flush(struct pipe_context
*pcontext
, struct pipe_fence_handle
**fence
,
780 struct tegra_context
*context
= to_tegra_context(pcontext
);
782 context
->gpu
->flush(context
->gpu
, fence
, flags
);
786 tegra_create_fence_fd(struct pipe_context
*pcontext
,
787 struct pipe_fence_handle
**fence
,
788 int fd
, enum pipe_fd_type type
)
790 struct tegra_context
*context
= to_tegra_context(pcontext
);
792 assert(type
== PIPE_FD_TYPE_NATIVE_SYNC
);
793 context
->gpu
->create_fence_fd(context
->gpu
, fence
, fd
, type
);
797 tegra_fence_server_sync(struct pipe_context
*pcontext
,
798 struct pipe_fence_handle
*fence
)
800 struct tegra_context
*context
= to_tegra_context(pcontext
);
802 context
->gpu
->fence_server_sync(context
->gpu
, fence
);
805 static struct pipe_sampler_view
*
806 tegra_create_sampler_view(struct pipe_context
*pcontext
,
807 struct pipe_resource
*presource
,
808 const struct pipe_sampler_view
*template)
810 struct tegra_resource
*resource
= to_tegra_resource(presource
);
811 struct tegra_context
*context
= to_tegra_context(pcontext
);
812 struct tegra_sampler_view
*view
;
814 view
= calloc(1, sizeof(*view
));
818 view
->gpu
= context
->gpu
->create_sampler_view(context
->gpu
, resource
->gpu
,
820 memcpy(&view
->base
, view
->gpu
, sizeof(*view
->gpu
));
821 /* overwrite to prevent reference from being released */
822 view
->base
.texture
= NULL
;
824 pipe_reference_init(&view
->base
.reference
, 1);
825 pipe_resource_reference(&view
->base
.texture
, presource
);
826 view
->base
.context
= pcontext
;
832 tegra_sampler_view_destroy(struct pipe_context
*pcontext
,
833 struct pipe_sampler_view
*pview
)
835 struct tegra_sampler_view
*view
= to_tegra_sampler_view(pview
);
837 pipe_resource_reference(&view
->base
.texture
, NULL
);
838 pipe_sampler_view_reference(&view
->gpu
, NULL
);
842 static struct pipe_surface
*
843 tegra_create_surface(struct pipe_context
*pcontext
,
844 struct pipe_resource
*presource
,
845 const struct pipe_surface
*template)
847 struct tegra_resource
*resource
= to_tegra_resource(presource
);
848 struct tegra_context
*context
= to_tegra_context(pcontext
);
849 struct tegra_surface
*surface
;
851 surface
= calloc(1, sizeof(*surface
));
855 surface
->gpu
= context
->gpu
->create_surface(context
->gpu
, resource
->gpu
,
862 memcpy(&surface
->base
, surface
->gpu
, sizeof(*surface
->gpu
));
863 /* overwrite to prevent reference from being released */
864 surface
->base
.texture
= NULL
;
866 pipe_reference_init(&surface
->base
.reference
, 1);
867 pipe_resource_reference(&surface
->base
.texture
, presource
);
868 surface
->base
.context
= &context
->base
;
870 return &surface
->base
;
874 tegra_surface_destroy(struct pipe_context
*pcontext
,
875 struct pipe_surface
*psurface
)
877 struct tegra_surface
*surface
= to_tegra_surface(psurface
);
879 pipe_resource_reference(&surface
->base
.texture
, NULL
);
880 pipe_surface_reference(&surface
->gpu
, NULL
);
885 tegra_transfer_map(struct pipe_context
*pcontext
,
886 struct pipe_resource
*presource
,
887 unsigned level
, unsigned usage
,
888 const struct pipe_box
*box
,
889 struct pipe_transfer
**ptransfer
)
891 struct tegra_resource
*resource
= to_tegra_resource(presource
);
892 struct tegra_context
*context
= to_tegra_context(pcontext
);
893 struct tegra_transfer
*transfer
;
895 transfer
= calloc(1, sizeof(*transfer
));
899 transfer
->map
= context
->gpu
->transfer_map(context
->gpu
, resource
->gpu
,
902 memcpy(&transfer
->base
, transfer
->gpu
, sizeof(*transfer
->gpu
));
903 transfer
->base
.resource
= NULL
;
904 pipe_resource_reference(&transfer
->base
.resource
, presource
);
906 *ptransfer
= &transfer
->base
;
908 return transfer
->map
;
912 tegra_transfer_flush_region(struct pipe_context
*pcontext
,
913 struct pipe_transfer
*ptransfer
,
914 const struct pipe_box
*box
)
916 struct tegra_transfer
*transfer
= to_tegra_transfer(ptransfer
);
917 struct tegra_context
*context
= to_tegra_context(pcontext
);
919 context
->gpu
->transfer_flush_region(context
->gpu
, transfer
->gpu
, box
);
923 tegra_transfer_unmap(struct pipe_context
*pcontext
,
924 struct pipe_transfer
*ptransfer
)
926 struct tegra_transfer
*transfer
= to_tegra_transfer(ptransfer
);
927 struct tegra_context
*context
= to_tegra_context(pcontext
);
929 context
->gpu
->transfer_unmap(context
->gpu
, transfer
->gpu
);
930 pipe_resource_reference(&transfer
->base
.resource
, NULL
);
935 tegra_buffer_subdata(struct pipe_context
*pcontext
,
936 struct pipe_resource
*presource
,
937 unsigned usage
, unsigned offset
,
938 unsigned size
, const void *data
)
940 struct tegra_resource
*resource
= to_tegra_resource(presource
);
941 struct tegra_context
*context
= to_tegra_context(pcontext
);
943 context
->gpu
->buffer_subdata(context
->gpu
, resource
->gpu
, usage
, offset
,
948 tegra_texture_subdata(struct pipe_context
*pcontext
,
949 struct pipe_resource
*presource
,
952 const struct pipe_box
*box
,
955 unsigned layer_stride
)
957 struct tegra_resource
*resource
= to_tegra_resource(presource
);
958 struct tegra_context
*context
= to_tegra_context(pcontext
);
960 context
->gpu
->texture_subdata(context
->gpu
, resource
->gpu
, level
, usage
,
961 box
, data
, stride
, layer_stride
);
965 tegra_texture_barrier(struct pipe_context
*pcontext
, unsigned int flags
)
967 struct tegra_context
*context
= to_tegra_context(pcontext
);
969 context
->gpu
->texture_barrier(context
->gpu
, flags
);
973 tegra_memory_barrier(struct pipe_context
*pcontext
, unsigned int flags
)
975 struct tegra_context
*context
= to_tegra_context(pcontext
);
977 context
->gpu
->memory_barrier(context
->gpu
, flags
);
980 static struct pipe_video_codec
*
981 tegra_create_video_codec(struct pipe_context
*pcontext
,
982 const struct pipe_video_codec
*template)
984 struct tegra_context
*context
= to_tegra_context(pcontext
);
986 return context
->gpu
->create_video_codec(context
->gpu
, template);
989 static struct pipe_video_buffer
*
990 tegra_create_video_buffer(struct pipe_context
*pcontext
,
991 const struct pipe_video_buffer
*template)
993 struct tegra_context
*context
= to_tegra_context(pcontext
);
995 return context
->gpu
->create_video_buffer(context
->gpu
, template);
999 tegra_create_compute_state(struct pipe_context
*pcontext
,
1000 const struct pipe_compute_state
*template)
1002 struct tegra_context
*context
= to_tegra_context(pcontext
);
1004 return context
->gpu
->create_compute_state(context
->gpu
, template);
1008 tegra_bind_compute_state(struct pipe_context
*pcontext
, void *so
)
1010 struct tegra_context
*context
= to_tegra_context(pcontext
);
1012 context
->gpu
->bind_compute_state(context
->gpu
, so
);
1016 tegra_delete_compute_state(struct pipe_context
*pcontext
, void *so
)
1018 struct tegra_context
*context
= to_tegra_context(pcontext
);
1020 context
->gpu
->delete_compute_state(context
->gpu
, so
);
1024 tegra_set_compute_resources(struct pipe_context
*pcontext
,
1025 unsigned int start
, unsigned int count
,
1026 struct pipe_surface
**resources
)
1028 struct tegra_context
*context
= to_tegra_context(pcontext
);
1030 /* XXX unwrap resources */
1032 context
->gpu
->set_compute_resources(context
->gpu
, start
, count
, resources
);
1036 tegra_set_global_binding(struct pipe_context
*pcontext
, unsigned int first
,
1037 unsigned int count
, struct pipe_resource
**resources
,
1040 struct tegra_context
*context
= to_tegra_context(pcontext
);
1042 /* XXX unwrap resources */
1044 context
->gpu
->set_global_binding(context
->gpu
, first
, count
, resources
,
1049 tegra_launch_grid(struct pipe_context
*pcontext
,
1050 const struct pipe_grid_info
*info
)
1052 struct tegra_context
*context
= to_tegra_context(pcontext
);
1054 /* XXX unwrap info->indirect? */
1056 context
->gpu
->launch_grid(context
->gpu
, info
);
1060 tegra_get_sample_position(struct pipe_context
*pcontext
, unsigned int count
,
1061 unsigned int index
, float *value
)
1063 struct tegra_context
*context
= to_tegra_context(pcontext
);
1065 context
->gpu
->get_sample_position(context
->gpu
, count
, index
, value
);
1069 tegra_get_timestamp(struct pipe_context
*pcontext
)
1071 struct tegra_context
*context
= to_tegra_context(pcontext
);
1073 return context
->gpu
->get_timestamp(context
->gpu
);
1077 tegra_flush_resource(struct pipe_context
*pcontext
,
1078 struct pipe_resource
*presource
)
1080 struct tegra_resource
*resource
= to_tegra_resource(presource
);
1081 struct tegra_context
*context
= to_tegra_context(pcontext
);
1083 context
->gpu
->flush_resource(context
->gpu
, resource
->gpu
);
1087 tegra_invalidate_resource(struct pipe_context
*pcontext
,
1088 struct pipe_resource
*presource
)
1090 struct tegra_resource
*resource
= to_tegra_resource(presource
);
1091 struct tegra_context
*context
= to_tegra_context(pcontext
);
1093 context
->gpu
->invalidate_resource(context
->gpu
, resource
->gpu
);
1096 static enum pipe_reset_status
1097 tegra_get_device_reset_status(struct pipe_context
*pcontext
)
1099 struct tegra_context
*context
= to_tegra_context(pcontext
);
1101 return context
->gpu
->get_device_reset_status(context
->gpu
);
1105 tegra_set_device_reset_callback(struct pipe_context
*pcontext
,
1106 const struct pipe_device_reset_callback
*cb
)
1108 struct tegra_context
*context
= to_tegra_context(pcontext
);
1110 context
->gpu
->set_device_reset_callback(context
->gpu
, cb
);
1114 tegra_dump_debug_state(struct pipe_context
*pcontext
, FILE *stream
,
1117 struct tegra_context
*context
= to_tegra_context(pcontext
);
1119 context
->gpu
->dump_debug_state(context
->gpu
, stream
, flags
);
1123 tegra_emit_string_marker(struct pipe_context
*pcontext
, const char *string
,
1126 struct tegra_context
*context
= to_tegra_context(pcontext
);
1128 context
->gpu
->emit_string_marker(context
->gpu
, string
, length
);
1132 tegra_generate_mipmap(struct pipe_context
*pcontext
,
1133 struct pipe_resource
*presource
,
1134 enum pipe_format format
,
1135 unsigned int base_level
,
1136 unsigned int last_level
,
1137 unsigned int first_layer
,
1138 unsigned int last_layer
)
1140 struct tegra_resource
*resource
= to_tegra_resource(presource
);
1141 struct tegra_context
*context
= to_tegra_context(pcontext
);
1143 return context
->gpu
->generate_mipmap(context
->gpu
, resource
->gpu
, format
,
1144 base_level
, last_level
, first_layer
,
1149 tegra_create_texture_handle(struct pipe_context
*pcontext
,
1150 struct pipe_sampler_view
*view
,
1151 const struct pipe_sampler_state
*state
)
1153 struct tegra_context
*context
= to_tegra_context(pcontext
);
1155 return context
->gpu
->create_texture_handle(context
->gpu
, view
, state
);
1158 static void tegra_delete_texture_handle(struct pipe_context
*pcontext
,
1161 struct tegra_context
*context
= to_tegra_context(pcontext
);
1163 context
->gpu
->delete_texture_handle(context
->gpu
, handle
);
1166 static void tegra_make_texture_handle_resident(struct pipe_context
*pcontext
,
1167 uint64_t handle
, bool resident
)
1169 struct tegra_context
*context
= to_tegra_context(pcontext
);
1171 context
->gpu
->make_texture_handle_resident(context
->gpu
, handle
, resident
);
1174 static uint64_t tegra_create_image_handle(struct pipe_context
*pcontext
,
1175 const struct pipe_image_view
*image
)
1177 struct tegra_context
*context
= to_tegra_context(pcontext
);
1179 return context
->gpu
->create_image_handle(context
->gpu
, image
);
1182 static void tegra_delete_image_handle(struct pipe_context
*pcontext
,
1185 struct tegra_context
*context
= to_tegra_context(pcontext
);
1187 context
->gpu
->delete_image_handle(context
->gpu
, handle
);
1190 static void tegra_make_image_handle_resident(struct pipe_context
*pcontext
,
1191 uint64_t handle
, unsigned access
,
1194 struct tegra_context
*context
= to_tegra_context(pcontext
);
1196 context
->gpu
->make_image_handle_resident(context
->gpu
, handle
, access
,
1200 struct pipe_context
*
1201 tegra_screen_context_create(struct pipe_screen
*pscreen
, void *priv
,
1204 struct tegra_screen
*screen
= to_tegra_screen(pscreen
);
1205 struct tegra_context
*context
;
1207 context
= calloc(1, sizeof(*context
));
1211 context
->gpu
= screen
->gpu
->context_create(screen
->gpu
, priv
, flags
);
1212 if (!context
->gpu
) {
1213 debug_error("failed to create GPU context\n");
1217 context
->base
.screen
= &screen
->base
;
1218 context
->base
.priv
= priv
;
1221 * Create custom stream and const uploaders. Note that technically nouveau
1222 * already creates uploaders that could be reused, but that would make the
1223 * resource unwrapping rather complicate. The reason for that is that both
1224 * uploaders create resources based on the context that they were created
1225 * from, which means that nouveau's uploader will use the nouveau context
1226 * which means that those resources must not be unwrapped. So before each
1227 * resource is unwrapped, the code would need to check that it does not
1228 * correspond to the uploaders' buffers.
1230 * However, duplicating the uploaders here sounds worse than it is. The
1231 * default implementation that nouveau uses allocates buffers lazily, and
1232 * since it is never used, no buffers will every be allocated and the only
1233 * memory wasted is that occupied by the nouveau uploader itself.
1235 context
->base
.stream_uploader
= u_upload_create_default(&context
->base
);
1236 if (!context
->base
.stream_uploader
)
1239 context
->base
.const_uploader
= context
->base
.stream_uploader
;
1241 context
->base
.destroy
= tegra_destroy
;
1243 context
->base
.draw_vbo
= tegra_draw_vbo
;
1245 context
->base
.render_condition
= tegra_render_condition
;
1247 context
->base
.create_query
= tegra_create_query
;
1248 context
->base
.create_batch_query
= tegra_create_batch_query
;
1249 context
->base
.destroy_query
= tegra_destroy_query
;
1250 context
->base
.begin_query
= tegra_begin_query
;
1251 context
->base
.end_query
= tegra_end_query
;
1252 context
->base
.get_query_result
= tegra_get_query_result
;
1253 context
->base
.get_query_result_resource
= tegra_get_query_result_resource
;
1254 context
->base
.set_active_query_state
= tegra_set_active_query_state
;
1256 context
->base
.create_blend_state
= tegra_create_blend_state
;
1257 context
->base
.bind_blend_state
= tegra_bind_blend_state
;
1258 context
->base
.delete_blend_state
= tegra_delete_blend_state
;
1260 context
->base
.create_sampler_state
= tegra_create_sampler_state
;
1261 context
->base
.bind_sampler_states
= tegra_bind_sampler_states
;
1262 context
->base
.delete_sampler_state
= tegra_delete_sampler_state
;
1264 context
->base
.create_rasterizer_state
= tegra_create_rasterizer_state
;
1265 context
->base
.bind_rasterizer_state
= tegra_bind_rasterizer_state
;
1266 context
->base
.delete_rasterizer_state
= tegra_delete_rasterizer_state
;
1268 context
->base
.create_depth_stencil_alpha_state
= tegra_create_depth_stencil_alpha_state
;
1269 context
->base
.bind_depth_stencil_alpha_state
= tegra_bind_depth_stencil_alpha_state
;
1270 context
->base
.delete_depth_stencil_alpha_state
= tegra_delete_depth_stencil_alpha_state
;
1272 context
->base
.create_fs_state
= tegra_create_fs_state
;
1273 context
->base
.bind_fs_state
= tegra_bind_fs_state
;
1274 context
->base
.delete_fs_state
= tegra_delete_fs_state
;
1276 context
->base
.create_vs_state
= tegra_create_vs_state
;
1277 context
->base
.bind_vs_state
= tegra_bind_vs_state
;
1278 context
->base
.delete_vs_state
= tegra_delete_vs_state
;
1280 context
->base
.create_gs_state
= tegra_create_gs_state
;
1281 context
->base
.bind_gs_state
= tegra_bind_gs_state
;
1282 context
->base
.delete_gs_state
= tegra_delete_gs_state
;
1284 context
->base
.create_tcs_state
= tegra_create_tcs_state
;
1285 context
->base
.bind_tcs_state
= tegra_bind_tcs_state
;
1286 context
->base
.delete_tcs_state
= tegra_delete_tcs_state
;
1288 context
->base
.create_tes_state
= tegra_create_tes_state
;
1289 context
->base
.bind_tes_state
= tegra_bind_tes_state
;
1290 context
->base
.delete_tes_state
= tegra_delete_tes_state
;
1292 context
->base
.create_vertex_elements_state
= tegra_create_vertex_elements_state
;
1293 context
->base
.bind_vertex_elements_state
= tegra_bind_vertex_elements_state
;
1294 context
->base
.delete_vertex_elements_state
= tegra_delete_vertex_elements_state
;
1296 context
->base
.set_blend_color
= tegra_set_blend_color
;
1297 context
->base
.set_stencil_ref
= tegra_set_stencil_ref
;
1298 context
->base
.set_sample_mask
= tegra_set_sample_mask
;
1299 context
->base
.set_min_samples
= tegra_set_min_samples
;
1300 context
->base
.set_clip_state
= tegra_set_clip_state
;
1302 context
->base
.set_constant_buffer
= tegra_set_constant_buffer
;
1303 context
->base
.set_framebuffer_state
= tegra_set_framebuffer_state
;
1304 context
->base
.set_polygon_stipple
= tegra_set_polygon_stipple
;
1305 context
->base
.set_scissor_states
= tegra_set_scissor_states
;
1306 context
->base
.set_window_rectangles
= tegra_set_window_rectangles
;
1307 context
->base
.set_viewport_states
= tegra_set_viewport_states
;
1308 context
->base
.set_sampler_views
= tegra_set_sampler_views
;
1309 context
->base
.set_tess_state
= tegra_set_tess_state
;
1311 context
->base
.set_debug_callback
= tegra_set_debug_callback
;
1313 context
->base
.set_shader_buffers
= tegra_set_shader_buffers
;
1314 context
->base
.set_shader_images
= tegra_set_shader_images
;
1315 context
->base
.set_vertex_buffers
= tegra_set_vertex_buffers
;
1317 context
->base
.create_stream_output_target
= tegra_create_stream_output_target
;
1318 context
->base
.stream_output_target_destroy
= tegra_stream_output_target_destroy
;
1319 context
->base
.set_stream_output_targets
= tegra_set_stream_output_targets
;
1321 context
->base
.resource_copy_region
= tegra_resource_copy_region
;
1322 context
->base
.blit
= tegra_blit
;
1323 context
->base
.clear
= tegra_clear
;
1324 context
->base
.clear_render_target
= tegra_clear_render_target
;
1325 context
->base
.clear_depth_stencil
= tegra_clear_depth_stencil
;
1326 context
->base
.clear_texture
= tegra_clear_texture
;
1327 context
->base
.clear_buffer
= tegra_clear_buffer
;
1328 context
->base
.flush
= tegra_flush
;
1330 context
->base
.create_fence_fd
= tegra_create_fence_fd
;
1331 context
->base
.fence_server_sync
= tegra_fence_server_sync
;
1333 context
->base
.create_sampler_view
= tegra_create_sampler_view
;
1334 context
->base
.sampler_view_destroy
= tegra_sampler_view_destroy
;
1336 context
->base
.create_surface
= tegra_create_surface
;
1337 context
->base
.surface_destroy
= tegra_surface_destroy
;
1339 context
->base
.transfer_map
= tegra_transfer_map
;
1340 context
->base
.transfer_flush_region
= tegra_transfer_flush_region
;
1341 context
->base
.transfer_unmap
= tegra_transfer_unmap
;
1342 context
->base
.buffer_subdata
= tegra_buffer_subdata
;
1343 context
->base
.texture_subdata
= tegra_texture_subdata
;
1345 context
->base
.texture_barrier
= tegra_texture_barrier
;
1346 context
->base
.memory_barrier
= tegra_memory_barrier
;
1348 context
->base
.create_video_codec
= tegra_create_video_codec
;
1349 context
->base
.create_video_buffer
= tegra_create_video_buffer
;
1351 context
->base
.create_compute_state
= tegra_create_compute_state
;
1352 context
->base
.bind_compute_state
= tegra_bind_compute_state
;
1353 context
->base
.delete_compute_state
= tegra_delete_compute_state
;
1354 context
->base
.set_compute_resources
= tegra_set_compute_resources
;
1355 context
->base
.set_global_binding
= tegra_set_global_binding
;
1356 context
->base
.launch_grid
= tegra_launch_grid
;
1357 context
->base
.get_sample_position
= tegra_get_sample_position
;
1358 context
->base
.get_timestamp
= tegra_get_timestamp
;
1360 context
->base
.flush_resource
= tegra_flush_resource
;
1361 context
->base
.invalidate_resource
= tegra_invalidate_resource
;
1363 context
->base
.get_device_reset_status
= tegra_get_device_reset_status
;
1364 context
->base
.set_device_reset_callback
= tegra_set_device_reset_callback
;
1365 context
->base
.dump_debug_state
= tegra_dump_debug_state
;
1366 context
->base
.emit_string_marker
= tegra_emit_string_marker
;
1368 context
->base
.generate_mipmap
= tegra_generate_mipmap
;
1370 context
->base
.create_texture_handle
= tegra_create_texture_handle
;
1371 context
->base
.delete_texture_handle
= tegra_delete_texture_handle
;
1372 context
->base
.make_texture_handle_resident
= tegra_make_texture_handle_resident
;
1373 context
->base
.create_image_handle
= tegra_create_image_handle
;
1374 context
->base
.delete_image_handle
= tegra_delete_image_handle
;
1375 context
->base
.make_image_handle_resident
= tegra_make_image_handle_resident
;
1377 return &context
->base
;
1380 context
->gpu
->destroy(context
->gpu
);