2 * Copyright © 2014-2018 NVIDIA Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 #include "util/u_debug.h"
28 #include "util/u_inlines.h"
29 #include "util/u_upload_mgr.h"
31 #include "tegra_context.h"
32 #include "tegra_resource.h"
33 #include "tegra_screen.h"
36 tegra_destroy(struct pipe_context
*pcontext
)
38 struct tegra_context
*context
= to_tegra_context(pcontext
);
40 if (context
->base
.stream_uploader
)
41 u_upload_destroy(context
->base
.stream_uploader
);
43 context
->gpu
->destroy(context
->gpu
);
48 tegra_draw_vbo(struct pipe_context
*pcontext
,
49 const struct pipe_draw_info
*pinfo
)
51 struct tegra_context
*context
= to_tegra_context(pcontext
);
52 struct pipe_draw_indirect_info indirect
;
53 struct pipe_draw_info info
;
55 if (pinfo
&& (pinfo
->indirect
|| pinfo
->index_size
)) {
56 memcpy(&info
, pinfo
, sizeof(info
));
58 if (pinfo
->indirect
) {
59 memcpy(&indirect
, pinfo
->indirect
, sizeof(indirect
));
60 indirect
.buffer
= tegra_resource_unwrap(info
.indirect
->buffer
);
61 info
.indirect
= &indirect
;
64 if (pinfo
->index_size
&& !pinfo
->has_user_indices
)
65 info
.index
.resource
= tegra_resource_unwrap(info
.index
.resource
);
70 context
->gpu
->draw_vbo(context
->gpu
, pinfo
);
74 tegra_render_condition(struct pipe_context
*pcontext
,
75 struct pipe_query
*query
,
79 struct tegra_context
*context
= to_tegra_context(pcontext
);
81 context
->gpu
->render_condition(context
->gpu
, query
, condition
, mode
);
84 static struct pipe_query
*
85 tegra_create_query(struct pipe_context
*pcontext
, unsigned int query_type
,
88 struct tegra_context
*context
= to_tegra_context(pcontext
);
90 return context
->gpu
->create_query(context
->gpu
, query_type
, index
);
93 static struct pipe_query
*
94 tegra_create_batch_query(struct pipe_context
*pcontext
,
95 unsigned int num_queries
,
96 unsigned int *queries
)
98 struct tegra_context
*context
= to_tegra_context(pcontext
);
100 return context
->gpu
->create_batch_query(context
->gpu
, num_queries
,
105 tegra_destroy_query(struct pipe_context
*pcontext
, struct pipe_query
*query
)
107 struct tegra_context
*context
= to_tegra_context(pcontext
);
109 context
->gpu
->destroy_query(context
->gpu
, query
);
113 tegra_begin_query(struct pipe_context
*pcontext
, struct pipe_query
*query
)
115 struct tegra_context
*context
= to_tegra_context(pcontext
);
117 return context
->gpu
->begin_query(context
->gpu
, query
);
121 tegra_end_query(struct pipe_context
*pcontext
, struct pipe_query
*query
)
123 struct tegra_context
*context
= to_tegra_context(pcontext
);
125 return context
->gpu
->end_query(context
->gpu
, query
);
129 tegra_get_query_result(struct pipe_context
*pcontext
,
130 struct pipe_query
*query
,
132 union pipe_query_result
*result
)
134 struct tegra_context
*context
= to_tegra_context(pcontext
);
136 return context
->gpu
->get_query_result(context
->gpu
, query
, wait
,
141 tegra_get_query_result_resource(struct pipe_context
*pcontext
,
142 struct pipe_query
*query
,
144 enum pipe_query_value_type result_type
,
146 struct pipe_resource
*resource
,
149 struct tegra_context
*context
= to_tegra_context(pcontext
);
151 context
->gpu
->get_query_result_resource(context
->gpu
, query
, wait
,
152 result_type
, index
, resource
,
157 tegra_set_active_query_state(struct pipe_context
*pcontext
, boolean enable
)
159 struct tegra_context
*context
= to_tegra_context(pcontext
);
161 context
->gpu
->set_active_query_state(context
->gpu
, enable
);
165 tegra_create_blend_state(struct pipe_context
*pcontext
,
166 const struct pipe_blend_state
*cso
)
168 struct tegra_context
*context
= to_tegra_context(pcontext
);
170 return context
->gpu
->create_blend_state(context
->gpu
, cso
);
174 tegra_bind_blend_state(struct pipe_context
*pcontext
, void *so
)
176 struct tegra_context
*context
= to_tegra_context(pcontext
);
178 context
->gpu
->bind_blend_state(context
->gpu
, so
);
182 tegra_delete_blend_state(struct pipe_context
*pcontext
, void *so
)
184 struct tegra_context
*context
= to_tegra_context(pcontext
);
186 context
->gpu
->delete_blend_state(context
->gpu
, so
);
190 tegra_create_sampler_state(struct pipe_context
*pcontext
,
191 const struct pipe_sampler_state
*cso
)
193 struct tegra_context
*context
= to_tegra_context(pcontext
);
195 return context
->gpu
->create_sampler_state(context
->gpu
, cso
);
199 tegra_bind_sampler_states(struct pipe_context
*pcontext
, unsigned shader
,
200 unsigned start_slot
, unsigned num_samplers
,
203 struct tegra_context
*context
= to_tegra_context(pcontext
);
205 context
->gpu
->bind_sampler_states(context
->gpu
, shader
, start_slot
,
206 num_samplers
, samplers
);
210 tegra_delete_sampler_state(struct pipe_context
*pcontext
, void *so
)
212 struct tegra_context
*context
= to_tegra_context(pcontext
);
214 context
->gpu
->delete_sampler_state(context
->gpu
, so
);
218 tegra_create_rasterizer_state(struct pipe_context
*pcontext
,
219 const struct pipe_rasterizer_state
*cso
)
221 struct tegra_context
*context
= to_tegra_context(pcontext
);
223 return context
->gpu
->create_rasterizer_state(context
->gpu
, cso
);
227 tegra_bind_rasterizer_state(struct pipe_context
*pcontext
, void *so
)
229 struct tegra_context
*context
= to_tegra_context(pcontext
);
231 context
->gpu
->bind_rasterizer_state(context
->gpu
, so
);
235 tegra_delete_rasterizer_state(struct pipe_context
*pcontext
, void *so
)
237 struct tegra_context
*context
= to_tegra_context(pcontext
);
239 context
->gpu
->delete_rasterizer_state(context
->gpu
, so
);
243 tegra_create_depth_stencil_alpha_state(struct pipe_context
*pcontext
,
244 const struct pipe_depth_stencil_alpha_state
*cso
)
246 struct tegra_context
*context
= to_tegra_context(pcontext
);
248 return context
->gpu
->create_depth_stencil_alpha_state(context
->gpu
, cso
);
252 tegra_bind_depth_stencil_alpha_state(struct pipe_context
*pcontext
, void *so
)
254 struct tegra_context
*context
= to_tegra_context(pcontext
);
256 context
->gpu
->bind_depth_stencil_alpha_state(context
->gpu
, so
);
260 tegra_delete_depth_stencil_alpha_state(struct pipe_context
*pcontext
, void *so
)
262 struct tegra_context
*context
= to_tegra_context(pcontext
);
264 context
->gpu
->delete_depth_stencil_alpha_state(context
->gpu
, so
);
268 tegra_create_fs_state(struct pipe_context
*pcontext
,
269 const struct pipe_shader_state
*cso
)
271 struct tegra_context
*context
= to_tegra_context(pcontext
);
273 return context
->gpu
->create_fs_state(context
->gpu
, cso
);
277 tegra_bind_fs_state(struct pipe_context
*pcontext
, void *so
)
279 struct tegra_context
*context
= to_tegra_context(pcontext
);
281 context
->gpu
->bind_fs_state(context
->gpu
, so
);
285 tegra_delete_fs_state(struct pipe_context
*pcontext
, void *so
)
287 struct tegra_context
*context
= to_tegra_context(pcontext
);
289 context
->gpu
->delete_fs_state(context
->gpu
, so
);
293 tegra_create_vs_state(struct pipe_context
*pcontext
,
294 const struct pipe_shader_state
*cso
)
296 struct tegra_context
*context
= to_tegra_context(pcontext
);
298 return context
->gpu
->create_vs_state(context
->gpu
, cso
);
302 tegra_bind_vs_state(struct pipe_context
*pcontext
, void *so
)
304 struct tegra_context
*context
= to_tegra_context(pcontext
);
306 context
->gpu
->bind_vs_state(context
->gpu
, so
);
310 tegra_delete_vs_state(struct pipe_context
*pcontext
, void *so
)
312 struct tegra_context
*context
= to_tegra_context(pcontext
);
314 context
->gpu
->delete_vs_state(context
->gpu
, so
);
318 tegra_create_gs_state(struct pipe_context
*pcontext
,
319 const struct pipe_shader_state
*cso
)
321 struct tegra_context
*context
= to_tegra_context(pcontext
);
323 return context
->gpu
->create_gs_state(context
->gpu
, cso
);
327 tegra_bind_gs_state(struct pipe_context
*pcontext
, void *so
)
329 struct tegra_context
*context
= to_tegra_context(pcontext
);
331 context
->gpu
->bind_gs_state(context
->gpu
, so
);
335 tegra_delete_gs_state(struct pipe_context
*pcontext
, void *so
)
337 struct tegra_context
*context
= to_tegra_context(pcontext
);
339 context
->gpu
->delete_gs_state(context
->gpu
, so
);
343 tegra_create_tcs_state(struct pipe_context
*pcontext
,
344 const struct pipe_shader_state
*cso
)
346 struct tegra_context
*context
= to_tegra_context(pcontext
);
348 return context
->gpu
->create_tcs_state(context
->gpu
, cso
);
352 tegra_bind_tcs_state(struct pipe_context
*pcontext
, void *so
)
354 struct tegra_context
*context
= to_tegra_context(pcontext
);
356 context
->gpu
->bind_tcs_state(context
->gpu
, so
);
360 tegra_delete_tcs_state(struct pipe_context
*pcontext
, void *so
)
362 struct tegra_context
*context
= to_tegra_context(pcontext
);
364 context
->gpu
->delete_tcs_state(context
->gpu
, so
);
368 tegra_create_tes_state(struct pipe_context
*pcontext
,
369 const struct pipe_shader_state
*cso
)
371 struct tegra_context
*context
= to_tegra_context(pcontext
);
373 return context
->gpu
->create_tes_state(context
->gpu
, cso
);
377 tegra_bind_tes_state(struct pipe_context
*pcontext
, void *so
)
379 struct tegra_context
*context
= to_tegra_context(pcontext
);
381 context
->gpu
->bind_tes_state(context
->gpu
, so
);
385 tegra_delete_tes_state(struct pipe_context
*pcontext
, void *so
)
387 struct tegra_context
*context
= to_tegra_context(pcontext
);
389 context
->gpu
->delete_tes_state(context
->gpu
, so
);
393 tegra_create_vertex_elements_state(struct pipe_context
*pcontext
,
394 unsigned num_elements
,
395 const struct pipe_vertex_element
*elements
)
397 struct tegra_context
*context
= to_tegra_context(pcontext
);
399 return context
->gpu
->create_vertex_elements_state(context
->gpu
,
405 tegra_bind_vertex_elements_state(struct pipe_context
*pcontext
, void *so
)
407 struct tegra_context
*context
= to_tegra_context(pcontext
);
409 context
->gpu
->bind_vertex_elements_state(context
->gpu
, so
);
413 tegra_delete_vertex_elements_state(struct pipe_context
*pcontext
, void *so
)
415 struct tegra_context
*context
= to_tegra_context(pcontext
);
417 context
->gpu
->delete_vertex_elements_state(context
->gpu
, so
);
421 tegra_set_blend_color(struct pipe_context
*pcontext
,
422 const struct pipe_blend_color
*color
)
424 struct tegra_context
*context
= to_tegra_context(pcontext
);
426 context
->gpu
->set_blend_color(context
->gpu
, color
);
430 tegra_set_stencil_ref(struct pipe_context
*pcontext
,
431 const struct pipe_stencil_ref
*ref
)
433 struct tegra_context
*context
= to_tegra_context(pcontext
);
435 context
->gpu
->set_stencil_ref(context
->gpu
, ref
);
439 tegra_set_sample_mask(struct pipe_context
*pcontext
, unsigned int mask
)
441 struct tegra_context
*context
= to_tegra_context(pcontext
);
443 context
->gpu
->set_sample_mask(context
->gpu
, mask
);
447 tegra_set_min_samples(struct pipe_context
*pcontext
, unsigned int samples
)
449 struct tegra_context
*context
= to_tegra_context(pcontext
);
451 context
->gpu
->set_min_samples(context
->gpu
, samples
);
455 tegra_set_clip_state(struct pipe_context
*pcontext
,
456 const struct pipe_clip_state
*state
)
458 struct tegra_context
*context
= to_tegra_context(pcontext
);
460 context
->gpu
->set_clip_state(context
->gpu
, state
);
464 tegra_set_constant_buffer(struct pipe_context
*pcontext
, unsigned int shader
,
466 const struct pipe_constant_buffer
*buf
)
468 struct tegra_context
*context
= to_tegra_context(pcontext
);
469 struct pipe_constant_buffer buffer
;
471 if (buf
&& buf
->buffer
) {
472 memcpy(&buffer
, buf
, sizeof(buffer
));
473 buffer
.buffer
= tegra_resource_unwrap(buffer
.buffer
);
477 context
->gpu
->set_constant_buffer(context
->gpu
, shader
, index
, buf
);
481 tegra_set_framebuffer_state(struct pipe_context
*pcontext
,
482 const struct pipe_framebuffer_state
*fb
)
484 struct tegra_context
*context
= to_tegra_context(pcontext
);
485 struct pipe_framebuffer_state state
;
489 memcpy(&state
, fb
, sizeof(state
));
491 for (i
= 0; i
< fb
->nr_cbufs
; i
++)
492 state
.cbufs
[i
] = tegra_surface_unwrap(fb
->cbufs
[i
]);
494 while (i
< PIPE_MAX_COLOR_BUFS
)
495 state
.cbufs
[i
++] = NULL
;
497 state
.zsbuf
= tegra_surface_unwrap(fb
->zsbuf
);
502 context
->gpu
->set_framebuffer_state(context
->gpu
, fb
);
506 tegra_set_polygon_stipple(struct pipe_context
*pcontext
,
507 const struct pipe_poly_stipple
*stipple
)
509 struct tegra_context
*context
= to_tegra_context(pcontext
);
511 context
->gpu
->set_polygon_stipple(context
->gpu
, stipple
);
515 tegra_set_scissor_states(struct pipe_context
*pcontext
, unsigned start_slot
,
516 unsigned num_scissors
,
517 const struct pipe_scissor_state
*scissors
)
519 struct tegra_context
*context
= to_tegra_context(pcontext
);
521 context
->gpu
->set_scissor_states(context
->gpu
, start_slot
, num_scissors
,
526 tegra_set_window_rectangles(struct pipe_context
*pcontext
, boolean include
,
527 unsigned int num_rectangles
,
528 const struct pipe_scissor_state
*rectangles
)
530 struct tegra_context
*context
= to_tegra_context(pcontext
);
532 context
->gpu
->set_window_rectangles(context
->gpu
, include
, num_rectangles
,
537 tegra_set_viewport_states(struct pipe_context
*pcontext
, unsigned start_slot
,
538 unsigned num_viewports
,
539 const struct pipe_viewport_state
*viewports
)
541 struct tegra_context
*context
= to_tegra_context(pcontext
);
543 context
->gpu
->set_viewport_states(context
->gpu
, start_slot
, num_viewports
,
548 tegra_set_sampler_views(struct pipe_context
*pcontext
, unsigned shader
,
549 unsigned start_slot
, unsigned num_views
,
550 struct pipe_sampler_view
**pviews
)
552 struct pipe_sampler_view
*views
[PIPE_MAX_SHADER_SAMPLER_VIEWS
];
553 struct tegra_context
*context
= to_tegra_context(pcontext
);
556 for (i
= 0; i
< num_views
; i
++)
557 views
[i
] = tegra_sampler_view_unwrap(pviews
[i
]);
559 context
->gpu
->set_sampler_views(context
->gpu
, shader
, start_slot
,
564 tegra_set_tess_state(struct pipe_context
*pcontext
,
565 const float default_outer_level
[4],
566 const float default_inner_level
[2])
568 struct tegra_context
*context
= to_tegra_context(pcontext
);
570 context
->gpu
->set_tess_state(context
->gpu
, default_outer_level
,
571 default_inner_level
);
575 tegra_set_debug_callback(struct pipe_context
*pcontext
,
576 const struct pipe_debug_callback
*callback
)
578 struct tegra_context
*context
= to_tegra_context(pcontext
);
580 context
->gpu
->set_debug_callback(context
->gpu
, callback
);
584 tegra_set_shader_buffers(struct pipe_context
*pcontext
, unsigned int shader
,
585 unsigned start
, unsigned count
,
586 const struct pipe_shader_buffer
*buffers
,
587 unsigned writable_bitmask
)
589 struct tegra_context
*context
= to_tegra_context(pcontext
);
591 context
->gpu
->set_shader_buffers(context
->gpu
, shader
, start
, count
,
592 buffers
, writable_bitmask
);
596 tegra_set_shader_images(struct pipe_context
*pcontext
, unsigned int shader
,
597 unsigned start
, unsigned count
,
598 const struct pipe_image_view
*images
)
600 struct tegra_context
*context
= to_tegra_context(pcontext
);
602 context
->gpu
->set_shader_images(context
->gpu
, shader
, start
, count
,
607 tegra_set_vertex_buffers(struct pipe_context
*pcontext
, unsigned start_slot
,
608 unsigned num_buffers
,
609 const struct pipe_vertex_buffer
*buffers
)
611 struct tegra_context
*context
= to_tegra_context(pcontext
);
612 struct pipe_vertex_buffer buf
[PIPE_MAX_SHADER_INPUTS
];
615 if (num_buffers
&& buffers
) {
616 memcpy(buf
, buffers
, num_buffers
* sizeof(struct pipe_vertex_buffer
));
618 for (i
= 0; i
< num_buffers
; i
++) {
619 if (!buf
[i
].is_user_buffer
)
620 buf
[i
].buffer
.resource
= tegra_resource_unwrap(buf
[i
].buffer
.resource
);
626 context
->gpu
->set_vertex_buffers(context
->gpu
, start_slot
, num_buffers
,
630 static struct pipe_stream_output_target
*
631 tegra_create_stream_output_target(struct pipe_context
*pcontext
,
632 struct pipe_resource
*presource
,
633 unsigned buffer_offset
,
634 unsigned buffer_size
)
636 struct tegra_resource
*resource
= to_tegra_resource(presource
);
637 struct tegra_context
*context
= to_tegra_context(pcontext
);
639 return context
->gpu
->create_stream_output_target(context
->gpu
,
646 tegra_stream_output_target_destroy(struct pipe_context
*pcontext
,
647 struct pipe_stream_output_target
*target
)
649 struct tegra_context
*context
= to_tegra_context(pcontext
);
651 context
->gpu
->stream_output_target_destroy(context
->gpu
, target
);
655 tegra_set_stream_output_targets(struct pipe_context
*pcontext
,
656 unsigned num_targets
,
657 struct pipe_stream_output_target
**targets
,
658 const unsigned *offsets
)
660 struct tegra_context
*context
= to_tegra_context(pcontext
);
662 context
->gpu
->set_stream_output_targets(context
->gpu
, num_targets
,
667 tegra_resource_copy_region(struct pipe_context
*pcontext
,
668 struct pipe_resource
*pdst
,
669 unsigned int dst_level
,
673 struct pipe_resource
*psrc
,
674 unsigned int src_level
,
675 const struct pipe_box
*src_box
)
677 struct tegra_context
*context
= to_tegra_context(pcontext
);
678 struct tegra_resource
*dst
= to_tegra_resource(pdst
);
679 struct tegra_resource
*src
= to_tegra_resource(psrc
);
681 context
->gpu
->resource_copy_region(context
->gpu
, dst
->gpu
, dst_level
, dstx
,
682 dsty
, dstz
, src
->gpu
, src_level
,
687 tegra_blit(struct pipe_context
*pcontext
, const struct pipe_blit_info
*pinfo
)
689 struct tegra_context
*context
= to_tegra_context(pcontext
);
690 struct pipe_blit_info info
;
693 memcpy(&info
, pinfo
, sizeof(info
));
694 info
.dst
.resource
= tegra_resource_unwrap(info
.dst
.resource
);
695 info
.src
.resource
= tegra_resource_unwrap(info
.src
.resource
);
699 context
->gpu
->blit(context
->gpu
, pinfo
);
703 tegra_clear(struct pipe_context
*pcontext
, unsigned buffers
,
704 const union pipe_color_union
*color
, double depth
,
707 struct tegra_context
*context
= to_tegra_context(pcontext
);
709 context
->gpu
->clear(context
->gpu
, buffers
, color
, depth
, stencil
);
713 tegra_clear_render_target(struct pipe_context
*pcontext
,
714 struct pipe_surface
*pdst
,
715 const union pipe_color_union
*color
,
720 bool render_condition
)
722 struct tegra_context
*context
= to_tegra_context(pcontext
);
723 struct tegra_surface
*dst
= to_tegra_surface(pdst
);
725 context
->gpu
->clear_render_target(context
->gpu
, dst
->gpu
, color
, dstx
,
726 dsty
, width
, height
, render_condition
);
730 tegra_clear_depth_stencil(struct pipe_context
*pcontext
,
731 struct pipe_surface
*pdst
,
734 unsigned int stencil
,
739 bool render_condition
)
741 struct tegra_context
*context
= to_tegra_context(pcontext
);
742 struct tegra_surface
*dst
= to_tegra_surface(pdst
);
744 context
->gpu
->clear_depth_stencil(context
->gpu
, dst
->gpu
, flags
, depth
,
745 stencil
, dstx
, dsty
, width
, height
,
750 tegra_clear_texture(struct pipe_context
*pcontext
,
751 struct pipe_resource
*presource
,
753 const struct pipe_box
*box
,
756 struct tegra_resource
*resource
= to_tegra_resource(presource
);
757 struct tegra_context
*context
= to_tegra_context(pcontext
);
759 context
->gpu
->clear_texture(context
->gpu
, resource
->gpu
, level
, box
, data
);
763 tegra_clear_buffer(struct pipe_context
*pcontext
,
764 struct pipe_resource
*presource
,
770 struct tegra_resource
*resource
= to_tegra_resource(presource
);
771 struct tegra_context
*context
= to_tegra_context(pcontext
);
773 context
->gpu
->clear_buffer(context
->gpu
, resource
->gpu
, offset
, size
,
778 tegra_flush(struct pipe_context
*pcontext
, struct pipe_fence_handle
**fence
,
781 struct tegra_context
*context
= to_tegra_context(pcontext
);
783 context
->gpu
->flush(context
->gpu
, fence
, flags
);
787 tegra_create_fence_fd(struct pipe_context
*pcontext
,
788 struct pipe_fence_handle
**fence
,
789 int fd
, enum pipe_fd_type type
)
791 struct tegra_context
*context
= to_tegra_context(pcontext
);
793 assert(type
== PIPE_FD_TYPE_NATIVE_SYNC
);
794 context
->gpu
->create_fence_fd(context
->gpu
, fence
, fd
, type
);
798 tegra_fence_server_sync(struct pipe_context
*pcontext
,
799 struct pipe_fence_handle
*fence
)
801 struct tegra_context
*context
= to_tegra_context(pcontext
);
803 context
->gpu
->fence_server_sync(context
->gpu
, fence
);
806 static struct pipe_sampler_view
*
807 tegra_create_sampler_view(struct pipe_context
*pcontext
,
808 struct pipe_resource
*presource
,
809 const struct pipe_sampler_view
*template)
811 struct tegra_resource
*resource
= to_tegra_resource(presource
);
812 struct tegra_context
*context
= to_tegra_context(pcontext
);
813 struct tegra_sampler_view
*view
;
815 view
= calloc(1, sizeof(*view
));
819 view
->gpu
= context
->gpu
->create_sampler_view(context
->gpu
, resource
->gpu
,
821 memcpy(&view
->base
, view
->gpu
, sizeof(*view
->gpu
));
822 /* overwrite to prevent reference from being released */
823 view
->base
.texture
= NULL
;
825 pipe_reference_init(&view
->base
.reference
, 1);
826 pipe_resource_reference(&view
->base
.texture
, presource
);
827 view
->base
.context
= pcontext
;
833 tegra_sampler_view_destroy(struct pipe_context
*pcontext
,
834 struct pipe_sampler_view
*pview
)
836 struct tegra_sampler_view
*view
= to_tegra_sampler_view(pview
);
838 pipe_resource_reference(&view
->base
.texture
, NULL
);
839 pipe_sampler_view_reference(&view
->gpu
, NULL
);
843 static struct pipe_surface
*
844 tegra_create_surface(struct pipe_context
*pcontext
,
845 struct pipe_resource
*presource
,
846 const struct pipe_surface
*template)
848 struct tegra_resource
*resource
= to_tegra_resource(presource
);
849 struct tegra_context
*context
= to_tegra_context(pcontext
);
850 struct tegra_surface
*surface
;
852 surface
= calloc(1, sizeof(*surface
));
856 surface
->gpu
= context
->gpu
->create_surface(context
->gpu
, resource
->gpu
,
863 memcpy(&surface
->base
, surface
->gpu
, sizeof(*surface
->gpu
));
864 /* overwrite to prevent reference from being released */
865 surface
->base
.texture
= NULL
;
867 pipe_reference_init(&surface
->base
.reference
, 1);
868 pipe_resource_reference(&surface
->base
.texture
, presource
);
869 surface
->base
.context
= &context
->base
;
871 return &surface
->base
;
875 tegra_surface_destroy(struct pipe_context
*pcontext
,
876 struct pipe_surface
*psurface
)
878 struct tegra_surface
*surface
= to_tegra_surface(psurface
);
880 pipe_resource_reference(&surface
->base
.texture
, NULL
);
881 pipe_surface_reference(&surface
->gpu
, NULL
);
886 tegra_transfer_map(struct pipe_context
*pcontext
,
887 struct pipe_resource
*presource
,
888 unsigned level
, unsigned usage
,
889 const struct pipe_box
*box
,
890 struct pipe_transfer
**ptransfer
)
892 struct tegra_resource
*resource
= to_tegra_resource(presource
);
893 struct tegra_context
*context
= to_tegra_context(pcontext
);
894 struct tegra_transfer
*transfer
;
896 transfer
= calloc(1, sizeof(*transfer
));
900 transfer
->map
= context
->gpu
->transfer_map(context
->gpu
, resource
->gpu
,
903 memcpy(&transfer
->base
, transfer
->gpu
, sizeof(*transfer
->gpu
));
904 transfer
->base
.resource
= NULL
;
905 pipe_resource_reference(&transfer
->base
.resource
, presource
);
907 *ptransfer
= &transfer
->base
;
909 return transfer
->map
;
913 tegra_transfer_flush_region(struct pipe_context
*pcontext
,
914 struct pipe_transfer
*ptransfer
,
915 const struct pipe_box
*box
)
917 struct tegra_transfer
*transfer
= to_tegra_transfer(ptransfer
);
918 struct tegra_context
*context
= to_tegra_context(pcontext
);
920 context
->gpu
->transfer_flush_region(context
->gpu
, transfer
->gpu
, box
);
924 tegra_transfer_unmap(struct pipe_context
*pcontext
,
925 struct pipe_transfer
*ptransfer
)
927 struct tegra_transfer
*transfer
= to_tegra_transfer(ptransfer
);
928 struct tegra_context
*context
= to_tegra_context(pcontext
);
930 context
->gpu
->transfer_unmap(context
->gpu
, transfer
->gpu
);
931 pipe_resource_reference(&transfer
->base
.resource
, NULL
);
936 tegra_buffer_subdata(struct pipe_context
*pcontext
,
937 struct pipe_resource
*presource
,
938 unsigned usage
, unsigned offset
,
939 unsigned size
, const void *data
)
941 struct tegra_resource
*resource
= to_tegra_resource(presource
);
942 struct tegra_context
*context
= to_tegra_context(pcontext
);
944 context
->gpu
->buffer_subdata(context
->gpu
, resource
->gpu
, usage
, offset
,
949 tegra_texture_subdata(struct pipe_context
*pcontext
,
950 struct pipe_resource
*presource
,
953 const struct pipe_box
*box
,
956 unsigned layer_stride
)
958 struct tegra_resource
*resource
= to_tegra_resource(presource
);
959 struct tegra_context
*context
= to_tegra_context(pcontext
);
961 context
->gpu
->texture_subdata(context
->gpu
, resource
->gpu
, level
, usage
,
962 box
, data
, stride
, layer_stride
);
966 tegra_texture_barrier(struct pipe_context
*pcontext
, unsigned int flags
)
968 struct tegra_context
*context
= to_tegra_context(pcontext
);
970 context
->gpu
->texture_barrier(context
->gpu
, flags
);
974 tegra_memory_barrier(struct pipe_context
*pcontext
, unsigned int flags
)
976 struct tegra_context
*context
= to_tegra_context(pcontext
);
978 if (!(flags
& ~PIPE_BARRIER_UPDATE
))
981 context
->gpu
->memory_barrier(context
->gpu
, flags
);
984 static struct pipe_video_codec
*
985 tegra_create_video_codec(struct pipe_context
*pcontext
,
986 const struct pipe_video_codec
*template)
988 struct tegra_context
*context
= to_tegra_context(pcontext
);
990 return context
->gpu
->create_video_codec(context
->gpu
, template);
993 static struct pipe_video_buffer
*
994 tegra_create_video_buffer(struct pipe_context
*pcontext
,
995 const struct pipe_video_buffer
*template)
997 struct tegra_context
*context
= to_tegra_context(pcontext
);
999 return context
->gpu
->create_video_buffer(context
->gpu
, template);
1003 tegra_create_compute_state(struct pipe_context
*pcontext
,
1004 const struct pipe_compute_state
*template)
1006 struct tegra_context
*context
= to_tegra_context(pcontext
);
1008 return context
->gpu
->create_compute_state(context
->gpu
, template);
1012 tegra_bind_compute_state(struct pipe_context
*pcontext
, void *so
)
1014 struct tegra_context
*context
= to_tegra_context(pcontext
);
1016 context
->gpu
->bind_compute_state(context
->gpu
, so
);
1020 tegra_delete_compute_state(struct pipe_context
*pcontext
, void *so
)
1022 struct tegra_context
*context
= to_tegra_context(pcontext
);
1024 context
->gpu
->delete_compute_state(context
->gpu
, so
);
1028 tegra_set_compute_resources(struct pipe_context
*pcontext
,
1029 unsigned int start
, unsigned int count
,
1030 struct pipe_surface
**resources
)
1032 struct tegra_context
*context
= to_tegra_context(pcontext
);
1034 /* XXX unwrap resources */
1036 context
->gpu
->set_compute_resources(context
->gpu
, start
, count
, resources
);
1040 tegra_set_global_binding(struct pipe_context
*pcontext
, unsigned int first
,
1041 unsigned int count
, struct pipe_resource
**resources
,
1044 struct tegra_context
*context
= to_tegra_context(pcontext
);
1046 /* XXX unwrap resources */
1048 context
->gpu
->set_global_binding(context
->gpu
, first
, count
, resources
,
1053 tegra_launch_grid(struct pipe_context
*pcontext
,
1054 const struct pipe_grid_info
*info
)
1056 struct tegra_context
*context
= to_tegra_context(pcontext
);
1058 /* XXX unwrap info->indirect? */
1060 context
->gpu
->launch_grid(context
->gpu
, info
);
1064 tegra_get_sample_position(struct pipe_context
*pcontext
, unsigned int count
,
1065 unsigned int index
, float *value
)
1067 struct tegra_context
*context
= to_tegra_context(pcontext
);
1069 context
->gpu
->get_sample_position(context
->gpu
, count
, index
, value
);
1073 tegra_get_timestamp(struct pipe_context
*pcontext
)
1075 struct tegra_context
*context
= to_tegra_context(pcontext
);
1077 return context
->gpu
->get_timestamp(context
->gpu
);
1081 tegra_flush_resource(struct pipe_context
*pcontext
,
1082 struct pipe_resource
*presource
)
1084 struct tegra_resource
*resource
= to_tegra_resource(presource
);
1085 struct tegra_context
*context
= to_tegra_context(pcontext
);
1087 context
->gpu
->flush_resource(context
->gpu
, resource
->gpu
);
1091 tegra_invalidate_resource(struct pipe_context
*pcontext
,
1092 struct pipe_resource
*presource
)
1094 struct tegra_resource
*resource
= to_tegra_resource(presource
);
1095 struct tegra_context
*context
= to_tegra_context(pcontext
);
1097 context
->gpu
->invalidate_resource(context
->gpu
, resource
->gpu
);
1100 static enum pipe_reset_status
1101 tegra_get_device_reset_status(struct pipe_context
*pcontext
)
1103 struct tegra_context
*context
= to_tegra_context(pcontext
);
1105 return context
->gpu
->get_device_reset_status(context
->gpu
);
1109 tegra_set_device_reset_callback(struct pipe_context
*pcontext
,
1110 const struct pipe_device_reset_callback
*cb
)
1112 struct tegra_context
*context
= to_tegra_context(pcontext
);
1114 context
->gpu
->set_device_reset_callback(context
->gpu
, cb
);
1118 tegra_dump_debug_state(struct pipe_context
*pcontext
, FILE *stream
,
1121 struct tegra_context
*context
= to_tegra_context(pcontext
);
1123 context
->gpu
->dump_debug_state(context
->gpu
, stream
, flags
);
1127 tegra_emit_string_marker(struct pipe_context
*pcontext
, const char *string
,
1130 struct tegra_context
*context
= to_tegra_context(pcontext
);
1132 context
->gpu
->emit_string_marker(context
->gpu
, string
, length
);
1136 tegra_generate_mipmap(struct pipe_context
*pcontext
,
1137 struct pipe_resource
*presource
,
1138 enum pipe_format format
,
1139 unsigned int base_level
,
1140 unsigned int last_level
,
1141 unsigned int first_layer
,
1142 unsigned int last_layer
)
1144 struct tegra_resource
*resource
= to_tegra_resource(presource
);
1145 struct tegra_context
*context
= to_tegra_context(pcontext
);
1147 return context
->gpu
->generate_mipmap(context
->gpu
, resource
->gpu
, format
,
1148 base_level
, last_level
, first_layer
,
1153 tegra_create_texture_handle(struct pipe_context
*pcontext
,
1154 struct pipe_sampler_view
*view
,
1155 const struct pipe_sampler_state
*state
)
1157 struct tegra_context
*context
= to_tegra_context(pcontext
);
1159 return context
->gpu
->create_texture_handle(context
->gpu
, view
, state
);
1162 static void tegra_delete_texture_handle(struct pipe_context
*pcontext
,
1165 struct tegra_context
*context
= to_tegra_context(pcontext
);
1167 context
->gpu
->delete_texture_handle(context
->gpu
, handle
);
1170 static void tegra_make_texture_handle_resident(struct pipe_context
*pcontext
,
1171 uint64_t handle
, bool resident
)
1173 struct tegra_context
*context
= to_tegra_context(pcontext
);
1175 context
->gpu
->make_texture_handle_resident(context
->gpu
, handle
, resident
);
1178 static uint64_t tegra_create_image_handle(struct pipe_context
*pcontext
,
1179 const struct pipe_image_view
*image
)
1181 struct tegra_context
*context
= to_tegra_context(pcontext
);
1183 return context
->gpu
->create_image_handle(context
->gpu
, image
);
1186 static void tegra_delete_image_handle(struct pipe_context
*pcontext
,
1189 struct tegra_context
*context
= to_tegra_context(pcontext
);
1191 context
->gpu
->delete_image_handle(context
->gpu
, handle
);
1194 static void tegra_make_image_handle_resident(struct pipe_context
*pcontext
,
1195 uint64_t handle
, unsigned access
,
1198 struct tegra_context
*context
= to_tegra_context(pcontext
);
1200 context
->gpu
->make_image_handle_resident(context
->gpu
, handle
, access
,
1204 struct pipe_context
*
1205 tegra_screen_context_create(struct pipe_screen
*pscreen
, void *priv
,
1208 struct tegra_screen
*screen
= to_tegra_screen(pscreen
);
1209 struct tegra_context
*context
;
1211 context
= calloc(1, sizeof(*context
));
1215 context
->gpu
= screen
->gpu
->context_create(screen
->gpu
, priv
, flags
);
1216 if (!context
->gpu
) {
1217 debug_error("failed to create GPU context\n");
1221 context
->base
.screen
= &screen
->base
;
1222 context
->base
.priv
= priv
;
1225 * Create custom stream and const uploaders. Note that technically nouveau
1226 * already creates uploaders that could be reused, but that would make the
1227 * resource unwrapping rather complicated. The reason for that is that both
1228 * uploaders create resources based on the context that they were created
1229 * from, which means that nouveau's uploader will use the nouveau context
1230 * which means that those resources must not be unwrapped. So before each
1231 * resource is unwrapped, the code would need to check that it does not
1232 * correspond to the uploaders' buffers.
1234 * However, duplicating the uploaders here sounds worse than it is. The
1235 * default implementation that nouveau uses allocates buffers lazily, and
1236 * since it is never used, no buffers will ever be allocated and the only
1237 * memory wasted is that occupied by the nouveau uploader itself.
1239 context
->base
.stream_uploader
= u_upload_create_default(&context
->base
);
1240 if (!context
->base
.stream_uploader
)
1243 context
->base
.const_uploader
= context
->base
.stream_uploader
;
1245 context
->base
.destroy
= tegra_destroy
;
1247 context
->base
.draw_vbo
= tegra_draw_vbo
;
1249 context
->base
.render_condition
= tegra_render_condition
;
1251 context
->base
.create_query
= tegra_create_query
;
1252 context
->base
.create_batch_query
= tegra_create_batch_query
;
1253 context
->base
.destroy_query
= tegra_destroy_query
;
1254 context
->base
.begin_query
= tegra_begin_query
;
1255 context
->base
.end_query
= tegra_end_query
;
1256 context
->base
.get_query_result
= tegra_get_query_result
;
1257 context
->base
.get_query_result_resource
= tegra_get_query_result_resource
;
1258 context
->base
.set_active_query_state
= tegra_set_active_query_state
;
1260 context
->base
.create_blend_state
= tegra_create_blend_state
;
1261 context
->base
.bind_blend_state
= tegra_bind_blend_state
;
1262 context
->base
.delete_blend_state
= tegra_delete_blend_state
;
1264 context
->base
.create_sampler_state
= tegra_create_sampler_state
;
1265 context
->base
.bind_sampler_states
= tegra_bind_sampler_states
;
1266 context
->base
.delete_sampler_state
= tegra_delete_sampler_state
;
1268 context
->base
.create_rasterizer_state
= tegra_create_rasterizer_state
;
1269 context
->base
.bind_rasterizer_state
= tegra_bind_rasterizer_state
;
1270 context
->base
.delete_rasterizer_state
= tegra_delete_rasterizer_state
;
1272 context
->base
.create_depth_stencil_alpha_state
= tegra_create_depth_stencil_alpha_state
;
1273 context
->base
.bind_depth_stencil_alpha_state
= tegra_bind_depth_stencil_alpha_state
;
1274 context
->base
.delete_depth_stencil_alpha_state
= tegra_delete_depth_stencil_alpha_state
;
1276 context
->base
.create_fs_state
= tegra_create_fs_state
;
1277 context
->base
.bind_fs_state
= tegra_bind_fs_state
;
1278 context
->base
.delete_fs_state
= tegra_delete_fs_state
;
1280 context
->base
.create_vs_state
= tegra_create_vs_state
;
1281 context
->base
.bind_vs_state
= tegra_bind_vs_state
;
1282 context
->base
.delete_vs_state
= tegra_delete_vs_state
;
1284 context
->base
.create_gs_state
= tegra_create_gs_state
;
1285 context
->base
.bind_gs_state
= tegra_bind_gs_state
;
1286 context
->base
.delete_gs_state
= tegra_delete_gs_state
;
1288 context
->base
.create_tcs_state
= tegra_create_tcs_state
;
1289 context
->base
.bind_tcs_state
= tegra_bind_tcs_state
;
1290 context
->base
.delete_tcs_state
= tegra_delete_tcs_state
;
1292 context
->base
.create_tes_state
= tegra_create_tes_state
;
1293 context
->base
.bind_tes_state
= tegra_bind_tes_state
;
1294 context
->base
.delete_tes_state
= tegra_delete_tes_state
;
1296 context
->base
.create_vertex_elements_state
= tegra_create_vertex_elements_state
;
1297 context
->base
.bind_vertex_elements_state
= tegra_bind_vertex_elements_state
;
1298 context
->base
.delete_vertex_elements_state
= tegra_delete_vertex_elements_state
;
1300 context
->base
.set_blend_color
= tegra_set_blend_color
;
1301 context
->base
.set_stencil_ref
= tegra_set_stencil_ref
;
1302 context
->base
.set_sample_mask
= tegra_set_sample_mask
;
1303 context
->base
.set_min_samples
= tegra_set_min_samples
;
1304 context
->base
.set_clip_state
= tegra_set_clip_state
;
1306 context
->base
.set_constant_buffer
= tegra_set_constant_buffer
;
1307 context
->base
.set_framebuffer_state
= tegra_set_framebuffer_state
;
1308 context
->base
.set_polygon_stipple
= tegra_set_polygon_stipple
;
1309 context
->base
.set_scissor_states
= tegra_set_scissor_states
;
1310 context
->base
.set_window_rectangles
= tegra_set_window_rectangles
;
1311 context
->base
.set_viewport_states
= tegra_set_viewport_states
;
1312 context
->base
.set_sampler_views
= tegra_set_sampler_views
;
1313 context
->base
.set_tess_state
= tegra_set_tess_state
;
1315 context
->base
.set_debug_callback
= tegra_set_debug_callback
;
1317 context
->base
.set_shader_buffers
= tegra_set_shader_buffers
;
1318 context
->base
.set_shader_images
= tegra_set_shader_images
;
1319 context
->base
.set_vertex_buffers
= tegra_set_vertex_buffers
;
1321 context
->base
.create_stream_output_target
= tegra_create_stream_output_target
;
1322 context
->base
.stream_output_target_destroy
= tegra_stream_output_target_destroy
;
1323 context
->base
.set_stream_output_targets
= tegra_set_stream_output_targets
;
1325 context
->base
.resource_copy_region
= tegra_resource_copy_region
;
1326 context
->base
.blit
= tegra_blit
;
1327 context
->base
.clear
= tegra_clear
;
1328 context
->base
.clear_render_target
= tegra_clear_render_target
;
1329 context
->base
.clear_depth_stencil
= tegra_clear_depth_stencil
;
1330 context
->base
.clear_texture
= tegra_clear_texture
;
1331 context
->base
.clear_buffer
= tegra_clear_buffer
;
1332 context
->base
.flush
= tegra_flush
;
1334 context
->base
.create_fence_fd
= tegra_create_fence_fd
;
1335 context
->base
.fence_server_sync
= tegra_fence_server_sync
;
1337 context
->base
.create_sampler_view
= tegra_create_sampler_view
;
1338 context
->base
.sampler_view_destroy
= tegra_sampler_view_destroy
;
1340 context
->base
.create_surface
= tegra_create_surface
;
1341 context
->base
.surface_destroy
= tegra_surface_destroy
;
1343 context
->base
.transfer_map
= tegra_transfer_map
;
1344 context
->base
.transfer_flush_region
= tegra_transfer_flush_region
;
1345 context
->base
.transfer_unmap
= tegra_transfer_unmap
;
1346 context
->base
.buffer_subdata
= tegra_buffer_subdata
;
1347 context
->base
.texture_subdata
= tegra_texture_subdata
;
1349 context
->base
.texture_barrier
= tegra_texture_barrier
;
1350 context
->base
.memory_barrier
= tegra_memory_barrier
;
1352 context
->base
.create_video_codec
= tegra_create_video_codec
;
1353 context
->base
.create_video_buffer
= tegra_create_video_buffer
;
1355 context
->base
.create_compute_state
= tegra_create_compute_state
;
1356 context
->base
.bind_compute_state
= tegra_bind_compute_state
;
1357 context
->base
.delete_compute_state
= tegra_delete_compute_state
;
1358 context
->base
.set_compute_resources
= tegra_set_compute_resources
;
1359 context
->base
.set_global_binding
= tegra_set_global_binding
;
1360 context
->base
.launch_grid
= tegra_launch_grid
;
1361 context
->base
.get_sample_position
= tegra_get_sample_position
;
1362 context
->base
.get_timestamp
= tegra_get_timestamp
;
1364 context
->base
.flush_resource
= tegra_flush_resource
;
1365 context
->base
.invalidate_resource
= tegra_invalidate_resource
;
1367 context
->base
.get_device_reset_status
= tegra_get_device_reset_status
;
1368 context
->base
.set_device_reset_callback
= tegra_set_device_reset_callback
;
1369 context
->base
.dump_debug_state
= tegra_dump_debug_state
;
1370 context
->base
.emit_string_marker
= tegra_emit_string_marker
;
1372 context
->base
.generate_mipmap
= tegra_generate_mipmap
;
1374 context
->base
.create_texture_handle
= tegra_create_texture_handle
;
1375 context
->base
.delete_texture_handle
= tegra_delete_texture_handle
;
1376 context
->base
.make_texture_handle_resident
= tegra_make_texture_handle_resident
;
1377 context
->base
.create_image_handle
= tegra_create_image_handle
;
1378 context
->base
.delete_image_handle
= tegra_delete_image_handle
;
1379 context
->base
.make_image_handle_resident
= tegra_make_image_handle_resident
;
1381 return &context
->base
;
1384 context
->gpu
->destroy(context
->gpu
);