v3d: flag dirty state when binding compute states
[mesa.git] / src/gallium/drivers/v3d/v3d_context.h
/*
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef VC5_CONTEXT_H
#define VC5_CONTEXT_H

#ifdef V3D_VERSION
#include "broadcom/common/v3d_macros.h"
#endif

#include <stdio.h>

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/bitset.h"
#include "util/slab.h"
#include "xf86drm.h"
#include "drm-uapi/v3d_drm.h"
#include "v3d_screen.h"
#include "broadcom/common/v3d_limits.h"

struct v3d_job;
struct v3d_bo;
void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);

#include "v3d_bufmgr.h"
#include "v3d_resource.h"
#include "v3d_cl.h"

#ifdef USE_V3D_SIMULATOR
#define using_v3d_simulator true
#else
#define using_v3d_simulator false
#endif

#define VC5_DIRTY_BLEND (1ull << 0)
#define VC5_DIRTY_RASTERIZER (1ull << 1)
#define VC5_DIRTY_ZSA (1ull << 2)
#define VC5_DIRTY_COMPTEX (1ull << 3)
#define VC5_DIRTY_VERTTEX (1ull << 4)
#define VC5_DIRTY_FRAGTEX (1ull << 5)

#define VC5_DIRTY_SHADER_IMAGE (1ull << 9)
#define VC5_DIRTY_BLEND_COLOR (1ull << 10)
#define VC5_DIRTY_STENCIL_REF (1ull << 11)
#define VC5_DIRTY_SAMPLE_STATE (1ull << 12)
#define VC5_DIRTY_FRAMEBUFFER (1ull << 13)
#define VC5_DIRTY_STIPPLE (1ull << 14)
#define VC5_DIRTY_VIEWPORT (1ull << 15)
#define VC5_DIRTY_CONSTBUF (1ull << 16)
#define VC5_DIRTY_VTXSTATE (1ull << 17)
#define VC5_DIRTY_VTXBUF (1ull << 18)
#define VC5_DIRTY_SCISSOR (1ull << 19)
#define VC5_DIRTY_FLAT_SHADE_FLAGS (1ull << 20)
#define VC5_DIRTY_PRIM_MODE (1ull << 21)
#define VC5_DIRTY_CLIP (1ull << 22)
#define VC5_DIRTY_UNCOMPILED_CS (1ull << 23)
#define VC5_DIRTY_UNCOMPILED_VS (1ull << 24)
#define VC5_DIRTY_UNCOMPILED_FS (1ull << 25)

#define VC5_DIRTY_COMPILED_CS (1ull << 29)
#define VC5_DIRTY_COMPILED_VS (1ull << 30)
#define VC5_DIRTY_COMPILED_FS (1ull << 31)

#define VC5_DIRTY_FS_INPUTS (1ull << 35)
#define VC5_DIRTY_STREAMOUT (1ull << 36)
#define VC5_DIRTY_OQ (1ull << 37)
#define VC5_DIRTY_CENTROID_FLAGS (1ull << 38)
#define VC5_DIRTY_NOPERSPECTIVE_FLAGS (1ull << 39)
#define VC5_DIRTY_SSBO (1ull << 40)

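/* How the dirty bits are used (illustrative sketch only, not a declaration
 * in this header): each CSO bind/set hook ORs the matching VC5_DIRTY_* bit
 * into v3d->dirty, and the draw/dispatch paths then re-emit only the state
 * whose bit is set. The real hooks live in v3d_state.c / v3d_program.c;
 * the function below is just an example of the pattern:
 *
 *     static void
 *     v3d_blend_state_bind(struct pipe_context *pctx, void *blend)
 *     {
 *             struct v3d_context *v3d = v3d_context(pctx);
 *             v3d->blend = blend;
 *             v3d->dirty |= VC5_DIRTY_BLEND;
 *     }
 */
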
#define VC5_MAX_FS_INPUTS 64

enum v3d_sampler_state_variant {
        V3D_SAMPLER_STATE_BORDER_0,
        V3D_SAMPLER_STATE_F16,
        V3D_SAMPLER_STATE_F16_UNORM,
        V3D_SAMPLER_STATE_F16_SNORM,
        V3D_SAMPLER_STATE_F16_BGRA,
        V3D_SAMPLER_STATE_F16_BGRA_UNORM,
        V3D_SAMPLER_STATE_F16_BGRA_SNORM,
        V3D_SAMPLER_STATE_F16_A,
        V3D_SAMPLER_STATE_F16_A_SNORM,
        V3D_SAMPLER_STATE_F16_A_UNORM,
        V3D_SAMPLER_STATE_F16_LA,
        V3D_SAMPLER_STATE_F16_LA_UNORM,
        V3D_SAMPLER_STATE_F16_LA_SNORM,
        V3D_SAMPLER_STATE_32,
        V3D_SAMPLER_STATE_32_UNORM,
        V3D_SAMPLER_STATE_32_SNORM,
        V3D_SAMPLER_STATE_32_A,
        V3D_SAMPLER_STATE_32_A_UNORM,
        V3D_SAMPLER_STATE_32_A_SNORM,
        V3D_SAMPLER_STATE_1010102U,
        V3D_SAMPLER_STATE_16U,
        V3D_SAMPLER_STATE_16I,
        V3D_SAMPLER_STATE_8I,
        V3D_SAMPLER_STATE_8U,

        V3D_SAMPLER_STATE_VARIANT_COUNT,
};

enum v3d_flush_cond {
        /* Flush job unless we are flushing for transform feedback, where we
         * handle flushing in the driver via the 'Wait for TF' packet.
         */
        V3D_FLUSH_DEFAULT,
        /* Always flush the job, even for cases where we would normally not
         * do it, such as transform feedback.
         */
        V3D_FLUSH_ALWAYS,
        /* Flush job if it is not the current FBO job. This is intended to
         * skip automatic flushes of the current job for resources that we
         * expect to be externally synchronized by the application using
         * glMemoryBarrier(), such as SSBOs and shader images.
         */
        V3D_FLUSH_NOT_CURRENT_JOB,
};
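
/* Illustrative call site (sketch only): before reading from a resource, jobs
 * writing it are flushed first, while SSBO/image accesses that the app orders
 * with glMemoryBarrier() can skip flushing the current FBO job:
 *
 *     v3d_flush_jobs_writing_resource(v3d, prsc, V3D_FLUSH_NOT_CURRENT_JOB);
 */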

struct v3d_sampler_view {
        struct pipe_sampler_view base;
        uint32_t p0;
        uint32_t p1;
        /* Precomputed swizzles to pass in to the shader key. */
        uint8_t swizzle[4];

        /* V3D 3.x: Packed texture state. */
        uint8_t texture_shader_state[32];
        /* V3D 4.x: Texture state struct. */
        struct v3d_bo *bo;

        enum v3d_sampler_state_variant sampler_variant;

        /* Actual texture to be read by this sampler view. May be different
         * from base.texture in the case of having a shadow tiled copy of a
         * raster texture.
         */
        struct pipe_resource *texture;
};

struct v3d_sampler_state {
        struct pipe_sampler_state base;
        uint32_t p0;
        uint32_t p1;

        /* V3D 3.x: Packed texture state. */
        uint8_t texture_shader_state[32];
        /* V3D 4.x: Sampler state struct. */
        struct pipe_resource *sampler_state;
        uint32_t sampler_state_offset[V3D_SAMPLER_STATE_VARIANT_COUNT];

        bool border_color_variants;
};

struct v3d_texture_stateobj {
        struct pipe_sampler_view *textures[V3D_MAX_TEXTURE_SAMPLERS];
        unsigned num_textures;
        struct pipe_sampler_state *samplers[V3D_MAX_TEXTURE_SAMPLERS];
        unsigned num_samplers;
        struct v3d_cl_reloc texture_state[V3D_MAX_TEXTURE_SAMPLERS];
};

struct v3d_shader_uniform_info {
        enum quniform_contents *contents;
        uint32_t *data;
        uint32_t count;
};

struct v3d_uncompiled_shader {
        /** A name for this program, so you can track it in shader-db output. */
        uint32_t program_id;
        /** How many variants of this program were compiled, for shader-db. */
        uint32_t compiled_variant_count;
        struct pipe_shader_state base;
        uint32_t num_tf_outputs;
        struct v3d_varying_slot *tf_outputs;
        uint16_t tf_specs[16];
        uint16_t tf_specs_psiz[16];
        uint32_t num_tf_specs;
};

struct v3d_compiled_shader {
        struct pipe_resource *resource;
        uint32_t offset;

        union {
                struct v3d_prog_data *base;
                struct v3d_vs_prog_data *vs;
                struct v3d_fs_prog_data *fs;
                struct v3d_compute_prog_data *compute;
        } prog_data;

        /**
         * VC5_DIRTY_* flags that, when set in v3d->dirty, mean that the
         * uniforms have to be rewritten (and therefore the shader state
         * reemitted).
         */
        uint64_t uniform_dirty_bits;
};
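
/* Sketch of how uniform_dirty_bits is meant to be consumed (illustrative;
 * the real check lives in the state-emission code, not in this header):
 *
 *     if (v3d->dirty & v3d->prog.fs->uniform_dirty_bits)
 *             fs_uniforms = v3d_write_uniforms(v3d, v3d->prog.fs,
 *                                              PIPE_SHADER_FRAGMENT);
 */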

struct v3d_program_stateobj {
        struct v3d_uncompiled_shader *bind_vs, *bind_fs, *bind_compute;
        /* Note: "cs" is the binning-mode coordinate shader variant of the
         * bound VS, not the compute shader (that one is "compute").
         */
        struct v3d_compiled_shader *cs, *vs, *fs, *compute;

        struct hash_table *cache[MESA_SHADER_STAGES];

        struct v3d_bo *spill_bo;
        int spill_size_per_thread;
};

struct v3d_constbuf_stateobj {
        struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
        uint32_t enabled_mask;
        uint32_t dirty_mask;
};

struct v3d_vertexbuf_stateobj {
        struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
        unsigned count;
        uint32_t enabled_mask;
        uint32_t dirty_mask;
};

struct v3d_vertex_stateobj {
        struct pipe_vertex_element pipe[V3D_MAX_VS_INPUTS / 4];
        unsigned num_elements;

        uint8_t attrs[16 * (V3D_MAX_VS_INPUTS / 4)];
        struct pipe_resource *defaults;
        uint32_t defaults_offset;
};

struct v3d_stream_output_target {
        struct pipe_stream_output_target base;
        /* Number of transform feedback vertices written to this target */
        uint32_t recorded_vertex_count;
};

struct v3d_streamout_stateobj {
        struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
        /* Number of vertices we've written into the buffer so far. */
        uint32_t offsets[PIPE_MAX_SO_BUFFERS];
        unsigned num_targets;
};

struct v3d_ssbo_stateobj {
        struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
        uint32_t enabled_mask;
};

/* Hash table key for v3d->jobs */
struct v3d_job_key {
        struct pipe_surface *cbufs[4];
        struct pipe_surface *zsbuf;
};

enum v3d_ez_state {
        VC5_EZ_UNDECIDED = 0,
        VC5_EZ_GT_GE,
        VC5_EZ_LT_LE,
        VC5_EZ_DISABLED,
};

struct v3d_image_view {
        struct pipe_image_view base;
        /* V3D 4.x texture shader state struct */
        struct pipe_resource *tex_state;
        uint32_t tex_state_offset;
};

struct v3d_shaderimg_stateobj {
        struct v3d_image_view si[PIPE_MAX_SHADER_IMAGES];
        uint32_t enabled_mask;
};

/**
 * A complete bin/render job.
 *
 * This is all of the state necessary to submit a bin/render to the kernel.
 * We want to be able to have multiple in progress at a time, so that we don't
 * need to flush an existing CL just to switch to rendering to a new render
 * target (which would mean reading back from the old render target when
 * starting to render to it again).
 */
struct v3d_job {
        struct v3d_context *v3d;
        struct v3d_cl bcl;
        struct v3d_cl rcl;
        struct v3d_cl indirect;
        struct v3d_bo *tile_alloc;
        struct v3d_bo *tile_state;
        uint32_t shader_rec_count;

        struct drm_v3d_submit_cl submit;

        /**
         * Set of all BOs referenced by the job. This will be used for making
         * the list of BOs that the kernel will need to have paged in to
         * execute our job.
         */
        struct set *bos;

        /** Sum of the sizes of the BOs referenced by the job. */
        uint32_t referenced_size;

        struct set *write_prscs;
        struct set *tf_write_prscs;

        /* Size of the submit.bo_handles array. */
        uint32_t bo_handles_size;

        /** @{ Surfaces to submit rendering for. */
        struct pipe_surface *cbufs[4];
        struct pipe_surface *zsbuf;
        /** @} */
        /** @{
         * Bounding box of the scissor across all queued drawing.
         *
         * Note that the max values are exclusive.
         */
        uint32_t draw_min_x;
        uint32_t draw_min_y;
        uint32_t draw_max_x;
        uint32_t draw_max_y;
        /** @} */
        /** @{
         * Width/height of the color framebuffer being rendered to,
         * for VC5_TILE_RENDERING_MODE_CONFIG.
         */
        uint32_t draw_width;
        uint32_t draw_height;
        /** @} */
        /** @{ Tile information, depending on MSAA and float color buffer. */
        uint32_t draw_tiles_x; /**< Number of tiles wide for framebuffer. */
        uint32_t draw_tiles_y; /**< Number of tiles high for framebuffer. */

        uint32_t tile_width; /**< Width of a tile. */
        uint32_t tile_height; /**< Height of a tile. */
        /** Maximum internal_bpp of all color render targets. */
        uint32_t internal_bpp;

        /** Whether the current rendering is in a 4X MSAA tile buffer. */
        bool msaa;
        /** @} */

        /* Bitmask of PIPE_CLEAR_* of buffers that were cleared before the
         * first rendering.
         */
        uint32_t clear;
        /* Bitmask of PIPE_CLEAR_* of buffers that have been read by a draw
         * call without having been cleared first.
         */
        uint32_t load;
        /* Bitmask of PIPE_CLEAR_* of buffers that have been rendered to
         * (either clears or draws) and should be stored.
         */
        uint32_t store;
        uint32_t clear_color[4][4];
        float clear_z;
        uint8_t clear_s;

        /**
         * Set if some drawing (triangles, blits, or just a glClear()) has
         * been done to the FBO, meaning that we need to
         * DRM_IOCTL_V3D_SUBMIT_CL.
         */
        bool needs_flush;

        /* Set if any shader has dirtied cachelines in the TMU that need to be
         * flushed before job end.
         */
        bool tmu_dirty_rcl;

        /**
         * Set if a packet enabling TF has been emitted in the job (V3D 4.x).
         */
        bool tf_enabled;

        /**
         * Current EZ state for drawing. Updated at the start of draw after
         * we've decided on the shader being rendered.
         */
        enum v3d_ez_state ez_state;
        /**
         * The first EZ state that was used for drawing with a decided EZ
         * direction (so either UNDECIDED, GT, or LT).
         */
        enum v3d_ez_state first_ez_state;

        /**
         * Number of draw calls (not counting full buffer clears) queued in
         * the current job.
         */
        uint32_t draw_calls_queued;

        struct v3d_job_key key;
};
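
/* Sketch of a job's lifetime (illustrative only; the real code is spread
 * across v3d_job.c and the draw path): drawing looks up or creates the job
 * for the bound FBO, references every BO it uses, and the job is submitted
 * to the kernel when something forces a flush:
 *
 *     struct v3d_job *job = v3d_get_job_for_fbo(v3d);
 *     v3d_job_add_bo(job, bo);
 *     ...
 *     v3d_job_submit(v3d, job);
 */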

struct v3d_context {
        struct pipe_context base;

        int fd;
        struct v3d_screen *screen;

        /** The 3D rendering job for the currently bound FBO. */
        struct v3d_job *job;

        /* Map from struct v3d_job_key to the job for that FBO.
         */
        struct hash_table *jobs;

        /**
         * Map from v3d_resource to a job writing to that resource.
         *
         * Primarily for flushing jobs rendering to textures that are now
         * being read from.
         */
        struct hash_table *write_jobs;

        struct slab_child_pool transfer_pool;
        struct blitter_context *blitter;

        /** bitfield of VC5_DIRTY_* */
        uint64_t dirty;

        struct primconvert_context *primconvert;

        uint32_t next_uncompiled_program_id;
        uint64_t next_compiled_program_id;

        struct v3d_compiler_state *compiler_state;

        uint8_t prim_mode;

        /** Maximum index buffer valid for the current shader_rec. */
        uint32_t max_index;

        /** Sync object that our RCL or TFU job will update as its out_sync. */
        uint32_t out_sync;

        /* Stream uploader used by gallium internals. This could also be used
         * by driver internals, but we tend to use the v3d_cl.h interfaces
         * instead.
         */
        struct u_upload_mgr *uploader;
        /* State uploader used inside the driver. This is for packing bits of
         * long-term state inside buffers, since the kernel interfaces
         * allocate a page at a time.
         */
        struct u_upload_mgr *state_uploader;

        /** @{ Current pipeline state objects */
        struct pipe_scissor_state scissor;
        struct v3d_blend_state *blend;
        struct v3d_rasterizer_state *rasterizer;
        struct v3d_depth_stencil_alpha_state *zsa;

        struct v3d_program_stateobj prog;
        uint32_t compute_num_workgroups[3];
        struct v3d_bo *compute_shared_memory;

        struct v3d_vertex_stateobj *vtx;

        struct {
                struct pipe_blend_color f;
                uint16_t hf[4];
        } blend_color;
        struct pipe_stencil_ref stencil_ref;
        unsigned sample_mask;
        struct pipe_framebuffer_state framebuffer;

        /* Per render target, whether we should swap the R and B fields in the
         * shader's color output and in blending. If render targets disagree
         * on the R/B swap and use the constant color, then we would need to
         * fall back to in-shader blending.
         */
        uint8_t swap_color_rb;

        /* Per render target, whether we should treat the dst alpha values as
         * one in blending.
         *
         * For RGBX formats, the tile buffer's alpha channel will be
         * undefined.
         */
        uint8_t blend_dst_alpha_one;

        bool active_queries;

        uint32_t tf_prims_generated;
        uint32_t prims_generated;

        struct pipe_poly_stipple stipple;
        struct pipe_clip_state clip;
        struct pipe_viewport_state viewport;
        struct v3d_ssbo_stateobj ssbo[PIPE_SHADER_TYPES];
        struct v3d_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
        struct v3d_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
        struct v3d_texture_stateobj tex[PIPE_SHADER_TYPES];
        struct v3d_vertexbuf_stateobj vertexbuf;
        struct v3d_streamout_stateobj streamout;
        struct v3d_bo *current_oq;
        struct pipe_resource *prim_counts;
        uint32_t prim_counts_offset;
        struct pipe_debug_callback debug;
        /** @} */
};

struct v3d_rasterizer_state {
        struct pipe_rasterizer_state base;

        float point_size;

        uint8_t depth_offset[9];
        uint8_t depth_offset_z16[9];
};

struct v3d_depth_stencil_alpha_state {
        struct pipe_depth_stencil_alpha_state base;

        enum v3d_ez_state ez_state;

        uint8_t stencil_front[6];
        uint8_t stencil_back[6];
};

struct v3d_blend_state {
        struct pipe_blend_state base;

        /* Per-RT mask of whether blending is enabled. */
        uint8_t blend_enables;
};

#define perf_debug(...) do {                                            \
        if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF))                       \
                fprintf(stderr, __VA_ARGS__);                           \
        if (unlikely(v3d->debug.debug_message))                         \
                pipe_debug_message(&v3d->debug, PERF_INFO, __VA_ARGS__); \
} while (0)
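
/* Note that perf_debug() expands to code that dereferences a local variable
 * named "v3d", so it can only be used where a struct v3d_context *v3d is in
 * scope. Illustrative use (hypothetical call site):
 *
 *     struct v3d_context *v3d = v3d_context(pctx);
 *     perf_debug("Falling back to software blit for format %s\n",
 *                util_format_short_name(info->format));
 */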

#define foreach_bit(b, mask)                                            \
        for (uint32_t _m = (mask), b; _m && ({(b) = u_bit_scan(&_m); 1;});)

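/* Usage sketch for foreach_bit() (illustrative; upload_cb() is a hypothetical
 * helper): visit each set bit in a mask, e.g. every enabled constant buffer:
 *
 *     foreach_bit(i, v3d->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
 *             upload_cb(v3d, i);
 */
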
static inline struct v3d_context *
v3d_context(struct pipe_context *pcontext)
{
        return (struct v3d_context *)pcontext;
}

static inline struct v3d_sampler_view *
v3d_sampler_view(struct pipe_sampler_view *psview)
{
        return (struct v3d_sampler_view *)psview;
}

static inline struct v3d_sampler_state *
v3d_sampler_state(struct pipe_sampler_state *psampler)
{
        return (struct v3d_sampler_state *)psampler;
}

static inline struct v3d_stream_output_target *
v3d_stream_output_target(struct pipe_stream_output_target *ptarget)
{
        return (struct v3d_stream_output_target *)ptarget;
}

static inline uint32_t
v3d_stream_output_target_get_vertex_count(struct pipe_stream_output_target *ptarget)
{
        return v3d_stream_output_target(ptarget)->recorded_vertex_count;
}

struct pipe_context *v3d_context_create(struct pipe_screen *pscreen,
                                        void *priv, unsigned flags);
void v3d_program_init(struct pipe_context *pctx);
void v3d_program_fini(struct pipe_context *pctx);
void v3d_query_init(struct pipe_context *pctx);

void v3d_simulator_init(struct v3d_screen *screen);
void v3d_simulator_destroy(struct v3d_screen *screen);
uint32_t v3d_simulator_get_spill(uint32_t spill_size);
int v3d_simulator_ioctl(int fd, unsigned long request, void *arg);
void v3d_simulator_open_from_handle(int fd, int handle, uint32_t size);

static inline int
v3d_ioctl(int fd, unsigned long request, void *arg)
{
        if (using_v3d_simulator)
                return v3d_simulator_ioctl(fd, request, arg);
        else
                return drmIoctl(fd, request, arg);
}
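
/* All kernel calls in the driver go through this wrapper so that the
 * simulator build can intercept them. Illustrative call (sketch; the real
 * call sites live in v3d_bufmgr.c and friends):
 *
 *     struct drm_v3d_wait_bo wait = {
 *             .handle = bo->handle,
 *             .timeout_ns = ~0ull,
 *     };
 *     int ret = v3d_ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
 */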

static inline bool
v3d_transform_feedback_enabled(struct v3d_context *v3d)
{
        return v3d->prog.bind_vs->num_tf_specs != 0 &&
               v3d->active_queries;
}

void v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader);
struct v3d_cl_reloc v3d_write_uniforms(struct v3d_context *v3d,
                                       struct v3d_compiled_shader *shader,
                                       enum pipe_shader_type stage);

void v3d_flush(struct pipe_context *pctx);
void v3d_job_init(struct v3d_context *v3d);
struct v3d_job *v3d_get_job(struct v3d_context *v3d,
                            struct pipe_surface **cbufs,
                            struct pipe_surface *zsbuf);
struct v3d_job *v3d_get_job_for_fbo(struct v3d_context *v3d);
void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);
void v3d_job_add_write_resource(struct v3d_job *job, struct pipe_resource *prsc);
void v3d_job_add_tf_write_resource(struct v3d_job *job, struct pipe_resource *prsc);
void v3d_job_submit(struct v3d_context *v3d, struct v3d_job *job);
void v3d_flush_jobs_using_bo(struct v3d_context *v3d, struct v3d_bo *bo);
void v3d_flush_jobs_writing_resource(struct v3d_context *v3d,
                                     struct pipe_resource *prsc,
                                     enum v3d_flush_cond flush_cond);
void v3d_flush_jobs_reading_resource(struct v3d_context *v3d,
                                     struct pipe_resource *prsc,
                                     enum v3d_flush_cond flush_cond);
void v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode);
void v3d_update_compiled_cs(struct v3d_context *v3d);

bool v3d_rt_format_supported(const struct v3d_device_info *devinfo,
                             enum pipe_format f);
bool v3d_tex_format_supported(const struct v3d_device_info *devinfo,
                              enum pipe_format f);
uint8_t v3d_get_rt_format(const struct v3d_device_info *devinfo, enum pipe_format f);
uint8_t v3d_get_tex_format(const struct v3d_device_info *devinfo, enum pipe_format f);
uint8_t v3d_get_tex_return_size(const struct v3d_device_info *devinfo,
                                enum pipe_format f,
                                enum pipe_tex_compare compare);
uint8_t v3d_get_tex_return_channels(const struct v3d_device_info *devinfo,
                                    enum pipe_format f);
const uint8_t *v3d_get_format_swizzle(const struct v3d_device_info *devinfo,
                                      enum pipe_format f);
void v3d_get_internal_type_bpp_for_output_format(const struct v3d_device_info *devinfo,
                                                 uint32_t format,
                                                 uint32_t *type,
                                                 uint32_t *bpp);
bool v3d_tfu_supports_tex_format(const struct v3d_device_info *devinfo,
                                 uint32_t tex_format);

void v3d_init_query_functions(struct v3d_context *v3d);
void v3d_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info);
void v3d_blitter_save(struct v3d_context *v3d);
bool v3d_generate_mipmap(struct pipe_context *pctx,
                         struct pipe_resource *prsc,
                         enum pipe_format format,
                         unsigned int base_level,
                         unsigned int last_level,
                         unsigned int first_layer,
                         unsigned int last_layer);

struct v3d_fence *v3d_fence_create(struct v3d_context *v3d);

void v3d_tf_update_counters(struct v3d_context *v3d);

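/* v3dx_context.h declares the per-hardware-generation entry points. When
 * v3dX is already defined (e.g. via broadcom/common/v3d_macros.h for a
 * per-version compilation unit), it is included once for that version;
 * otherwise it is included once per supported generation below, producing
 * v3d33_*() and v3d41_*() prototypes.
 */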
#ifdef v3dX
# include "v3dx_context.h"
#else
# define v3dX(x) v3d33_##x
# include "v3dx_context.h"
# undef v3dX

# define v3dX(x) v3d41_##x
# include "v3dx_context.h"
# undef v3dX
#endif

#endif /* VC5_CONTEXT_H */