gallium: remove pipe_surface::usage
[mesa.git] / src / gallium / drivers / r600 / r600_pipe.h
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jerome Glisse
25 */
26 #ifndef R600_PIPE_H
27 #define R600_PIPE_H
28
29 #include "util/u_blitter.h"
30 #include "util/u_slab.h"
31 #include "r600.h"
32 #include "r600_llvm.h"
33 #include "r600_public.h"
34 #include "r600_resource.h"
35 #include "evergreen_compute.h"
36
37 #define R600_NUM_ATOMS 36
38
39 #define R600_MAX_USER_CONST_BUFFERS 1
40 #define R600_MAX_DRIVER_CONST_BUFFERS 2
41 #define R600_MAX_CONST_BUFFERS (R600_MAX_USER_CONST_BUFFERS + R600_MAX_DRIVER_CONST_BUFFERS)
42
43 /* start driver buffers after user buffers */
44 #define R600_UCP_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS)
45 #define R600_TXQ_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS + 1)
46
47 #define R600_MAX_CONST_BUFFER_SIZE 4096
48
49 #ifdef PIPE_ARCH_BIG_ENDIAN
50 #define R600_BIG_ENDIAN 1
51 #else
52 #define R600_BIG_ENDIAN 0
53 #endif
54
55 #define R600_MAP_BUFFER_ALIGNMENT 64
56
57 struct r600_bytecode;
58 struct r600_shader_key;
59
60 /* This encapsulates a state or an operation which can emitted into the GPU
61 * command stream. It's not limited to states only, it can be used for anything
62 * that wants to write commands into the CS (e.g. cache flushes). */
63 struct r600_atom {
	/* Callback that writes this atom's commands into the CS. */
64 	void (*emit)(struct r600_context *ctx, struct r600_atom *state);
65 	unsigned id;		/* slot in r600_context::atoms[] (see r600_init_atom) */
66 	unsigned num_dw;	/* upper bound on dwords emit() may write */
67 	bool dirty;		/* true = must be re-emitted before the next draw */
68 };
69
70 /* This is an atom containing GPU commands that never change.
71 * This is supposed to be copied directly into the CS. */
72 struct r600_command_buffer {
73 	uint32_t *buf;		/* the command dwords */
74 	unsigned num_dw;	/* dwords currently stored in buf */
75 	unsigned max_num_dw;	/* allocated capacity of buf, in dwords */
	/* ORed into PKT3 headers by the store helpers below; set to
	 * RADEON_CP_PACKET3_COMPUTE_MODE for compute shaders. */
76 	unsigned pkt_flags;
77 };
78
79 struct r600_db_misc_state {
80 struct r600_atom atom;
81 bool occlusion_query_enabled;
82 bool flush_depthstencil_through_cb;
83 bool flush_depthstencil_in_place;
84 bool copy_depth, copy_stencil;
85 unsigned copy_sample;
86 unsigned log_samples;
87 unsigned db_shader_control;
88 };
89
90 struct r600_cb_misc_state {
91 struct r600_atom atom;
92 unsigned cb_color_control; /* this comes from blend state */
93 unsigned blend_colormask; /* 8*4 bits for 8 RGBA colorbuffers */
94 unsigned nr_cbufs;
95 unsigned nr_ps_color_outputs;
96 bool multiwrite;
97 bool dual_src_blend;
98 };
99
100 struct r600_clip_misc_state {
101 struct r600_atom atom;
102 unsigned pa_cl_clip_cntl; /* from rasterizer */
103 unsigned pa_cl_vs_out_cntl; /* from vertex shader */
104 unsigned clip_plane_enable; /* from rasterizer */
105 unsigned clip_dist_write; /* from vertex shader */
106 };
107
108 struct r600_alphatest_state {
109 struct r600_atom atom;
110 unsigned sx_alpha_test_control; /* this comes from dsa state */
111 unsigned sx_alpha_ref; /* this comes from dsa state */
112 bool bypass;
113 bool cb0_export_16bpc; /* from set_framebuffer_state */
114 };
115
116 struct r600_vgt_state {
117 struct r600_atom atom;
118 uint32_t vgt_multi_prim_ib_reset_en;
119 uint32_t vgt_multi_prim_ib_reset_indx;
120 };
121
122 struct r600_vgt2_state {
123 struct r600_atom atom;
124 uint32_t vgt_indx_offset;
125 };
126
127 struct r600_blend_color {
128 struct r600_atom atom;
129 struct pipe_blend_color state;
130 };
131
132 struct r600_clip_state {
133 struct r600_atom atom;
134 struct pipe_clip_state state;
135 };
136
137 struct r600_cs_shader_state {
138 struct r600_atom atom;
139 unsigned kernel_index;
140 struct r600_pipe_compute *shader;
141 };
142
143 struct r600_framebuffer {
144 struct r600_atom atom;
145 struct pipe_framebuffer_state state;
146 unsigned compressed_cb_mask;
147 unsigned nr_samples;
148 bool export_16bpc;
149 bool cb0_is_integer;
150 bool is_msaa_resolve;
151 };
152
153 struct r600_sample_mask {
154 struct r600_atom atom;
155 uint16_t sample_mask; /* there are only 8 bits on EG, 16 bits on Cayman */
156 };
157
158 struct r600_config_state {
159 struct r600_atom atom;
160 unsigned sq_gpr_resource_mgmt_1;
161 };
162
163 struct r600_stencil_ref
164 {
165 ubyte ref_value[2];
166 ubyte valuemask[2];
167 ubyte writemask[2];
168 };
169
170 struct r600_stencil_ref_state {
171 struct r600_atom atom;
172 struct r600_stencil_ref state;
173 struct pipe_stencil_ref pipe_state;
174 };
175
176 struct r600_viewport_state {
177 struct r600_atom atom;
178 struct pipe_viewport_state state;
179 };
180
181 struct compute_memory_pool;
182 void compute_memory_pool_delete(struct compute_memory_pool* pool);
183 struct compute_memory_pool* compute_memory_pool_new(
184 struct r600_screen *rscreen);
185
186 struct r600_pipe_fences {
187 struct r600_resource *bo;
188 unsigned *data;
189 unsigned next_index;
190 /* linked list of preallocated blocks */
191 struct list_head blocks;
192 /* linked list of freed fences */
193 struct list_head pool;
194 pipe_mutex mutex;
195 };
196
197 enum r600_msaa_texture_mode {
198 /* If the hw can fetch the first sample only (no decompression available).
199 * This means MSAA texturing is not fully implemented. */
200 MSAA_TEXTURE_SAMPLE_ZERO,
201
202 /* If the hw can fetch decompressed MSAA textures.
203 * Supported families: R600, R700, Evergreen.
204 * Cayman cannot use this, because it cannot do the decompression. */
205 MSAA_TEXTURE_DECOMPRESSED,
206
207 /* If the hw can fetch compressed MSAA textures, which means shaders can
208 * read resolved FMASK. This yields the best performance.
209 * Supported families: Evergreen, Cayman. */
210 MSAA_TEXTURE_COMPRESSED
211 };
212
213 struct r600_screen {
214 struct pipe_screen screen;
215 struct radeon_winsys *ws;
216 unsigned family;
217 enum chip_class chip_class;
218 struct radeon_info info;
219 bool has_streamout;
220 bool has_msaa;
221 enum r600_msaa_texture_mode msaa_texture_support;
222 struct r600_tiling_info tiling_info;
223 struct r600_pipe_fences fences;
224
225 /*for compute global memory binding, we allocate stuff here, instead of
226 * buffers.
227 * XXX: Not sure if this is the best place for global_pool. Also,
228 * it's not thread safe, so it won't work with multiple contexts. */
229 struct compute_memory_pool *global_pool;
230 };
231
232 struct r600_pipe_sampler_view {
233 struct pipe_sampler_view base;
234 struct r600_resource *tex_resource;
235 uint32_t tex_resource_words[8];
236 bool skip_mip_address_reloc;
237 };
238
239 struct r600_rasterizer_state {
240 struct r600_command_buffer buffer;
241 boolean flatshade;
242 boolean two_side;
243 unsigned sprite_coord_enable;
244 unsigned clip_plane_enable;
245 unsigned pa_sc_line_stipple;
246 unsigned pa_cl_clip_cntl;
247 float offset_units;
248 float offset_scale;
249 bool offset_enable;
250 bool scissor_enable;
251 bool multisample_enable;
252 };
253
254 struct r600_poly_offset_state {
255 struct r600_atom atom;
256 enum pipe_format zs_format;
257 float offset_units;
258 float offset_scale;
259 };
260
261 struct r600_blend_state {
262 struct r600_command_buffer buffer;
263 struct r600_command_buffer buffer_no_blend;
264 unsigned cb_target_mask;
265 unsigned cb_color_control;
266 unsigned cb_color_control_no_blend;
267 bool dual_src_blend;
268 bool alpha_to_one;
269 };
270
271 struct r600_dsa_state {
272 struct r600_command_buffer buffer;
273 unsigned alpha_ref;
274 ubyte valuemask[2];
275 ubyte writemask[2];
276 unsigned sx_alpha_test_control;
277 };
278
279 struct r600_pipe_shader;
280
281 struct r600_pipe_shader_selector {
282 struct r600_pipe_shader *current;
283
284 struct tgsi_token *tokens;
285 struct pipe_stream_output_info so;
286
287 unsigned num_shaders;
288
289 /* PIPE_SHADER_[VERTEX|FRAGMENT|...] */
290 unsigned type;
291
292 unsigned nr_ps_max_color_exports;
293 };
294
295 struct r600_pipe_sampler_state {
296 uint32_t tex_sampler_words[3];
297 union pipe_color_union border_color;
298 bool border_color_use;
299 bool seamless_cube_map;
300 };
301
302 /* needed for blitter save */
303 #define NUM_TEX_UNITS 16
304
305 struct r600_seamless_cube_map {
306 struct r600_atom atom;
307 bool enabled;
308 };
309
310 struct r600_samplerview_state {
311 struct r600_atom atom;
312 struct r600_pipe_sampler_view *views[NUM_TEX_UNITS];
313 uint32_t enabled_mask;
314 uint32_t dirty_mask;
315 uint32_t compressed_depthtex_mask; /* which textures are depth */
316 uint32_t compressed_colortex_mask;
317 boolean dirty_txq_constants;
318 };
319
320 struct r600_sampler_states {
321 struct r600_atom atom;
322 struct r600_pipe_sampler_state *states[NUM_TEX_UNITS];
323 uint32_t enabled_mask;
324 uint32_t dirty_mask;
325 uint32_t has_bordercolor_mask; /* which states contain the border color */
326 };
327
328 struct r600_textures_info {
329 struct r600_samplerview_state views;
330 struct r600_sampler_states states;
331 bool is_array_sampler[NUM_TEX_UNITS];
332
333 /* cube array txq workaround */
334 uint32_t *txq_constants;
335 };
336
337 struct r600_fence {
338 struct pipe_reference reference;
339 unsigned index; /* in the shared bo */
340 struct r600_resource *sleep_bo;
341 struct list_head head;
342 };
343
344 #define FENCE_BLOCK_SIZE 16
345
346 struct r600_fence_block {
347 struct r600_fence fences[FENCE_BLOCK_SIZE];
348 struct list_head head;
349 };
350
351 #define R600_CONSTANT_ARRAY_SIZE 256
352 #define R600_RESOURCE_ARRAY_SIZE 160
353
354 struct r600_constbuf_state
355 {
356 struct r600_atom atom;
357 struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
358 uint32_t enabled_mask;
359 uint32_t dirty_mask;
360 };
361
362 struct r600_vertexbuf_state
363 {
364 struct r600_atom atom;
365 struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
366 uint32_t enabled_mask; /* non-NULL buffers */
367 uint32_t dirty_mask;
368 };
369
370 /* CSO (constant state object, in other words, immutable state). */
371 struct r600_cso_state
372 {
373 struct r600_atom atom;
374 void *cso; /* e.g. r600_blend_state */
375 struct r600_command_buffer *cb;
376 };
377
378 struct r600_scissor_state
379 {
380 struct r600_atom atom;
381 struct pipe_scissor_state scissor;
382 bool enable; /* r6xx only */
383 };
384
385 struct r600_context {
386 struct pipe_context context;
387 struct r600_screen *screen;
388 struct radeon_winsys *ws;
389 struct radeon_winsys_cs *cs;
390 struct blitter_context *blitter;
391 struct u_upload_mgr *uploader;
392 struct util_slab_mempool pool_transfers;
393
394 /* Hardware info. */
395 enum radeon_family family;
396 enum chip_class chip_class;
397 boolean has_vertex_cache;
398 boolean keep_tiling_flags;
399 unsigned default_ps_gprs, default_vs_gprs;
400 unsigned r6xx_num_clause_temp_gprs;
401 unsigned backend_mask;
402 unsigned max_db; /* for OQ */
403
404 /* Miscellaneous state objects. */
405 void *custom_dsa_flush;
406 void *custom_blend_resolve;
407 void *custom_blend_decompress;
408 void *custom_blend_fmask_decompress;
409 /* With rasterizer discard, there doesn't have to be a pixel shader.
410 * In that case, we bind this one: */
411 void *dummy_pixel_shader;
412 /* These dummy CMASK and FMASK buffers are used to get around the R6xx hardware
413 * bug where valid CMASK and FMASK are required to be present to avoid
414 * a hardlock in certain operations but aren't actually used
415 * for anything useful. */
416 struct r600_resource *dummy_fmask;
417 struct r600_resource *dummy_cmask;
418
419 /* State binding slots are here. */
420 struct r600_atom *atoms[R600_NUM_ATOMS];
421 /* States for CS initialization. */
422 struct r600_command_buffer start_cs_cmd; /* invariant state mostly */
423 /** Compute specific registers initializations. The start_cs_cmd atom
424 * must be emitted before start_compute_cs_cmd. */
425 struct r600_command_buffer start_compute_cs_cmd;
426 /* Register states. */
427 struct r600_alphatest_state alphatest_state;
428 struct r600_cso_state blend_state;
429 struct r600_blend_color blend_color;
430 struct r600_cb_misc_state cb_misc_state;
431 struct r600_clip_misc_state clip_misc_state;
432 struct r600_clip_state clip_state;
433 struct r600_db_misc_state db_misc_state;
434 struct r600_cso_state dsa_state;
435 struct r600_framebuffer framebuffer;
436 struct r600_poly_offset_state poly_offset_state;
437 struct r600_cso_state rasterizer_state;
438 struct r600_sample_mask sample_mask;
439 struct r600_scissor_state scissor;
440 struct r600_seamless_cube_map seamless_cube_map;
441 struct r600_config_state config_state;
442 struct r600_stencil_ref_state stencil_ref;
443 struct r600_vgt_state vgt_state;
444 struct r600_vgt2_state vgt2_state;
445 struct r600_viewport_state viewport;
446 /* Shaders and shader resources. */
447 struct r600_cso_state vertex_fetch_shader;
448 struct r600_cs_shader_state cs_shader_state;
449 struct r600_constbuf_state constbuf_state[PIPE_SHADER_TYPES];
450 struct r600_textures_info samplers[PIPE_SHADER_TYPES];
451 /** Vertex buffers for fetch shaders */
452 struct r600_vertexbuf_state vertex_buffer_state;
453 /** Vertex buffers for compute shaders */
454 struct r600_vertexbuf_state cs_vertex_buffer_state;
455
456 /* Additional context states. */
457 unsigned flags;
458 unsigned compute_cb_target_mask;
459 struct r600_pipe_shader_selector *ps_shader;
460 struct r600_pipe_shader_selector *vs_shader;
461 struct r600_rasterizer_state *rasterizer;
462 bool alpha_to_one;
463 bool force_blend_disable;
464 boolean dual_src_blend;
465
466 /* Index buffer. */
467 struct pipe_index_buffer index_buffer;
468
469 /* Last draw state (-1 = unset). */
470 int last_primitive_type; /* Last primitive type used in draw_vbo. */
471 int last_start_instance;
472
473 /* Queries. */
474 /* The list of active queries. Only one query of each type can be active. */
475 int num_occlusion_queries;
476 /* Manage queries in two separate groups:
477 * The timer ones and the others (streamout, occlusion).
478 *
479 * We do this because we should only suspend non-timer queries for u_blitter,
480 * and later if the non-timer queries are suspended, the context flush should
481 * only suspend and resume the timer queries. */
482 struct list_head active_timer_queries;
483 unsigned num_cs_dw_timer_queries_suspend;
484 struct list_head active_nontimer_queries;
485 unsigned num_cs_dw_nontimer_queries_suspend;
486 /* Flags if queries have been suspended. */
487 bool timer_queries_suspended;
488 bool nontimer_queries_suspended;
489
490 /* Render condition. */
491 struct pipe_query *current_render_cond;
492 unsigned current_render_cond_mode;
493 boolean predicate_drawing;
494
495 /* Streamout state. */
496 unsigned num_cs_dw_streamout_end;
497 unsigned num_so_targets;
498 struct r600_so_target *so_targets[PIPE_MAX_SO_BUFFERS];
499 boolean streamout_start;
500 unsigned streamout_append_bitmask;
501 bool streamout_suspended;
502
503 /* Deprecated state management. */
504 struct r600_range *range;
505 unsigned nblocks;
506 struct r600_block **blocks;
507 struct list_head dirty;
508 struct list_head enable_list;
509 unsigned pm4_dirty_cdwords;
510 };
511
/* Copy a prebuilt, immutable command buffer verbatim into the CS. */
512 static INLINE void r600_emit_command_buffer(struct radeon_winsys_cs *cs,
513 struct r600_command_buffer *cb)
514 {
515 assert(cs->cdw + cb->num_dw <= RADEON_MAX_CMDBUF_DWORDS);
/* 4 bytes per dword. */
516 memcpy(cs->buf + cs->cdw, cb->buf, 4 * cb->num_dw);
517 cs->cdw += cb->num_dw;
518 }
519
/* Emit one state atom into the CS and clear its dirty flag. */
520 static INLINE void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)
521 {
522 atom->emit(rctx, atom);
523 atom->dirty = false;
524 }
525
/* Bind a CSO; binding NULL unbinds, so nothing needs to be emitted then. */
526 static INLINE void r600_set_cso_state(struct r600_cso_state *state, void *cso)
527 {
528 state->cso = cso;
529 state->atom.dirty = cso != NULL;
530 }
531
/* Bind a CSO together with its prebuilt command buffer; the atom's
 * dword count is taken from the buffer. */
532 static INLINE void r600_set_cso_state_with_cb(struct r600_cso_state *state, void *cso,
533 struct r600_command_buffer *cb)
534 {
535 state->cb = cb;
536 state->atom.num_dw = cb->num_dw;
537 r600_set_cso_state(state, cso);
538 }
539
540 /* evergreen_state.c */
541 struct pipe_sampler_view *
542 evergreen_create_sampler_view_custom(struct pipe_context *ctx,
543 struct pipe_resource *texture,
544 const struct pipe_sampler_view *state,
545 unsigned width0, unsigned height0);
546 void evergreen_init_common_regs(struct r600_command_buffer *cb,
547 enum chip_class ctx_chip_class,
548 enum radeon_family ctx_family,
549 int ctx_drm_minor);
550 void cayman_init_common_regs(struct r600_command_buffer *cb,
551 enum chip_class ctx_chip_class,
552 enum radeon_family ctx_family,
553 int ctx_drm_minor);
554
555 void evergreen_init_state_functions(struct r600_context *rctx);
556 void evergreen_init_atom_start_cs(struct r600_context *rctx);
557 void evergreen_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader);
558 void evergreen_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader);
559 void *evergreen_create_db_flush_dsa(struct r600_context *rctx);
560 void *evergreen_create_resolve_blend(struct r600_context *rctx);
561 void *evergreen_create_decompress_blend(struct r600_context *rctx);
562 void *evergreen_create_fmask_decompress_blend(struct r600_context *rctx);
563 boolean evergreen_is_format_supported(struct pipe_screen *screen,
564 enum pipe_format format,
565 enum pipe_texture_target target,
566 unsigned sample_count,
567 unsigned usage);
568 void evergreen_init_color_surface(struct r600_context *rctx,
569 struct r600_surface *surf);
570 void evergreen_init_color_surface_rat(struct r600_context *rctx,
571 struct r600_surface *surf);
572 void evergreen_update_db_shader_control(struct r600_context * rctx);
573
574 /* r600_blit.c */
575 void r600_copy_buffer(struct pipe_context *ctx, struct
576 pipe_resource *dst, unsigned dstx,
577 struct pipe_resource *src, const struct pipe_box *src_box);
578 void r600_init_blit_functions(struct r600_context *rctx);
579 void r600_blit_decompress_depth(struct pipe_context *ctx,
580 struct r600_texture *texture,
581 struct r600_texture *staging,
582 unsigned first_level, unsigned last_level,
583 unsigned first_layer, unsigned last_layer,
584 unsigned first_sample, unsigned last_sample);
585 void r600_decompress_depth_textures(struct r600_context *rctx,
586 struct r600_samplerview_state *textures);
587 void r600_decompress_color_textures(struct r600_context *rctx,
588 struct r600_samplerview_state *textures);
589
590 /* r600_buffer.c */
591 bool r600_init_resource(struct r600_screen *rscreen,
592 struct r600_resource *res,
593 unsigned size, unsigned alignment,
594 unsigned bind, unsigned usage);
595 struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
596 const struct pipe_resource *templ,
597 unsigned alignment);
598
599 /* r600_pipe.c */
600 void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
601 unsigned flags);
602
603 /* r600_query.c */
604 void r600_init_query_functions(struct r600_context *rctx);
605 void r600_suspend_nontimer_queries(struct r600_context *ctx);
606 void r600_resume_nontimer_queries(struct r600_context *ctx);
607 void r600_suspend_timer_queries(struct r600_context *ctx);
608 void r600_resume_timer_queries(struct r600_context *ctx);
609
610 /* r600_resource.c */
611 void r600_init_context_resource_functions(struct r600_context *r600);
612
613 /* r600_shader.c */
614 int r600_pipe_shader_create(struct pipe_context *ctx,
615 struct r600_pipe_shader *shader,
616 struct r600_shader_key key);
617 #ifdef HAVE_OPENCL
618 int r600_compute_shader_create(struct pipe_context * ctx,
619 LLVMModuleRef mod, struct r600_bytecode * bytecode);
620 #endif
621 void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader);
622
623 /* r600_state.c */
624 struct pipe_sampler_view *
625 r600_create_sampler_view_custom(struct pipe_context *ctx,
626 struct pipe_resource *texture,
627 const struct pipe_sampler_view *state,
628 unsigned width_first_level, unsigned height_first_level);
629 void r600_init_state_functions(struct r600_context *rctx);
630 void r600_init_atom_start_cs(struct r600_context *rctx);
631 void r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader);
632 void r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader);
633 void *r600_create_db_flush_dsa(struct r600_context *rctx);
634 void *r600_create_resolve_blend(struct r600_context *rctx);
635 void *r700_create_resolve_blend(struct r600_context *rctx);
636 void *r600_create_decompress_blend(struct r600_context *rctx);
637 bool r600_adjust_gprs(struct r600_context *rctx);
638 boolean r600_is_format_supported(struct pipe_screen *screen,
639 enum pipe_format format,
640 enum pipe_texture_target target,
641 unsigned sample_count,
642 unsigned usage);
643 void r600_update_db_shader_control(struct r600_context * rctx);
644
645 /* r600_texture.c */
646 void r600_init_screen_texture_functions(struct pipe_screen *screen);
647 void r600_init_surface_functions(struct r600_context *r600);
648 uint32_t r600_translate_texformat(struct pipe_screen *screen, enum pipe_format format,
649 const unsigned char *swizzle_view,
650 uint32_t *word4_p, uint32_t *yuv_format_p);
651 unsigned r600_texture_get_offset(struct r600_texture *rtex,
652 unsigned level, unsigned layer);
653 struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
654 struct pipe_resource *texture,
655 const struct pipe_surface *templ,
656 unsigned width, unsigned height);
657
658 /* r600_state_common.c */
659 void r600_init_common_state_functions(struct r600_context *rctx);
660 void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom);
661 void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom);
662 void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom);
663 void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom);
664 void r600_emit_vgt2_state(struct r600_context *rctx, struct r600_atom *atom);
665 void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom);
666 void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom);
667 void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom);
668 void r600_init_atom(struct r600_context *rctx, struct r600_atom *atom, unsigned id,
669 void (*emit)(struct r600_context *ctx, struct r600_atom *state),
670 unsigned num_dw);
671 void r600_vertex_buffers_dirty(struct r600_context *rctx);
672 void r600_sampler_views_dirty(struct r600_context *rctx,
673 struct r600_samplerview_state *state);
674 void r600_sampler_states_dirty(struct r600_context *rctx,
675 struct r600_sampler_states *state);
676 void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state);
677 void r600_draw_rectangle(struct blitter_context *blitter,
678 int x1, int y1, int x2, int y2, float depth,
679 enum blitter_attrib_type type, const union pipe_color_union *attrib);
680 uint32_t r600_translate_stencil_op(int s_op);
681 uint32_t r600_translate_fill(uint32_t func);
682 unsigned r600_tex_wrap(unsigned wrap);
683 unsigned r600_tex_filter(unsigned filter);
684 unsigned r600_tex_mipfilter(unsigned filter);
685 unsigned r600_tex_compare(unsigned compare);
686 bool sampler_state_needs_border_color(const struct pipe_sampler_state *state);
687
688 /*
689 * Helpers for building command buffers
690 */
691
692 #define PKT3_SET_CONFIG_REG 0x68
693 #define PKT3_SET_CONTEXT_REG 0x69
694 #define PKT3_SET_CTL_CONST 0x6F
695 #define PKT3_SET_LOOP_CONST 0x6C
696
697 #define R600_CONFIG_REG_OFFSET 0x08000
698 #define R600_CONTEXT_REG_OFFSET 0x28000
699 #define R600_CTL_CONST_OFFSET 0x3CFF0
700 #define R600_LOOP_CONST_OFFSET 0X0003E200
701 #define EG_LOOP_CONST_OFFSET 0x0003A200
702
703 #define PKT_TYPE_S(x) (((x) & 0x3) << 30)
704 #define PKT_COUNT_S(x) (((x) & 0x3FFF) << 16)
705 #define PKT3_IT_OPCODE_S(x) (((x) & 0xFF) << 8)
706 #define PKT3_PREDICATE(x) (((x) >> 0) & 0x1)
707 #define PKT3(op, count, predicate) (PKT_TYPE_S(3) | PKT_COUNT_S(count) | PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))
708
709 #define RADEON_CP_PACKET3_COMPUTE_MODE 0x00000002
710
711 /*Evergreen Compute packet3*/
712 #define PKT3C(op, count, predicate) (PKT_TYPE_S(3) | PKT3_IT_OPCODE_S(op) | PKT_COUNT_S(count) | PKT3_PREDICATE(predicate) | RADEON_CP_PACKET3_COMPUTE_MODE)
713
/* Append one dword to a command buffer. The caller must have reserved
 * space via one of the *_seq helpers (they assert on capacity). */
714 static INLINE void r600_store_value(struct r600_command_buffer *cb, unsigned value)
715 {
716 cb->buf[cb->num_dw++] = value;
717 }
718
/* Begin a SET_CONFIG_REG packet for `num` consecutive registers starting
 * at `reg`; follow with `num` r600_store_value() calls. */
719 static INLINE void r600_store_config_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
720 {
/* reg must lie in the config-register aperture. */
721 assert(reg < R600_CONTEXT_REG_OFFSET);
722 assert(cb->num_dw+2+num <= cb->max_num_dw);
723 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CONFIG_REG, num, 0);
/* >> 2 converts the byte offset into the dword index the packet expects. */
724 cb->buf[cb->num_dw++] = (reg - R600_CONFIG_REG_OFFSET) >> 2;
725 }
726
727 /**
728 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
729 * shaders.
730 */
/* Begin a SET_CONTEXT_REG packet for `num` consecutive registers starting
 * at `reg`; cb->pkt_flags is ORed into the header (compute mode). */
731 static INLINE void r600_store_context_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
732 {
733 assert(reg >= R600_CONTEXT_REG_OFFSET && reg < R600_CTL_CONST_OFFSET);
734 assert(cb->num_dw+2+num <= cb->max_num_dw);
735 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CONTEXT_REG, num, 0) | cb->pkt_flags;
736 cb->buf[cb->num_dw++] = (reg - R600_CONTEXT_REG_OFFSET) >> 2;
737 }
738
739 /**
740 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
741 * shaders.
742 */
/* Begin a SET_CTL_CONST packet for `num` consecutive constants starting
 * at `reg`; cb->pkt_flags is ORed into the header (compute mode). */
743 static INLINE void r600_store_ctl_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
744 {
745 assert(reg >= R600_CTL_CONST_OFFSET);
746 assert(cb->num_dw+2+num <= cb->max_num_dw);
747 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CTL_CONST, num, 0) | cb->pkt_flags;
748 cb->buf[cb->num_dw++] = (reg - R600_CTL_CONST_OFFSET) >> 2;
749 }
750
/* Begin a SET_LOOP_CONST packet (R6xx/R7xx loop-const aperture).
 * NOTE(review): unlike eg_store_loop_const_seq this does not OR in
 * cb->pkt_flags — presumably because compute mode is Evergreen-only;
 * confirm before using from compute paths. */
751 static INLINE void r600_store_loop_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
752 {
753 assert(reg >= R600_LOOP_CONST_OFFSET);
754 assert(cb->num_dw+2+num <= cb->max_num_dw);
755 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_LOOP_CONST, num, 0);
756 cb->buf[cb->num_dw++] = (reg - R600_LOOP_CONST_OFFSET) >> 2;
757 }
758
759 /**
760 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
761 * shaders.
762 */
/* Evergreen variant: loop consts live at EG_LOOP_CONST_OFFSET and the
 * header carries cb->pkt_flags (compute mode). */
763 static INLINE void eg_store_loop_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
764 {
765 assert(reg >= EG_LOOP_CONST_OFFSET);
766 assert(cb->num_dw+2+num <= cb->max_num_dw);
767 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_LOOP_CONST, num, 0) | cb->pkt_flags;
768 cb->buf[cb->num_dw++] = (reg - EG_LOOP_CONST_OFFSET) >> 2;
769 }
770
/* Store a single config-register write (packet header + one value). */
771 static INLINE void r600_store_config_reg(struct r600_command_buffer *cb, unsigned reg, unsigned value)
772 {
773 r600_store_config_reg_seq(cb, reg, 1);
774 r600_store_value(cb, value);
775 }
776
/* Store a single context-register write (packet header + one value). */
777 static INLINE void r600_store_context_reg(struct r600_command_buffer *cb, unsigned reg, unsigned value)
778 {
779 r600_store_context_reg_seq(cb, reg, 1);
780 r600_store_value(cb, value);
781 }
782
/* Store a single CTL constant write (packet header + one value). */
783 static INLINE void r600_store_ctl_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
784 {
785 r600_store_ctl_const_seq(cb, reg, 1);
786 r600_store_value(cb, value);
787 }
788
/* Store a single R6xx/R7xx loop constant (packet header + one value). */
789 static INLINE void r600_store_loop_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
790 {
791 r600_store_loop_const_seq(cb, reg, 1);
792 r600_store_value(cb, value);
793 }
794
/* Store a single Evergreen loop constant (packet header + one value). */
795 static INLINE void eg_store_loop_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
796 {
797 eg_store_loop_const_seq(cb, reg, 1);
798 r600_store_value(cb, value);
799 }
800
801 void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw);
802 void r600_release_command_buffer(struct r600_command_buffer *cb);
803
804 /*
805 * Helpers for emitting state into a command stream directly.
806 */
807
/* Add a buffer relocation to the CS and return the winsys reloc index
 * multiplied by 4 — presumably the form expected in reloc NOP dwords;
 * confirm against the radeon winsys. */
808 static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_resource *rbo,
809 enum radeon_bo_usage usage)
810 {
/* usage must name at least read or write. */
811 assert(usage);
812 return ctx->ws->cs_add_reloc(ctx->cs, rbo->cs_buf, usage, rbo->domains) * 4;
813 }
814
/* Append one dword directly to the CS. The caller is responsible for
 * having reserved space (the *_seq helpers below assert on it). */
815 static INLINE void r600_write_value(struct radeon_winsys_cs *cs, unsigned value)
816 {
817 cs->buf[cs->cdw++] = value;
818 }
819
/* Append `num` dwords from ptr directly to the CS. */
820 static INLINE void r600_write_array(struct radeon_winsys_cs *cs, unsigned num, unsigned *ptr)
821 {
822 assert(cs->cdw+num <= RADEON_MAX_CMDBUF_DWORDS);
823 memcpy(&cs->buf[cs->cdw], ptr, num * sizeof(ptr[0]));
824 cs->cdw += num;
825 }
826
/* Begin a SET_CONFIG_REG packet in the CS for `num` consecutive registers
 * starting at `reg`; follow with `num` r600_write_value() calls. */
827 static INLINE void r600_write_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
828 {
829 assert(reg < R600_CONTEXT_REG_OFFSET);
830 assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
831 cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONFIG_REG, num, 0);
/* >> 2 converts the byte offset into the dword index the packet expects. */
832 cs->buf[cs->cdw++] = (reg - R600_CONFIG_REG_OFFSET) >> 2;
833 }
834
/* Begin a SET_CONTEXT_REG packet in the CS for `num` consecutive registers
 * starting at `reg`. */
835 static INLINE void r600_write_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
836 {
837 assert(reg >= R600_CONTEXT_REG_OFFSET && reg < R600_CTL_CONST_OFFSET);
838 assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
839 cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, num, 0);
840 cs->buf[cs->cdw++] = (reg - R600_CONTEXT_REG_OFFSET) >> 2;
841 }
842
/* Same as r600_write_context_reg_seq, but flags the packet for the
 * compute engine. */
843 static INLINE void r600_write_compute_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
844 {
845 r600_write_context_reg_seq(cs, reg, num);
846 /* Set the compute bit on the packet header */
847 cs->buf[cs->cdw - 2] |= RADEON_CP_PACKET3_COMPUTE_MODE;
848 }
849
/* Begin a SET_CTL_CONST packet in the CS for `num` consecutive constants
 * starting at `reg`. */
850 static INLINE void r600_write_ctl_const_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
851 {
852 assert(reg >= R600_CTL_CONST_OFFSET);
853 assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
854 cs->buf[cs->cdw++] = PKT3(PKT3_SET_CTL_CONST, num, 0);
855 cs->buf[cs->cdw++] = (reg - R600_CTL_CONST_OFFSET) >> 2;
856 }
857
/* Write a single config register directly to the CS. */
858 static INLINE void r600_write_config_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
859 {
860 r600_write_config_reg_seq(cs, reg, 1);
861 r600_write_value(cs, value);
862 }
863
/* Write a single context register directly to the CS. */
864 static INLINE void r600_write_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
865 {
866 r600_write_context_reg_seq(cs, reg, 1);
867 r600_write_value(cs, value);
868 }
869
/* Write a single context register in compute mode directly to the CS. */
870 static INLINE void r600_write_compute_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
871 {
872 r600_write_compute_context_reg_seq(cs, reg, 1);
873 r600_write_value(cs, value);
874 }
875
/* Write a single CTL constant directly to the CS. */
876 static INLINE void r600_write_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
877 {
878 r600_write_ctl_const_seq(cs, reg, 1);
879 r600_write_value(cs, value);
880 }
881
882 /*
883 * common helpers
884 */
/* Convert a float to unsigned fixed point with frac_bits fractional bits.
 * Truncates toward zero; the caller must pass a non-negative, in-range
 * value and frac_bits < 31. */
static inline uint32_t S_FIXED(float value, uint32_t frac_bits)
{
	const float scale = (float)(1u << frac_bits);
	return (uint32_t)(value * scale);
}
889 #define ALIGN_DIVUP(x, y) (((x) + (y) - 1) / (y))
890
/* Map a max-anisotropy value to the hw MAX_ANISO_RATIO field:
 * <=1x -> 0, 2x -> 1, 4x -> 2, 8x -> 3, anything higher -> 4 (16x). */
static inline unsigned r600_tex_aniso_filter(unsigned filter)
{
	unsigned level = 0;
	unsigned threshold = 1;

	while (level < 4 && filter > threshold) {
		threshold *= 2;
		level++;
	}
	return level;
}
899
/* Pack a float into 12.4 unsigned fixed point, clamping to [0, 0xffff]. */
static inline unsigned r600_pack_float_12p4(float x)
{
	if (x <= 0.0f)
		return 0;
	if (x >= 4096.0f)
		return 0xffff;
	return (unsigned)(x * 16.0f);
}
906
/* Return the GPU virtual address of a resource's underlying buffer. */
907 static INLINE uint64_t r600_resource_va(struct pipe_screen *screen, struct pipe_resource *resource)
908 {
909 struct r600_screen *rscreen = (struct r600_screen*)screen;
910 struct r600_resource *rresource = (struct r600_resource*)resource;
911
912 return rscreen->ws->buffer_get_virtual_address(rresource->cs_buf);
913 }
914
915 #endif