1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #ifndef BRWCONTEXT_INC
34 #define BRWCONTEXT_INC
35
36 #include "intel_context.h"
37 #include "brw_structs.h"
38 #include "main/imports.h"
39
40
41 /* Glossary:
42 *
43 * URB - uniform resource buffer. A mid-sized buffer which is
44 * partitioned between the fixed function units and used for passing
45 * values (vertices, primitives, constants) between them.
46 *
47 * CURBE - constant URB entry. An urb region (entry) used to hold
48 * constant values which the fixed function units can be instructed to
49 * preload into the GRF when spawning a thread.
50 *
51 * VUE - vertex URB entry. An urb entry holding a vertex and usually
52 * a vertex header. The header contains control information and
53 * things like primitive type, Begin/end flags and clip codes.
54 *
55 * PUE - primitive URB entry. An urb entry produced by the setup (SF)
56 * unit holding rasterization and interpolation parameters.
57 *
58 * GRF - general register file. One of several register files
59 * addressable by programmed threads. The inputs (r0, payload, curbe,
60 * urb) of the thread are preloaded to this area before the thread is
61 * spawned. The registers are individually 8 dwords wide and suitable
62 * for general usage. Registers holding thread input values are not
63 * special and may be overwritten.
64 *
65 * MRF - message register file. Threads communicate (and terminate)
66 * by sending messages. Message parameters are placed in contiguous
67 * MRF registers. All program output is via these messages. URB
68 * entries are populated by sending a message to the shared URB
69 * function containing the new data, together with a control word,
70 * often an unmodified copy of R0.
71 *
72 * R0 - GRF register 0. Typically holds control information used when
73 * sending messages to other threads.
74 *
75 * EU or GEN4 EU: The name of the programmable subsystem of the
76 * i965 hardware. Threads are executed by the EU, the registers
77 * described above are part of the EU architecture.
78 *
79 * Fixed function units:
80 *
81 * CS - Command streamer. Notional first unit, little software
82 * interaction. Holds the URB entries used for constant data, i.e. the
83 * CURBEs.
84 *
85 * VF/VS - Vertex Fetch / Vertex Shader. The fixed function part of
86 * this unit is responsible for pulling vertices out of vertex buffers
87 * in VRAM and injecting them into the processing pipe as VUEs. If
88 * enabled, it first passes them to a VS thread which is a good place
89 * for the driver to implement any active vertex shader.
90 *
91 * GS - Geometry Shader. This corresponds to a new DX10 concept. If
92 * enabled, incoming strips etc are passed to GS threads in individual
93 * line/triangle/point units. The GS thread may perform arbitrary
94 * computation and emit whatever primitives with whatever vertices it
95 * chooses. This makes GS an excellent place to implement GL's
96 * unfilled polygon modes, though of course it is capable of much
97 * more. Additionally, GS is used to translate away primitives not
98 * handled by later units, including quads and line loops.
99 *
100 * CLIP - Clipper. Mesa's clipping algorithms are imported to run on
101 * this unit. The fixed function part performs clip testing against
102 * the 6 fixed clip planes and makes decisions on whether or not the
103 * incoming primitive needs to be passed to a thread for clipping.
104 * User clip planes are handled via cooperation with the VS thread.
105 *
106 * SF - Strips/Fans or Setup: Triangles are prepared for
107 * rasterization. Interpolation coefficients are calculated.
108 * Flatshading and two-sided lighting are usually performed here.
109 *
110 * WM - Windower. Interpolation of vertex attributes performed here.
111 * Fragment shader implemented here. SIMD aspects of EU taken full
112 * advantage of, as pixels are processed in blocks of 16.
113 *
114 * CC - Color Calculator. No EU threads associated with this unit.
115 * Handles blending and (presumably) depth and stencil testing.
116 */
117
118
119 #define BRW_MAX_CURBE (32*16)
120
121 struct brw_context;
122
123 enum brw_state_id {
124 BRW_STATE_URB_FENCE,
125 BRW_STATE_FRAGMENT_PROGRAM,
126 BRW_STATE_VERTEX_PROGRAM,
127 BRW_STATE_INPUT_DIMENSIONS,
128 BRW_STATE_CURBE_OFFSETS,
129 BRW_STATE_REDUCED_PRIMITIVE,
130 BRW_STATE_PRIMITIVE,
131 BRW_STATE_CONTEXT,
132 BRW_STATE_WM_INPUT_DIMENSIONS,
133 BRW_STATE_PSP,
134 BRW_STATE_WM_SURFACES,
135 BRW_STATE_VS_BINDING_TABLE,
136 BRW_STATE_GS_BINDING_TABLE,
137 BRW_STATE_PS_BINDING_TABLE,
138 BRW_STATE_INDICES,
139 BRW_STATE_VERTICES,
140 BRW_STATE_BATCH,
141 BRW_STATE_NR_WM_SURFACES,
142 BRW_STATE_NR_VS_SURFACES,
143 BRW_STATE_INDEX_BUFFER,
144 BRW_STATE_VS_CONSTBUF,
145 BRW_STATE_WM_CONSTBUF,
146 BRW_STATE_PROGRAM_CACHE,
147 BRW_STATE_STATE_BASE_ADDRESS,
148 };
149
150 #define BRW_NEW_URB_FENCE (1 << BRW_STATE_URB_FENCE)
151 #define BRW_NEW_FRAGMENT_PROGRAM (1 << BRW_STATE_FRAGMENT_PROGRAM)
152 #define BRW_NEW_VERTEX_PROGRAM (1 << BRW_STATE_VERTEX_PROGRAM)
153 #define BRW_NEW_INPUT_DIMENSIONS (1 << BRW_STATE_INPUT_DIMENSIONS)
154 #define BRW_NEW_CURBE_OFFSETS (1 << BRW_STATE_CURBE_OFFSETS)
155 #define BRW_NEW_REDUCED_PRIMITIVE (1 << BRW_STATE_REDUCED_PRIMITIVE)
156 #define BRW_NEW_PRIMITIVE (1 << BRW_STATE_PRIMITIVE)
157 #define BRW_NEW_CONTEXT (1 << BRW_STATE_CONTEXT)
158 #define BRW_NEW_WM_INPUT_DIMENSIONS (1 << BRW_STATE_WM_INPUT_DIMENSIONS)
159 #define BRW_NEW_PSP (1 << BRW_STATE_PSP)
160 #define BRW_NEW_WM_SURFACES (1 << BRW_STATE_WM_SURFACES)
161 #define BRW_NEW_VS_BINDING_TABLE (1 << BRW_STATE_VS_BINDING_TABLE)
162 #define BRW_NEW_GS_BINDING_TABLE (1 << BRW_STATE_GS_BINDING_TABLE)
163 #define BRW_NEW_PS_BINDING_TABLE (1 << BRW_STATE_PS_BINDING_TABLE)
164 #define BRW_NEW_INDICES (1 << BRW_STATE_INDICES)
165 #define BRW_NEW_VERTICES (1 << BRW_STATE_VERTICES)
166 /**
167 * Used for any batch entry with a relocated pointer that will be used
168 * by any 3D rendering.
169 */
170 #define BRW_NEW_BATCH (1 << BRW_STATE_BATCH)
171 /** \see brw.state.depth_region */
172 #define BRW_NEW_NR_WM_SURFACES (1 << BRW_STATE_NR_WM_SURFACES)
173 #define BRW_NEW_NR_VS_SURFACES (1 << BRW_STATE_NR_VS_SURFACES)
174 #define BRW_NEW_INDEX_BUFFER (1 << BRW_STATE_INDEX_BUFFER)
175 #define BRW_NEW_VS_CONSTBUF (1 << BRW_STATE_VS_CONSTBUF)
176 #define BRW_NEW_WM_CONSTBUF (1 << BRW_STATE_WM_CONSTBUF)
177 #define BRW_NEW_PROGRAM_CACHE (1 << BRW_STATE_PROGRAM_CACHE)
178 #define BRW_NEW_STATE_BASE_ADDRESS (1 << BRW_STATE_STATE_BASE_ADDRESS)
179
180 struct brw_state_flags {
181 /** State update flags signalled by mesa internals */
182 GLuint mesa;
183 /**
184 * State update flags signalled as the result of brw_tracked_state updates
185 */
186 GLuint brw;
187 /** State update flags signalled by brw_state_cache.c searches */
188 GLuint cache;
189 };
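/* Illustrative note (a sketch of how these flags are used, not a quote
 * of the upload code): a state change is signalled by setting the
 * corresponding bit in the matching field of brw->state.dirty, e.g.
 *
 *    brw->state.dirty.brw |= BRW_NEW_VERTICES;
 *
 * The accumulated flags are consumed when state is validated and
 * emitted for the next primitive.
 */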
190
191 enum state_struct_type {
192 AUB_TRACE_VS_STATE = 1,
193 AUB_TRACE_GS_STATE = 2,
194 AUB_TRACE_CLIP_STATE = 3,
195 AUB_TRACE_SF_STATE = 4,
196 AUB_TRACE_WM_STATE = 5,
197 AUB_TRACE_CC_STATE = 6,
198 AUB_TRACE_CLIP_VP_STATE = 7,
199 AUB_TRACE_SF_VP_STATE = 8,
200 AUB_TRACE_CC_VP_STATE = 0x9,
201 AUB_TRACE_SAMPLER_STATE = 0xa,
202 AUB_TRACE_KERNEL_INSTRUCTIONS = 0xb,
203 AUB_TRACE_SCRATCH_SPACE = 0xc,
204 AUB_TRACE_SAMPLER_DEFAULT_COLOR = 0xd,
205
206 AUB_TRACE_SCISSOR_STATE = 0x15,
207 AUB_TRACE_BLEND_STATE = 0x16,
208 AUB_TRACE_DEPTH_STENCIL_STATE = 0x17,
209
210 /* Not written to .aub files the same way the structures above are. */
211 AUB_TRACE_NO_TYPE = 0x100,
212 AUB_TRACE_BINDING_TABLE = 0x101,
213 AUB_TRACE_SURFACE_STATE = 0x102,
214 AUB_TRACE_VS_CONSTANTS = 0x103,
215 };
216
217 /** Subclass of Mesa vertex program */
218 struct brw_vertex_program {
219 struct gl_vertex_program program;
220 GLuint id;
221 GLboolean use_const_buffer;
222 };
223
224
225 /** Subclass of Mesa fragment program */
226 struct brw_fragment_program {
227 struct gl_fragment_program program;
228 GLuint id; /**< serial no. to identify frag progs, never re-used */
229
230 /** for debugging, which texture units are referenced */
231 GLbitfield tex_units_used;
232 };
233
234 struct brw_shader {
235 struct gl_shader base;
236
237 /** Shader IR transformed for native compile, at link time. */
238 struct exec_list *ir;
239 };
240
241 struct brw_shader_program {
242 struct gl_shader_program base;
243 };
244
245 enum param_conversion {
246 PARAM_NO_CONVERT,
247 PARAM_CONVERT_F2I,
248 PARAM_CONVERT_F2U,
249 PARAM_CONVERT_F2B,
250 };
251
252 /* Data about a particular attempt to compile a program. Note that
253 * there can be many of these, one for each GL state combination
254 * (captured in a different brw_wm_prog_key struct), each with its own
255 * compiled program:
256 */
257 struct brw_wm_prog_data {
258 GLuint curb_read_length;
259 GLuint urb_read_length;
260
261 GLuint first_curbe_grf;
262 GLuint first_curbe_grf_16;
263 GLuint reg_blocks;
264 GLuint reg_blocks_16;
265 GLuint total_scratch;
266
267 GLuint nr_params; /**< number of float params/constants */
268 GLuint nr_pull_params;
269 GLboolean error;
270 int dispatch_width;
271 uint32_t prog_offset_16;
272
273 /* Pointer to tracked values (only valid once
274 * _mesa_load_state_parameters has been called at runtime).
275 */
276 const float *param[MAX_UNIFORMS * 4]; /* should be: BRW_MAX_CURBE */
277 enum param_conversion param_convert[MAX_UNIFORMS * 4];
278 const float *pull_param[MAX_UNIFORMS * 4];
279 enum param_conversion pull_param_convert[MAX_UNIFORMS * 4];
280 };
281
282 struct brw_sf_prog_data {
283 GLuint urb_read_length;
284 GLuint total_grf;
285
286 /* Each vertex may have up to 12 attributes, 4 components each,
287 * except WPOS which requires only 2. (11*4 + 2) == 46 ==> 12
288 * rows.
289 *
290 * Actually we use 4 for each, so call it 12 rows.
291 */
292 GLuint urb_entry_size;
293 };
294
295 struct brw_clip_prog_data {
296 GLuint curb_read_length; /* user planes? */
297 GLuint clip_mode;
298 GLuint urb_read_length;
299 GLuint total_grf;
300 };
301
302 struct brw_gs_prog_data {
303 GLuint urb_read_length;
304 GLuint total_grf;
305 };
306
307 struct brw_vs_prog_data {
308 GLuint curb_read_length;
309 GLuint urb_read_length;
310 GLuint total_grf;
311 GLbitfield64 outputs_written;
312 GLuint nr_params; /**< number of float params/constants */
313
314 GLuint inputs_read;
315
316 /* Used for calculating urb partitions:
317 */
318 GLuint urb_entry_size;
319 };
320
321
322 /* Size == 0 if output either not written, or always [0,0,0,1]
323 */
324 struct brw_vs_ouput_sizes {
325 GLubyte output_size[VERT_RESULT_MAX];
326 };
327
328
329 /** Number of texture sampler units */
330 #define BRW_MAX_TEX_UNIT 16
331
332 /** Max number of render targets in a shader */
333 #define BRW_MAX_DRAW_BUFFERS 8
334
335 /**
336 * Size of our surface binding table for the WM.
337 * This contains pointers to the drawing surfaces and current texture
338 * objects and the fragment shader constant buffer (+1).
339 */
340 #define BRW_WM_MAX_SURF (BRW_MAX_DRAW_BUFFERS + BRW_MAX_TEX_UNIT + 1)
341
342 /**
343 * Helpers to convert drawing buffers, textures and constant buffers
344 * to surface binding table indexes, for WM.
345 */
346 #define SURF_INDEX_DRAW(d) (d)
347 #define SURF_INDEX_FRAG_CONST_BUFFER (BRW_MAX_DRAW_BUFFERS)
348 #define SURF_INDEX_TEXTURE(t) (BRW_MAX_DRAW_BUFFERS + 1 + (t))
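/* Worked example using the limits defined above: with
 * BRW_MAX_DRAW_BUFFERS == 8 and BRW_MAX_TEX_UNIT == 16,
 *
 *    SURF_INDEX_DRAW(0..7)         -> entries 0..7   (color targets)
 *    SURF_INDEX_FRAG_CONST_BUFFER  -> entry  8       (pull constants)
 *    SURF_INDEX_TEXTURE(0..15)     -> entries 9..24  (textures)
 *
 * giving BRW_WM_MAX_SURF == 8 + 16 + 1 == 25 entries in total.
 */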
349
350 /**
351 * Size of surface binding table for the VS.
352 * Only one constant buffer for now.
353 */
354 #define BRW_VS_MAX_SURF 1
355
356 /**
357 * Only a VS constant buffer
358 */
359 #define SURF_INDEX_VERT_CONST_BUFFER 0
360
361
362 enum brw_cache_id {
363 BRW_BLEND_STATE,
364 BRW_DEPTH_STENCIL_STATE,
365 BRW_COLOR_CALC_STATE,
366 BRW_CC_VP,
367 BRW_CC_UNIT,
368 BRW_WM_PROG,
369 BRW_SAMPLER,
370 BRW_WM_UNIT,
371 BRW_SF_PROG,
372 BRW_SF_VP,
373 BRW_SF_UNIT, /* scissor state on gen6 */
374 BRW_VS_UNIT,
375 BRW_VS_PROG,
376 BRW_GS_UNIT,
377 BRW_GS_PROG,
378 BRW_CLIP_VP,
379 BRW_CLIP_UNIT,
380 BRW_CLIP_PROG,
381
382 BRW_MAX_CACHE
383 };
384
385 struct brw_cache_item {
386 /**
387 * Effectively part of the key, cache_id identifies what kind of state
388 * buffer is involved, and also which brw->state.dirty.cache flag should
389 * be set when this cache item is chosen.
390 */
391 enum brw_cache_id cache_id;
392 /** 32-bit hash of the key data */
393 GLuint hash;
394 GLuint key_size; /* for variable-sized keys */
395 GLuint aux_size;
396 const void *key;
397
398 uint32_t offset;
399 uint32_t size;
400
401 struct brw_cache_item *next;
402 };
403
404
405
406 struct brw_cache {
407 struct brw_context *brw;
408
409 struct brw_cache_item **items;
410 drm_intel_bo *bo;
411 GLuint size, n_items;
412
413 uint32_t next_offset;
414 bool bo_used_by_gpu;
415 };
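/* Conceptually (a descriptive sketch, not the literal cache code):
 * state and program data are packed into cache->bo starting at
 * next_offset.  A lookup hashes the key, walks the items[] chains
 * comparing cache_id, hash, key_size and the key bytes, and on a hit
 * reuses the stored offset/size into cache->bo instead of re-emitting
 * the data.
 */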
416
417
418 /* Considered adding a member to this struct to document which flags
419 * an update might raise so that ordering of the state atoms can be
420 * checked or derived at runtime. Dropped the idea in favor of having
421 * a debug mode where the state is monitored for flags that are
422 * raised after they have already been tested against.
423 */
424 struct brw_tracked_state {
425 struct brw_state_flags dirty;
426 void (*prepare)( struct brw_context *brw );
427 void (*emit)( struct brw_context *brw );
428 };
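/* A hypothetical atom, purely for illustration (the real atoms are
 * defined in the various brw_*.c state files), might look like:
 *
 *    const struct brw_tracked_state brw_example_unit = {
 *       .dirty = {
 *          .mesa  = _NEW_DEPTH,
 *          .brw   = BRW_NEW_CONTEXT | BRW_NEW_BATCH,
 *          .cache = CACHE_NEW_CC_VP,   // one of the CACHE_NEW_* flags below
 *       },
 *       .prepare = example_prepare,
 *       .emit    = example_emit,
 *    };
 *
 * The state upload code walks the prepare/emit atom lists (see
 * prepare_atoms[]/emit_atoms[] in struct brw_context below) and runs an
 * atom whenever its dirty flags intersect the currently raised flags.
 */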
429
430 /* Flags for brw->state.cache.
431 */
432 #define CACHE_NEW_BLEND_STATE (1<<BRW_BLEND_STATE)
433 #define CACHE_NEW_DEPTH_STENCIL_STATE (1<<BRW_DEPTH_STENCIL_STATE)
434 #define CACHE_NEW_COLOR_CALC_STATE (1<<BRW_COLOR_CALC_STATE)
435 #define CACHE_NEW_CC_VP (1<<BRW_CC_VP)
436 #define CACHE_NEW_CC_UNIT (1<<BRW_CC_UNIT)
437 #define CACHE_NEW_WM_PROG (1<<BRW_WM_PROG)
438 #define CACHE_NEW_SAMPLER (1<<BRW_SAMPLER)
439 #define CACHE_NEW_WM_UNIT (1<<BRW_WM_UNIT)
440 #define CACHE_NEW_SF_PROG (1<<BRW_SF_PROG)
441 #define CACHE_NEW_SF_VP (1<<BRW_SF_VP)
442 #define CACHE_NEW_SF_UNIT (1<<BRW_SF_UNIT)
443 #define CACHE_NEW_VS_UNIT (1<<BRW_VS_UNIT)
444 #define CACHE_NEW_VS_PROG (1<<BRW_VS_PROG)
445 #define CACHE_NEW_GS_UNIT (1<<BRW_GS_UNIT)
446 #define CACHE_NEW_GS_PROG (1<<BRW_GS_PROG)
447 #define CACHE_NEW_CLIP_VP (1<<BRW_CLIP_VP)
448 #define CACHE_NEW_CLIP_UNIT (1<<BRW_CLIP_UNIT)
449 #define CACHE_NEW_CLIP_PROG (1<<BRW_CLIP_PROG)
450
451 struct brw_cached_batch_item {
452 struct header *header;
453 GLuint sz;
454 struct brw_cached_batch_item *next;
455 };
456
457
458
459 /* Protect against a future where VERT_ATTRIB_MAX > 32. Wouldn't life
460 * be easier if C allowed arrays of packed elements?
461 */
462 #define ATTRIB_BIT_DWORDS ((VERT_ATTRIB_MAX+31)/32)
463
464 struct brw_vertex_buffer {
465 /** Buffer object containing the uploaded vertex data */
466 drm_intel_bo *bo;
467 uint32_t offset;
468 /** Byte stride between elements in the uploaded array */
469 GLuint stride;
470 };
471 struct brw_vertex_element {
472 const struct gl_client_array *glarray;
473
474 int buffer;
475
476 /** The corresponding Mesa vertex attribute */
477 gl_vert_attrib attrib;
478 /** Size of a complete element */
479 GLuint element_size;
480 /** Offset of the first element within the buffer object */
481 unsigned int offset;
482 };
483
484
485
486 struct brw_vertex_info {
487 GLuint sizes[ATTRIB_BIT_DWORDS * 2]; /* sizes:2[VERT_ATTRIB_MAX] */
488 };
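/* Each attribute gets a 2-bit size field in the array above
 * (ATTRIB_BIT_DWORDS * 2 dwords == 2 bits per possible attribute).
 * A sketch of how attribute i's field could be read back, assuming
 * that packing:
 *
 *    GLuint size_bits = (info.sizes[i / 16] >> ((i % 16) * 2)) & 0x3;
 */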
489
490 struct brw_query_object {
491 struct gl_query_object Base;
492
493 /** Last query BO associated with this query. */
494 drm_intel_bo *bo;
495 /** First index in bo with query data for this object. */
496 int first_index;
497 /** Last index in bo with query data for this object. */
498 int last_index;
499 };
500
501
502 /**
503 * brw_context is derived from intel_context.
504 */
505 struct brw_context
506 {
507 struct intel_context intel; /**< base class, must be first field */
508 GLuint primitive;
509
510 GLboolean emit_state_always;
511 GLboolean has_surface_tile_offset;
512 GLboolean has_compr4;
513 GLboolean has_negative_rhw_bug;
514 GLboolean has_aa_line_parameters;
515 GLboolean has_pln;
516
517 struct {
518 struct brw_state_flags dirty;
519 /**
520 * List of buffers accumulated in brw_validate_state to receive
521 * drm_intel_bo_check_aperture treatment before exec, so we can
522 * know if we should flush the batch and try again before
523 * emitting primitives.
524 *
525 * This can be a fixed number as we only have a limited number of
526 * objects referenced from the batchbuffer in a primitive emit,
527 * consisting of the vertex buffers, pipelined state pointers,
528 * the CURBE, the depth buffer, and a query BO.
529 */
530 drm_intel_bo *validated_bos[VERT_ATTRIB_MAX + BRW_WM_MAX_SURF + 16];
531 int validated_bo_count;
532 } state;
533
534 struct brw_cache cache;
535 struct brw_cached_batch_item *cached_batch_items;
536
537 struct {
538 struct brw_vertex_element inputs[VERT_ATTRIB_MAX];
539 struct brw_vertex_buffer buffers[VERT_ATTRIB_MAX];
540 struct {
541 uint32_t handle;
542 uint32_t offset;
543 uint32_t stride;
544 } current_buffers[VERT_ATTRIB_MAX];
545
546 struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
547 GLuint nr_enabled;
548 GLuint nr_buffers, nr_current_buffers;
549
550 /* Summary of size and varying of active arrays, so we can check
551 * for changes to this state:
552 */
553 struct brw_vertex_info info;
554 unsigned int min_index, max_index;
555
556 /* Offset from start of vertex buffer so we can avoid redefining
557 * the same VB packed over and over again.
558 */
559 unsigned int start_vertex_bias;
560 } vb;
561
562 struct {
563 /**
564 * Index buffer for this draw_prims call.
565 *
566 * Updates are signaled by BRW_NEW_INDICES.
567 */
568 const struct _mesa_index_buffer *ib;
569
570 /* Updates are signaled by BRW_NEW_INDEX_BUFFER. */
571 drm_intel_bo *bo;
572 GLuint type;
573
574 /* Offset to index buffer index to use in CMD_3D_PRIM so that we can
575 * avoid re-uploading the IB packet over and over if we're actually
576 * referencing the same index buffer.
577 */
578 unsigned int start_vertex_offset;
579 } ib;
580
581 /* Active vertex program:
582 */
583 const struct gl_vertex_program *vertex_program;
584 const struct gl_fragment_program *fragment_program;
585
586 /* hw-dependent 3DSTATE_VF_STATISTICS opcode */
587 uint32_t CMD_VF_STATISTICS;
588 /* hw-dependent 3DSTATE_PIPELINE_SELECT opcode */
589 uint32_t CMD_PIPELINE_SELECT;
590 int vs_max_threads;
591 int wm_max_threads;
592
593 /* BRW_NEW_URB_ALLOCATIONS:
594 */
595 struct {
596 GLuint vsize; /* vertex size plus header in urb registers */
597 GLuint csize; /* constant buffer size in urb registers */
598 GLuint sfsize; /* setup data size in urb registers */
599
600 GLboolean constrained;
601
602 GLuint max_vs_entries; /* Maximum number of VS entries */
603 GLuint max_gs_entries; /* Maximum number of GS entries */
604
605 GLuint nr_vs_entries;
606 GLuint nr_gs_entries;
607 GLuint nr_clip_entries;
608 GLuint nr_sf_entries;
609 GLuint nr_cs_entries;
610
611 /* gen6:
612 * The length of each URB entry owned by the VS (or GS), as
613 * a number of 1024-bit (128-byte) rows. Should be >= 1.
614 *
615 * gen7: Same meaning, but in 512-bit (64-byte) rows.
616 */
617 GLuint vs_size;
618 GLuint gs_size;
619
620 GLuint vs_start;
621 GLuint gs_start;
622 GLuint clip_start;
623 GLuint sf_start;
624 GLuint cs_start;
625 GLuint size; /* Hardware URB size, in KB. */
626 } urb;
627
628
629 /* BRW_NEW_CURBE_OFFSETS:
630 */
631 struct {
632 GLuint wm_start; /**< pos of first wm const in CURBE buffer */
633 GLuint wm_size; /**< number of float[4] consts, multiple of 16 */
634 GLuint clip_start;
635 GLuint clip_size;
636 GLuint vs_start;
637 GLuint vs_size;
638 GLuint total_size;
639
640 drm_intel_bo *curbe_bo;
641 /** Offset within curbe_bo of space for current curbe entry */
642 GLuint curbe_offset;
643 /** Offset within curbe_bo of space for next curbe entry */
644 GLuint curbe_next_offset;
645
646 /**
647 * Copy of the last set of CURBEs uploaded. Frequently we'll end up
648 * in brw_curbe.c with the same set of constant data to be uploaded,
649 * so we'd rather not upload new constants in that case (it can cause
650 * a pipeline bubble since only up to 4 can be pipelined at a time).
651 */
652 GLfloat *last_buf;
653 /**
654 * Allocation for where to calculate the next set of CURBEs.
655 * It's a hot enough path that malloc/free of that data matters.
656 */
657 GLfloat *next_buf;
658 GLuint last_bufsz;
659 } curbe;
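/* Illustrative sketch (not the actual brw_curbe.c code) of how
 * last_buf/next_buf avoid redundant CURBE uploads: the new constants
 * are built into next_buf and only emitted if they differ from the
 * previous upload, e.g.
 *
 *    if (brw->curbe.last_buf == NULL ||
 *        bufsz != brw->curbe.last_bufsz ||
 *        memcmp(buf, brw->curbe.last_buf, bufsz) != 0) {
 *       // upload buf, then remember it as last_buf/last_bufsz
 *    }
 */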
660
661 struct {
662 struct brw_vs_prog_data *prog_data;
663 int8_t *constant_map; /* variable array following prog_data */
664
665 drm_intel_bo *const_bo;
666 /** Offset in the program cache to the VS program */
667 uint32_t prog_offset;
668 uint32_t state_offset;
669
670 /** Binding table of pointers to surf_bo entries */
671 uint32_t bind_bo_offset;
672 uint32_t surf_offset[BRW_VS_MAX_SURF];
673 GLuint nr_surfaces;
674
675 uint32_t push_const_offset; /* Offset in the batchbuffer */
676 int push_const_size; /* in 256-bit register increments */
677 } vs;
678
679 struct {
680 struct brw_gs_prog_data *prog_data;
681
682 GLboolean prog_active;
683 /** Offset in the program cache to the GS program pre-gen6 */
684 uint32_t prog_offset;
685 uint32_t state_offset;
686 } gs;
687
688 struct {
689 struct brw_clip_prog_data *prog_data;
690
691 /** Offset in the program cache to the CLIP program pre-gen6 */
692 uint32_t prog_offset;
693
694 /* Offset in the batch to the CLIP state on pre-gen6. */
695 uint32_t state_offset;
696
697 /* As of gen6, this is the offset in the batch to the CLIP VP,
698 * instead of vp_bo.
699 */
700 uint32_t vp_offset;
701 } clip;
702
703
704 struct {
705 struct brw_sf_prog_data *prog_data;
706
707 /** Offset in the program cache to the SF program pre-gen6 */
708 uint32_t prog_offset;
709 uint32_t state_offset;
710 uint32_t vp_offset;
711 } sf;
712
713 struct {
714 struct brw_wm_prog_data *prog_data;
715 struct brw_wm_compile *compile_data;
716
717 /** Input sizes, calculated from active vertex program.
718 * One bit per fragment program input attribute.
719 */
720 GLbitfield input_size_masks[4];
721
722 /** offsets in the batch to sampler default colors (texture border color)
723 */
724 uint32_t sdc_offset[BRW_MAX_TEX_UNIT];
725
726 GLuint render_surf;
727 GLuint nr_surfaces;
728
729 GLuint max_threads;
730 drm_intel_bo *scratch_bo;
731
732 GLuint sampler_count;
733 uint32_t sampler_offset;
734
735 /** Offset in the program cache to the WM program */
736 uint32_t prog_offset;
737
738 /** Binding table of pointers to surf_bo entries */
739 uint32_t bind_bo_offset;
740 uint32_t surf_offset[BRW_WM_MAX_SURF];
741 uint32_t state_offset; /* offset in batchbuffer to pre-gen6 WM state */
742
743 drm_intel_bo *const_bo; /* pull constant buffer. */
744 /**
745 * This is offset in the batch to the push constants on gen6.
746 *
747 * Pre-gen6, push constants live in the CURBE.
748 */
749 uint32_t push_const_offset;
750 } wm;
751
752
753 struct {
754 uint32_t state_offset;
755 uint32_t blend_state_offset;
756 uint32_t depth_stencil_state_offset;
757 uint32_t vp_offset;
758 } cc;
759
760 struct {
761 struct brw_query_object *obj;
762 drm_intel_bo *bo;
763 int index;
764 GLboolean active;
765 } query;
766 /* Used to give every program string a unique id
767 */
768 GLuint program_id;
769
770 int num_prepare_atoms, num_emit_atoms;
771 struct brw_tracked_state prepare_atoms[64], emit_atoms[64];
772
773 /* If (INTEL_DEBUG & DEBUG_BATCH) */
774 struct {
775 uint32_t offset;
776 uint32_t size;
777 enum state_struct_type type;
778 } *state_batch_list;
779 int state_batch_count;
780 };
781
782
783 #define BRW_PACKCOLOR8888(r,g,b,a) (((r)<<24) | ((g)<<16) | ((b)<<8) | (a))
784
785 struct brw_instruction_info {
786 char *name;
787 int nsrc;
788 int ndst;
789 GLboolean is_arith;
790 };
791 extern const struct brw_instruction_info brw_opcodes[128];
792
793 /*======================================================================
794 * brw_vtbl.c
795 */
796 void brwInitVtbl( struct brw_context *brw );
797
798 /*======================================================================
799 * brw_context.c
800 */
801 GLboolean brwCreateContext( int api,
802 const struct gl_config *mesaVis,
803 __DRIcontext *driContextPriv,
804 void *sharedContextPrivate);
805
806 /*======================================================================
807 * brw_queryobj.c
808 */
809 void brw_init_queryobj_functions(struct dd_function_table *functions);
810 void brw_prepare_query_begin(struct brw_context *brw);
811 void brw_emit_query_begin(struct brw_context *brw);
812 void brw_emit_query_end(struct brw_context *brw);
813
814 /*======================================================================
815 * brw_state_dump.c
816 */
817 void brw_debug_batch(struct intel_context *intel);
818
819 /*======================================================================
820 * brw_tex.c
821 */
822 void brw_validate_textures( struct brw_context *brw );
823
824
825 /*======================================================================
826 * brw_program.c
827 */
828 void brwInitFragProgFuncs( struct dd_function_table *functions );
829
830
831 /* brw_urb.c
832 */
833 void brw_upload_urb_fence(struct brw_context *brw);
834
835 /* brw_curbe.c
836 */
837 void brw_upload_cs_urb_state(struct brw_context *brw);
838
839 /* brw_disasm.c */
840 int brw_disasm (FILE *file, struct brw_instruction *inst, int gen);
841
842 /*======================================================================
843 * Inline conversion functions. These are better-typed than the
844 * macros used previously:
845 */
846 static INLINE struct brw_context *
847 brw_context( struct gl_context *ctx )
848 {
849 return (struct brw_context *)ctx;
850 }
851
852 static INLINE struct brw_vertex_program *
853 brw_vertex_program(struct gl_vertex_program *p)
854 {
855 return (struct brw_vertex_program *) p;
856 }
857
858 static INLINE const struct brw_vertex_program *
859 brw_vertex_program_const(const struct gl_vertex_program *p)
860 {
861 return (const struct brw_vertex_program *) p;
862 }
863
864 static INLINE struct brw_fragment_program *
865 brw_fragment_program(struct gl_fragment_program *p)
866 {
867 return (struct brw_fragment_program *) p;
868 }
869
870 static INLINE const struct brw_fragment_program *
871 brw_fragment_program_const(const struct gl_fragment_program *p)
872 {
873 return (const struct brw_fragment_program *) p;
874 }
875
876 static inline
877 float convert_param(enum param_conversion conversion, float param)
878 {
879 union {
880 float f;
881 uint32_t u;
882 int32_t i;
883 } fi;
884
885 switch (conversion) {
886 case PARAM_NO_CONVERT:
887 return param;
888 case PARAM_CONVERT_F2I:
889 fi.i = param;
890 return fi.f;
891 case PARAM_CONVERT_F2U:
892 fi.u = param;
893 return fi.f;
894 case PARAM_CONVERT_F2B:
895 if (param != 0.0)
896 fi.i = 1;
897 else
898 fi.i = 0;
899 return fi.f;
900 default:
901 return param;
902 }
903 }
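/* Illustrative use (a sketch, not the exact upload code): when the WM
 * push constants are copied out, each tracked value is passed through
 * its conversion before being written to the constant block:
 *
 *    for (i = 0; i < prog_data->nr_params; i++)
 *       buf[i] = convert_param(prog_data->param_convert[i],
 *                              *prog_data->param[i]);
 */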
904
905 /**
906 * Pre-gen6, the register file of the EUs was shared between threads,
907 * and each thread used some subset allocated on a 16-register block
908 * granularity. The unit states wanted these block counts.
909 */
910 static inline int
911 brw_register_blocks(int reg_count)
912 {
913 return ALIGN(reg_count, 16) / 16 - 1;
914 }
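/* For example, a program using 48 GRF registers yields
 * ALIGN(48, 16) / 16 - 1 == 2, i.e. the unit state field apparently
 * encodes the number of 16-register blocks minus one.
 */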
915
916 static inline uint32_t
917 brw_program_reloc(struct brw_context *brw, uint32_t state_offset,
918 uint32_t prog_offset)
919 {
920 struct intel_context *intel = &brw->intel;
921
922 if (intel->gen >= 5) {
923 /* Using state base address. */
924 return prog_offset;
925 }
926
927 drm_intel_bo_emit_reloc(intel->batch.bo,
928 state_offset,
929 brw->cache.bo,
930 prog_offset,
931 I915_GEM_DOMAIN_INSTRUCTION, 0);
932
933 return brw->cache.bo->offset + prog_offset;
934 }
935
936 GLboolean brw_do_cubemap_normalize(struct exec_list *instructions);
937
938 #endif