/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "pipe/p_context.h"

#include "util/u_upload_mgr.h"

#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_fallback.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_tex.h"
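
/* Upload of vertex and index data for the brw (i965) driver: translate
 * pipe vertex formats to BRW surface formats, upload user arrays through
 * u_upload_mgr, and emit the VERTEX_BUFFERS, VERTEX_ELEMENTS and
 * INDEX_BUFFER hardware packets.
 */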
unsigned brw_translate_surface_format( unsigned id )
{
   switch (id) {
   case PIPE_FORMAT_R64_FLOAT:
      return BRW_SURFACEFORMAT_R64_FLOAT;
   case PIPE_FORMAT_R64G64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64_FLOAT;
   case PIPE_FORMAT_R64G64B64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64B64_FLOAT;
   case PIPE_FORMAT_R64G64B64A64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64B64A64_FLOAT;

   case PIPE_FORMAT_R32_FLOAT:
      return BRW_SURFACEFORMAT_R32_FLOAT;
   case PIPE_FORMAT_R32G32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32_FLOAT;
   case PIPE_FORMAT_R32G32B32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32B32_FLOAT;
   case PIPE_FORMAT_R32G32B32A32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   case PIPE_FORMAT_R32_UNORM:
      return BRW_SURFACEFORMAT_R32_UNORM;
   case PIPE_FORMAT_R32G32_UNORM:
      return BRW_SURFACEFORMAT_R32G32_UNORM;
   case PIPE_FORMAT_R32G32B32_UNORM:
      return BRW_SURFACEFORMAT_R32G32B32_UNORM;
   case PIPE_FORMAT_R32G32B32A32_UNORM:
      return BRW_SURFACEFORMAT_R32G32B32A32_UNORM;

   case PIPE_FORMAT_R32_USCALED:
      return BRW_SURFACEFORMAT_R32_USCALED;
   case PIPE_FORMAT_R32G32_USCALED:
      return BRW_SURFACEFORMAT_R32G32_USCALED;
   case PIPE_FORMAT_R32G32B32_USCALED:
      return BRW_SURFACEFORMAT_R32G32B32_USCALED;
   case PIPE_FORMAT_R32G32B32A32_USCALED:
      return BRW_SURFACEFORMAT_R32G32B32A32_USCALED;

   case PIPE_FORMAT_R32_SNORM:
      return BRW_SURFACEFORMAT_R32_SNORM;
   case PIPE_FORMAT_R32G32_SNORM:
      return BRW_SURFACEFORMAT_R32G32_SNORM;
   case PIPE_FORMAT_R32G32B32_SNORM:
      return BRW_SURFACEFORMAT_R32G32B32_SNORM;
   case PIPE_FORMAT_R32G32B32A32_SNORM:
      return BRW_SURFACEFORMAT_R32G32B32A32_SNORM;

   case PIPE_FORMAT_R32_SSCALED:
      return BRW_SURFACEFORMAT_R32_SSCALED;
   case PIPE_FORMAT_R32G32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32_SSCALED;
   case PIPE_FORMAT_R32G32B32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32B32_SSCALED;
   case PIPE_FORMAT_R32G32B32A32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32B32A32_SSCALED;

   case PIPE_FORMAT_R16_UNORM:
      return BRW_SURFACEFORMAT_R16_UNORM;
   case PIPE_FORMAT_R16G16_UNORM:
      return BRW_SURFACEFORMAT_R16G16_UNORM;
   case PIPE_FORMAT_R16G16B16_UNORM:
      return BRW_SURFACEFORMAT_R16G16B16_UNORM;
   case PIPE_FORMAT_R16G16B16A16_UNORM:
      return BRW_SURFACEFORMAT_R16G16B16A16_UNORM;

   case PIPE_FORMAT_R16_USCALED:
      return BRW_SURFACEFORMAT_R16_USCALED;
   case PIPE_FORMAT_R16G16_USCALED:
      return BRW_SURFACEFORMAT_R16G16_USCALED;
   case PIPE_FORMAT_R16G16B16_USCALED:
      return BRW_SURFACEFORMAT_R16G16B16_USCALED;
   case PIPE_FORMAT_R16G16B16A16_USCALED:
      return BRW_SURFACEFORMAT_R16G16B16A16_USCALED;

   case PIPE_FORMAT_R16_SNORM:
      return BRW_SURFACEFORMAT_R16_SNORM;
   case PIPE_FORMAT_R16G16_SNORM:
      return BRW_SURFACEFORMAT_R16G16_SNORM;
   case PIPE_FORMAT_R16G16B16_SNORM:
      return BRW_SURFACEFORMAT_R16G16B16_SNORM;
   case PIPE_FORMAT_R16G16B16A16_SNORM:
      return BRW_SURFACEFORMAT_R16G16B16A16_SNORM;

   case PIPE_FORMAT_R16_SSCALED:
      return BRW_SURFACEFORMAT_R16_SSCALED;
   case PIPE_FORMAT_R16G16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16_SSCALED;
   case PIPE_FORMAT_R16G16B16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16B16_SSCALED;
   case PIPE_FORMAT_R16G16B16A16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16B16A16_SSCALED;

   case PIPE_FORMAT_R8_UNORM:
      return BRW_SURFACEFORMAT_R8_UNORM;
   case PIPE_FORMAT_R8G8_UNORM:
      return BRW_SURFACEFORMAT_R8G8_UNORM;
   case PIPE_FORMAT_R8G8B8_UNORM:
      return BRW_SURFACEFORMAT_R8G8B8_UNORM;
   case PIPE_FORMAT_R8G8B8A8_UNORM:
      return BRW_SURFACEFORMAT_R8G8B8A8_UNORM;

   case PIPE_FORMAT_R8_USCALED:
      return BRW_SURFACEFORMAT_R8_USCALED;
   case PIPE_FORMAT_R8G8_USCALED:
      return BRW_SURFACEFORMAT_R8G8_USCALED;
   case PIPE_FORMAT_R8G8B8_USCALED:
      return BRW_SURFACEFORMAT_R8G8B8_USCALED;
   case PIPE_FORMAT_R8G8B8A8_USCALED:
      return BRW_SURFACEFORMAT_R8G8B8A8_USCALED;

   case PIPE_FORMAT_R8_SNORM:
      return BRW_SURFACEFORMAT_R8_SNORM;
   case PIPE_FORMAT_R8G8_SNORM:
      return BRW_SURFACEFORMAT_R8G8_SNORM;
   case PIPE_FORMAT_R8G8B8_SNORM:
      return BRW_SURFACEFORMAT_R8G8B8_SNORM;
   case PIPE_FORMAT_R8G8B8A8_SNORM:
      return BRW_SURFACEFORMAT_R8G8B8A8_SNORM;

   case PIPE_FORMAT_R8_SSCALED:
      return BRW_SURFACEFORMAT_R8_SSCALED;
   case PIPE_FORMAT_R8G8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8_SSCALED;
   case PIPE_FORMAT_R8G8B8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8B8_SSCALED;
   case PIPE_FORMAT_R8G8B8A8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8B8A8_SSCALED;

   default:
      assert(0);
      return 0;
   }
}
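
/* Map an index element size in bytes (1, 2 or 4) onto the index_format
 * field of the hardware INDEX_BUFFER packet.
 */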
static unsigned get_index_type(int type)
{
   switch (type) {
   case 1: return BRW_INDEX_BYTE;
   case 2: return BRW_INDEX_WORD;
   case 4: return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}
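
/* Prepare phase for vertex data: compute each enabled element's size,
 * upload user (application-memory) vertex arrays through the upload
 * manager, reference named buffer objects directly, and register every
 * vertex BO with the batch validation list.
 */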
static boolean brw_prepare_vertices(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
   GLuint i;
   const unsigned char *ptr = NULL;
   GLuint interleave = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct pipe_vertex_buffer *vb = input->vb;   /* XXX: assumed binding */
      GLuint count, offset;

      input->element_size = get_size(input->glarray->Type) * input->glarray->Size;

      if (brw_is_user_buffer(vb)) {
         u_upload_buffer( brw->upload,
                          min_index * vb->stride,
                          (max_index + 1 - min_index) * vb->stride,
                          vb->buffer,      /* XXX: trailing arguments assumed */
                          &offset,
                          &input->bo );
      }
      else {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(vb->buffer);   /* XXX: assumed */

         count = vb->stride == 0 ? 1 : max_index + 1 - min_index;

         /* Named buffer object: Just reference its contents directly. */
         dri_bo_unreference(input->bo);
         input->bo = intel_bufferobj_buffer(intel, intel_buffer,
                                            INTEL_READ);
         dri_bo_reference(input->bo);

         input->offset = (unsigned long)offset;
         input->stride = vb->stride;
         input->count = count;

         assert(input->offset < input->bo->size);
      }
   }
   brw_prepare_query_begin(brw);

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      brw_add_validated_bo(brw, input->bo);
   }

   return GL_TRUE;
}
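
/* Emit phase: write the VERTEX_BUFFERS and VERTEX_ELEMENTS packets for
 * all enabled inputs into the batch (or a single padding VERTEX_ELEMENT
 * when the VS reads no vertex inputs at all).
 */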
static void brw_emit_vertices(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   brw_emit_query_begin(brw);
   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but they don't do anything unless
    * a VE loads from them.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3, IGNORE_CLIPRECTS);
      OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
      OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                BRW_VE0_VALID |
                (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }
   /* Now emit VB and VEP state packets.
    *
    * This still defines a hardware VB for each input, even if they
    * are interleaved or from the same VBO.  TBD if this makes a
    * performance difference.
    */
   BEGIN_BATCH(1 + brw->vb.nr_enabled * 4, IGNORE_CLIPRECTS);
   OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
             ((1 + brw->vb.nr_enabled * 4) - 2));
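
   /* Four dwords per vertex buffer: the buffer index, access mode and
    * pitch; a relocation for the start address; the end address (IGDNG)
    * or vertex count; and the instance data step rate.
    */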
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      OUT_BATCH((i << BRW_VB0_INDEX_SHIFT) |
                BRW_VB0_ACCESS_VERTEXDATA |
                (input->stride << BRW_VB0_PITCH_SHIFT));
      OUT_RELOC(input->bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                input->offset);
      if (BRW_IS_IGDNG(brw)) {
         if (input->stride) {
            OUT_RELOC(input->bo,
                      I915_GEM_DOMAIN_VERTEX, 0,
                      input->offset + input->stride * input->count - 1);
         } else {
            assert(input->count == 1);
            OUT_RELOC(input->bo,
                      I915_GEM_DOMAIN_VERTEX, 0,
                      input->offset + input->element_size - 1);
         }
      } else
         OUT_BATCH(input->stride ? input->count : 0);
      OUT_BATCH(0); /* Instance data step rate */
   }
   ADVANCE_BATCH();
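
   /* Two dwords per vertex element: VE0 holds the source buffer index,
    * valid bit, surface format and source offset; VE1 holds the four
    * component controls and, pre-IGDNG, the destination offset in the
    * vertex URB entry.
    */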
   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2, IGNORE_CLIPRECTS);
   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + brw->vb.nr_enabled * 2) - 2));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }
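
      /* The deliberate fall-through above pads the missing components of
       * short arrays with (0, 0, 0, 1): e.g. a two-component array keeps
       * x and y from the source, stores 0 into z and 1.0f into w.
       */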
      OUT_BATCH((i << BRW_VE0_INDEX_SHIFT) |
                BRW_VE0_VALID |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));

      if (BRW_IS_IGDNG(brw))
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   ADVANCE_BATCH();
}
const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = 0,
   },
   .prepare = brw_prepare_vertices,
   .emit = brw_emit_vertices,
};
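
/* Prepare phase for index data: user-space index arrays are uploaded
 * into a buffer object; bound element-array buffers are referenced
 * directly, with unaligned offsets rebased into a temporary BO.
 */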
static void brw_prepare_indices(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   dri_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;
   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      brw->ib.start_vertex_offset = 0;

      /* Get new bufferobj, offset:
       */
      get_space(brw, ib_size, &bo, &offset);

      /* Straight upload
       */
      brw_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;
      brw->ib.start_vertex_offset = 0;
      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         get_space(brw, ib_size, &bo, &offset);

         dri_bo_subdata(bo, offset, ib_size, map);

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
                                     INTEL_READ);
         dri_bo_reference(bo);

         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;
         offset = 0;
         ib_size = bo->size;
      }
   }
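
   /* Only flag BRW_NEW_INDEX_BUFFER when the BO, offset or size actually
    * changed; a draw that merely moves its start index can then reuse the
    * INDEX_BUFFER state already in the batch.
    */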
   if (brw->ib.bo != bo ||
       brw->ib.offset != offset ||
       brw->ib.size != ib_size)
   {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;
      brw->ib.offset = offset;
      brw->ib.size = ib_size;

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   brw_add_validated_bo(brw, brw->ib.bo);
}
const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .prepare = brw_prepare_indices,
};
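
/* Emit the hardware INDEX_BUFFER packet: one header dword giving the
 * index format, relocations for the first and last byte of the buffer,
 * and a pad dword.
 */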
static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   /* Emit the indexbuffer packet:
    */
   {
      struct brw_indexbuffer ib;

      memset(&ib, 0, sizeof(ib));

      ib.header.bits.opcode = CMD_INDEX_BUFFER;
      ib.header.bits.length = sizeof(ib)/4 - 2;
      ib.header.bits.index_format = get_index_type(index_buffer->type);
      ib.header.bits.cut_index_enable = 0;

      BEGIN_BATCH(4, IGNORE_CLIPRECTS);
      OUT_BATCH( ib.header.dword );
      OUT_RELOC(brw->ib.bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                brw->ib.offset);
      OUT_RELOC(brw->ib.bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                brw->ib.offset + brw->ib.size - 1);
      OUT_BATCH( 0 );
      ADVANCE_BATCH();
   }
}
const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};