i965g: more work on compiling
[mesa.git] src/gallium/drivers/i965/brw_draw_upload.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

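/* Vertex and index buffer upload for the i965 gallium driver.
 */
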
#include "pipe/p_context.h"

#include "util/u_upload_mgr.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_fallback.h"

#include "brw_batchbuffer.h"

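/* Translate a gallium PIPE_FORMAT vertex format to the corresponding
 * hardware BRW_SURFACEFORMAT enum used by the vertex fetcher.
 */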
unsigned brw_translate_surface_format( unsigned id )
{
   switch (id) {
   case PIPE_FORMAT_R64_FLOAT:
      return BRW_SURFACEFORMAT_R64_FLOAT;
   case PIPE_FORMAT_R64G64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64_FLOAT;
   case PIPE_FORMAT_R64G64B64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64B64_FLOAT;
   case PIPE_FORMAT_R64G64B64A64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64B64A64_FLOAT;

   case PIPE_FORMAT_R32_FLOAT:
      return BRW_SURFACEFORMAT_R32_FLOAT;
   case PIPE_FORMAT_R32G32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32_FLOAT;
   case PIPE_FORMAT_R32G32B32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32B32_FLOAT;
   case PIPE_FORMAT_R32G32B32A32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   case PIPE_FORMAT_R32_UNORM:
      return BRW_SURFACEFORMAT_R32_UNORM;
   case PIPE_FORMAT_R32G32_UNORM:
      return BRW_SURFACEFORMAT_R32G32_UNORM;
   case PIPE_FORMAT_R32G32B32_UNORM:
      return BRW_SURFACEFORMAT_R32G32B32_UNORM;
   case PIPE_FORMAT_R32G32B32A32_UNORM:
      return BRW_SURFACEFORMAT_R32G32B32A32_UNORM;

   case PIPE_FORMAT_R32_USCALED:
      return BRW_SURFACEFORMAT_R32_USCALED;
   case PIPE_FORMAT_R32G32_USCALED:
      return BRW_SURFACEFORMAT_R32G32_USCALED;
   case PIPE_FORMAT_R32G32B32_USCALED:
      return BRW_SURFACEFORMAT_R32G32B32_USCALED;
   case PIPE_FORMAT_R32G32B32A32_USCALED:
      return BRW_SURFACEFORMAT_R32G32B32A32_USCALED;

   case PIPE_FORMAT_R32_SNORM:
      return BRW_SURFACEFORMAT_R32_SNORM;
   case PIPE_FORMAT_R32G32_SNORM:
      return BRW_SURFACEFORMAT_R32G32_SNORM;
   case PIPE_FORMAT_R32G32B32_SNORM:
      return BRW_SURFACEFORMAT_R32G32B32_SNORM;
   case PIPE_FORMAT_R32G32B32A32_SNORM:
      return BRW_SURFACEFORMAT_R32G32B32A32_SNORM;

   case PIPE_FORMAT_R32_SSCALED:
      return BRW_SURFACEFORMAT_R32_SSCALED;
   case PIPE_FORMAT_R32G32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32_SSCALED;
   case PIPE_FORMAT_R32G32B32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32B32_SSCALED;
   case PIPE_FORMAT_R32G32B32A32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32B32A32_SSCALED;

   case PIPE_FORMAT_R16_UNORM:
      return BRW_SURFACEFORMAT_R16_UNORM;
   case PIPE_FORMAT_R16G16_UNORM:
      return BRW_SURFACEFORMAT_R16G16_UNORM;
   case PIPE_FORMAT_R16G16B16_UNORM:
      return BRW_SURFACEFORMAT_R16G16B16_UNORM;
   case PIPE_FORMAT_R16G16B16A16_UNORM:
      return BRW_SURFACEFORMAT_R16G16B16A16_UNORM;

   case PIPE_FORMAT_R16_USCALED:
      return BRW_SURFACEFORMAT_R16_USCALED;
   case PIPE_FORMAT_R16G16_USCALED:
      return BRW_SURFACEFORMAT_R16G16_USCALED;
   case PIPE_FORMAT_R16G16B16_USCALED:
      return BRW_SURFACEFORMAT_R16G16B16_USCALED;
   case PIPE_FORMAT_R16G16B16A16_USCALED:
      return BRW_SURFACEFORMAT_R16G16B16A16_USCALED;

   case PIPE_FORMAT_R16_SNORM:
      return BRW_SURFACEFORMAT_R16_SNORM;
   case PIPE_FORMAT_R16G16_SNORM:
      return BRW_SURFACEFORMAT_R16G16_SNORM;
   case PIPE_FORMAT_R16G16B16_SNORM:
      return BRW_SURFACEFORMAT_R16G16B16_SNORM;
   case PIPE_FORMAT_R16G16B16A16_SNORM:
      return BRW_SURFACEFORMAT_R16G16B16A16_SNORM;

   case PIPE_FORMAT_R16_SSCALED:
      return BRW_SURFACEFORMAT_R16_SSCALED;
   case PIPE_FORMAT_R16G16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16_SSCALED;
   case PIPE_FORMAT_R16G16B16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16B16_SSCALED;
   case PIPE_FORMAT_R16G16B16A16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16B16A16_SSCALED;

   case PIPE_FORMAT_R8_UNORM:
      return BRW_SURFACEFORMAT_R8_UNORM;
   case PIPE_FORMAT_R8G8_UNORM:
      return BRW_SURFACEFORMAT_R8G8_UNORM;
   case PIPE_FORMAT_R8G8B8_UNORM:
      return BRW_SURFACEFORMAT_R8G8B8_UNORM;
   case PIPE_FORMAT_R8G8B8A8_UNORM:
      return BRW_SURFACEFORMAT_R8G8B8A8_UNORM;

   case PIPE_FORMAT_R8_USCALED:
      return BRW_SURFACEFORMAT_R8_USCALED;
   case PIPE_FORMAT_R8G8_USCALED:
      return BRW_SURFACEFORMAT_R8G8_USCALED;
   case PIPE_FORMAT_R8G8B8_USCALED:
      return BRW_SURFACEFORMAT_R8G8B8_USCALED;
   case PIPE_FORMAT_R8G8B8A8_USCALED:
      return BRW_SURFACEFORMAT_R8G8B8A8_USCALED;

   case PIPE_FORMAT_R8_SNORM:
      return BRW_SURFACEFORMAT_R8_SNORM;
   case PIPE_FORMAT_R8G8_SNORM:
      return BRW_SURFACEFORMAT_R8G8_SNORM;
   case PIPE_FORMAT_R8G8B8_SNORM:
      return BRW_SURFACEFORMAT_R8G8B8_SNORM;
   case PIPE_FORMAT_R8G8B8A8_SNORM:
      return BRW_SURFACEFORMAT_R8G8B8A8_SNORM;

   case PIPE_FORMAT_R8_SSCALED:
      return BRW_SURFACEFORMAT_R8_SSCALED;
   case PIPE_FORMAT_R8G8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8_SSCALED;
   case PIPE_FORMAT_R8G8B8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8B8_SSCALED;
   case PIPE_FORMAT_R8G8B8A8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8B8A8_SSCALED;

   default:
      assert(0);
      return 0;
   }
}

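/* Map an index size in bytes (1, 2 or 4) to the hardware index buffer
 * format.
 */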
static unsigned get_index_type(int type)
{
   switch (type) {
   case 1: return BRW_INDEX_BYTE;
   case 2: return BRW_INDEX_WORD;
   case 4: return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}

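/* Upload user vertex data through the u_upload manager, record the
 * resulting buffer, offset, stride and count on each vertex input, and
 * add the referenced buffers to the validation list.
 */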
static boolean brw_prepare_vertices(struct brw_context *brw)
{
   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
   GLuint i;
   const unsigned char *ptr = NULL;
   GLuint interleave = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      debug_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   for (i = 0; i < brw->vb.num_vertex_buffer; i++) {
      struct brw_vertex_buffer *vb = brw->vb.vertex_buffer[i];
      /* FIXME: work-in-progress port -- the locals below are placeholder
       * declarations and are not hooked up to the gallium state yet.
       */
      struct brw_vertex_element *input = brw->vb.enabled[i];
      struct pipe_buffer *buffer;
      unsigned offset;
      unsigned count = max_index + 1 - min_index;
      unsigned size = (vb->stride == 0 ?
                       vb->size :
                       vb->stride * count);

      if (brw_is_user_buffer(vb)) {
         /* Copy user data into a real hardware buffer via the upload
          * manager, returning the new buffer and offset:
          */
         u_upload_buffer( brw->upload_vertex,
                          min_index * vb->stride,
                          size,
                          &offset,
                          &buffer );
      }
      else {
         offset = 0;
         buffer = vb->buffer;
      }

      /* Set up post-upload info about this vertex buffer:
       */
      input->offset = offset;
      input->stride = vb->stride;
      input->count = count;
      brw->sws->bo_unreference(input->bo);
      /* FIXME: intel_bufferobj_buffer() is a leftover from the classic
       * driver and still needs to be replaced with a winsys lookup.
       */
      input->bo = intel_bufferobj_buffer(intel, intel_buffer,
                                         INTEL_READ);
      brw->sws->bo_reference(input->bo);

      assert(input->offset < input->bo->size);
      assert(input->offset + size <= input->bo->size);
   }

   brw_prepare_query_begin(brw);

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      brw_add_validated_bo(brw, input->bo);
   }

   return TRUE;
}

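/* Emit the VERTEX_BUFFER and VERTEX_ELEMENT packets describing the
 * enabled vertex inputs, or a single padding element when the VS reads
 * no inputs at all.
 */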
static void brw_emit_vertices(struct brw_context *brw)
{
   GLuint i;

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but it won't do anything unless
    * a VE loads from it.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3, IGNORE_CLIPRECTS);
      OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
      OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                BRW_VE0_VALID |
                (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    *
    * This still defines a hardware VB for each input, even if they
    * are interleaved or from the same VBO.  TBD if this makes a
    * performance difference.
    */
   BEGIN_BATCH(1 + brw->vb.nr_enabled * 4, IGNORE_CLIPRECTS);
   OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
             ((1 + brw->vb.nr_enabled * 4) - 2));

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      OUT_BATCH((i << BRW_VB0_INDEX_SHIFT) |
                BRW_VB0_ACCESS_VERTEXDATA |
                (input->stride << BRW_VB0_PITCH_SHIFT));
      OUT_RELOC(input->bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                input->offset);
      if (BRW_IS_IGDNG(brw)) {
         /* IGDNG takes the address of the last valid byte here, rather
          * than a vertex count:
          */
         if (input->stride) {
            OUT_RELOC(input->bo,
                      I915_GEM_DOMAIN_VERTEX, 0,
                      input->offset + input->stride * input->count - 1);
         } else {
            assert(input->count == 1);
            OUT_RELOC(input->bo,
                      I915_GEM_DOMAIN_VERTEX, 0,
                      input->offset + input->element_size - 1);
         }
      } else
         OUT_BATCH(input->stride ? input->count : 0);
      OUT_BATCH(0); /* Instance data step rate */
   }
   ADVANCE_BATCH();

   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2, IGNORE_CLIPRECTS);
   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + brw->vb.nr_enabled * 2) - 2));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      /* FIXME: glarray is a leftover from the classic driver; the format
       * should come from the gallium vertex element state instead, e.g.
       * via brw_translate_surface_format() above.
       */
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* Components not present in the source are filled with zero,
       * except the fourth, which defaults to 1.0 -- the fall-through
       * is deliberate:
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
         /* fall through */
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
         /* fall through */
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
         /* fall through */
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      OUT_BATCH((i << BRW_VE0_INDEX_SHIFT) |
                BRW_VE0_VALID |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));

      if (BRW_IS_IGDNG(brw))
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = 0,
   },
   .prepare = brw_prepare_vertices,
   .emit = brw_emit_vertices,
};

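/* Upload the index buffer (rebasing it into a temporary if it is not
 * aligned to its element size) and flag BRW_NEW_INDEX_BUFFER when the
 * buffer, offset or size actually changed.
 */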
static void brw_prepare_indices(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   struct brw_winsys_buffer *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   /* FIXME: get_size() is a leftover from the classic driver. */
   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      brw->ib.start_vertex_offset = 0;

      /* Get new bufferobj, offset:
       */
      get_space(brw, ib_size, &bo, &offset);

      /* Straight upload
       */
      brw_bo_subdata(bo, offset, ib_size, index_buffer->ptr);

   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;
      brw->ib.start_vertex_offset = 0;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         /* FIXME: ctx->Driver.MapBuffer() is a leftover from the classic
          * driver; there is no GL context here to map through.
          */
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         get_space(brw, ib_size, &bo, &offset);

         brw_bo_subdata(bo, offset, ib_size, map);

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         /* FIXME: intel_bufferobj_buffer() is another classic-driver
          * leftover awaiting a winsys replacement.
          */
         bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
                                     INTEL_READ);
         brw->sws->bo_reference(bo);

         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;
         offset = 0;
         ib_size = bo->size;
      }
   }

   if (brw->ib.bo != bo ||
       brw->ib.offset != offset ||
       brw->ib.size != ib_size)
   {
      brw->sws->bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;
      brw->ib.offset = offset;
      brw->ib.size = ib_size;

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      brw->sws->bo_unreference(bo);
   }

   brw_add_validated_bo(brw, brw->ib.bo);
}

const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .prepare = brw_prepare_indices,
};

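/* Emit the CMD_INDEX_BUFFER packet pointing at the range of the bo set
 * up by brw_prepare_indices() above.
 */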
static void brw_emit_index_buffer(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   /* Emit the indexbuffer packet:
    */
   {
      struct brw_indexbuffer ib;

      memset(&ib, 0, sizeof(ib));

      ib.header.bits.opcode = CMD_INDEX_BUFFER;
      ib.header.bits.length = sizeof(ib)/4 - 2;
      ib.header.bits.index_format = get_index_type(index_buffer->type);
      ib.header.bits.cut_index_enable = 0;

      BEGIN_BATCH(4, IGNORE_CLIPRECTS);
      OUT_BATCH( ib.header.dword );
      OUT_RELOC(brw->ib.bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                brw->ib.offset);
      OUT_RELOC(brw->ib.bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                brw->ib.offset + brw->ib.size - 1);
      OUT_BATCH( 0 );
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};