i965g: more files compiling
src/gallium/drivers/i965/brw_draw_upload.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_context.h"

#include "util/u_upload_mgr.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_fallback.h"

#include "brw_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_tex.h"

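/* Translate a gallium vertex element format to the hardware's surface
 * format encoding, e.g. PIPE_FORMAT_R32G32B32_FLOAT ->
 * BRW_SURFACEFORMAT_R32G32B32_FLOAT.  Formats that can't appear as
 * vertex elements fall through to the assert below.
 */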
unsigned brw_translate_surface_format( unsigned id )
{
   switch (id) {
   case PIPE_FORMAT_R64_FLOAT:
      return BRW_SURFACEFORMAT_R64_FLOAT;
   case PIPE_FORMAT_R64G64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64_FLOAT;
   case PIPE_FORMAT_R64G64B64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64B64_FLOAT;
   case PIPE_FORMAT_R64G64B64A64_FLOAT:
      return BRW_SURFACEFORMAT_R64G64B64A64_FLOAT;

   case PIPE_FORMAT_R32_FLOAT:
      return BRW_SURFACEFORMAT_R32_FLOAT;
   case PIPE_FORMAT_R32G32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32_FLOAT;
   case PIPE_FORMAT_R32G32B32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32B32_FLOAT;
   case PIPE_FORMAT_R32G32B32A32_FLOAT:
      return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   case PIPE_FORMAT_R32_UNORM:
      return BRW_SURFACEFORMAT_R32_UNORM;
   case PIPE_FORMAT_R32G32_UNORM:
      return BRW_SURFACEFORMAT_R32G32_UNORM;
   case PIPE_FORMAT_R32G32B32_UNORM:
      return BRW_SURFACEFORMAT_R32G32B32_UNORM;
   case PIPE_FORMAT_R32G32B32A32_UNORM:
      return BRW_SURFACEFORMAT_R32G32B32A32_UNORM;

   case PIPE_FORMAT_R32_USCALED:
      return BRW_SURFACEFORMAT_R32_USCALED;
   case PIPE_FORMAT_R32G32_USCALED:
      return BRW_SURFACEFORMAT_R32G32_USCALED;
   case PIPE_FORMAT_R32G32B32_USCALED:
      return BRW_SURFACEFORMAT_R32G32B32_USCALED;
   case PIPE_FORMAT_R32G32B32A32_USCALED:
      return BRW_SURFACEFORMAT_R32G32B32A32_USCALED;

   case PIPE_FORMAT_R32_SNORM:
      return BRW_SURFACEFORMAT_R32_SNORM;
   case PIPE_FORMAT_R32G32_SNORM:
      return BRW_SURFACEFORMAT_R32G32_SNORM;
   case PIPE_FORMAT_R32G32B32_SNORM:
      return BRW_SURFACEFORMAT_R32G32B32_SNORM;
   case PIPE_FORMAT_R32G32B32A32_SNORM:
      return BRW_SURFACEFORMAT_R32G32B32A32_SNORM;

   case PIPE_FORMAT_R32_SSCALED:
      return BRW_SURFACEFORMAT_R32_SSCALED;
   case PIPE_FORMAT_R32G32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32_SSCALED;
   case PIPE_FORMAT_R32G32B32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32B32_SSCALED;
   case PIPE_FORMAT_R32G32B32A32_SSCALED:
      return BRW_SURFACEFORMAT_R32G32B32A32_SSCALED;

   case PIPE_FORMAT_R16_UNORM:
      return BRW_SURFACEFORMAT_R16_UNORM;
   case PIPE_FORMAT_R16G16_UNORM:
      return BRW_SURFACEFORMAT_R16G16_UNORM;
   case PIPE_FORMAT_R16G16B16_UNORM:
      return BRW_SURFACEFORMAT_R16G16B16_UNORM;
   case PIPE_FORMAT_R16G16B16A16_UNORM:
      return BRW_SURFACEFORMAT_R16G16B16A16_UNORM;

   case PIPE_FORMAT_R16_USCALED:
      return BRW_SURFACEFORMAT_R16_USCALED;
   case PIPE_FORMAT_R16G16_USCALED:
      return BRW_SURFACEFORMAT_R16G16_USCALED;
   case PIPE_FORMAT_R16G16B16_USCALED:
      return BRW_SURFACEFORMAT_R16G16B16_USCALED;
   case PIPE_FORMAT_R16G16B16A16_USCALED:
      return BRW_SURFACEFORMAT_R16G16B16A16_USCALED;

   case PIPE_FORMAT_R16_SNORM:
      return BRW_SURFACEFORMAT_R16_SNORM;
   case PIPE_FORMAT_R16G16_SNORM:
      return BRW_SURFACEFORMAT_R16G16_SNORM;
   case PIPE_FORMAT_R16G16B16_SNORM:
      return BRW_SURFACEFORMAT_R16G16B16_SNORM;
   case PIPE_FORMAT_R16G16B16A16_SNORM:
      return BRW_SURFACEFORMAT_R16G16B16A16_SNORM;

   case PIPE_FORMAT_R16_SSCALED:
      return BRW_SURFACEFORMAT_R16_SSCALED;
   case PIPE_FORMAT_R16G16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16_SSCALED;
   case PIPE_FORMAT_R16G16B16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16B16_SSCALED;
   case PIPE_FORMAT_R16G16B16A16_SSCALED:
      return BRW_SURFACEFORMAT_R16G16B16A16_SSCALED;

   case PIPE_FORMAT_R8_UNORM:
      return BRW_SURFACEFORMAT_R8_UNORM;
   case PIPE_FORMAT_R8G8_UNORM:
      return BRW_SURFACEFORMAT_R8G8_UNORM;
   case PIPE_FORMAT_R8G8B8_UNORM:
      return BRW_SURFACEFORMAT_R8G8B8_UNORM;
   case PIPE_FORMAT_R8G8B8A8_UNORM:
      return BRW_SURFACEFORMAT_R8G8B8A8_UNORM;

   case PIPE_FORMAT_R8_USCALED:
      return BRW_SURFACEFORMAT_R8_USCALED;
   case PIPE_FORMAT_R8G8_USCALED:
      return BRW_SURFACEFORMAT_R8G8_USCALED;
   case PIPE_FORMAT_R8G8B8_USCALED:
      return BRW_SURFACEFORMAT_R8G8B8_USCALED;
   case PIPE_FORMAT_R8G8B8A8_USCALED:
      return BRW_SURFACEFORMAT_R8G8B8A8_USCALED;

   case PIPE_FORMAT_R8_SNORM:
      return BRW_SURFACEFORMAT_R8_SNORM;
   case PIPE_FORMAT_R8G8_SNORM:
      return BRW_SURFACEFORMAT_R8G8_SNORM;
   case PIPE_FORMAT_R8G8B8_SNORM:
      return BRW_SURFACEFORMAT_R8G8B8_SNORM;
   case PIPE_FORMAT_R8G8B8A8_SNORM:
      return BRW_SURFACEFORMAT_R8G8B8A8_SNORM;

   case PIPE_FORMAT_R8_SSCALED:
      return BRW_SURFACEFORMAT_R8_SSCALED;
   case PIPE_FORMAT_R8G8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8_SSCALED;
   case PIPE_FORMAT_R8G8B8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8B8_SSCALED;
   case PIPE_FORMAT_R8G8B8A8_SSCALED:
      return BRW_SURFACEFORMAT_R8G8B8A8_SSCALED;

   default:
      assert(0);
      return 0;
   }
}

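/* Translate an index element size in bytes to the hardware index
 * format for the index buffer packet.
 */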
static unsigned get_index_type(int type)
{
   switch (type) {
   case 1: return BRW_INDEX_BYTE;
   case 2: return BRW_INDEX_WORD;
   case 4: return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}

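/* Prepare-phase half of vertex state: copy user vertex buffers through
 * the upload manager so the hardware always sees a real bo, record the
 * resulting offset/stride/count on each enabled vertex element, and add
 * the bos to the validation list.  No batchbuffer space is touched
 * here; the packets are built in brw_emit_vertices() below.
 */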
static boolean brw_prepare_vertices(struct brw_context *brw)
{
   GLuint i;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;

   if (0)
      _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   for (i = 0; i < brw->vb.num_vertex_buffer; i++) {
      struct brw_vertex_buffer *vb = brw->vb.vertex_buffer[i];
      /* XXX: the buffer-to-element mapping is assumed 1:1 here until
       * the port sorts out how elements reference buffers.
       */
      struct brw_vertex_element *input = brw->vb.enabled[i];
      struct pipe_buffer *buffer;
      unsigned offset;
      unsigned count = max_index + 1 - min_index;
      unsigned size = (vb->stride == 0 ?
                       vb->size :
                       vb->stride * count);

      if (brw_is_user_buffer(vb)) {
         u_upload_buffer( brw->upload_vertex,
                          min_index * vb->stride,
                          size,
                          &offset,
                          &buffer );
      }
      else {
         offset = 0;
         buffer = vb->buffer;
      }

      /* Set up post-upload info about this vertex buffer:
       */
      input->offset = offset;
      input->stride = vb->stride;
      input->count = count;
      brw->sws->bo_unreference(input->bo);
      /* XXX: intel_bufferobj_buffer() is a classic-driver leftover;
       * this should take the winsys bo behind "buffer" instead.
       */
      input->bo = intel_bufferobj_buffer(intel, intel_buffer,
                                         INTEL_READ);
      brw->sws->bo_reference(input->bo);

      assert(input->offset < input->bo->size);
      assert(input->offset + size <= input->bo->size);
   }

   brw_prepare_query_begin(brw);

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      brw_add_validated_bo(brw, input->bo);
   }

   return TRUE;
}

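/* Emit-phase half of vertex state: build the VERTEX_BUFFER and
 * VERTEX_ELEMENT packets from the information recorded by
 * brw_prepare_vertices().
 */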
static void brw_emit_vertices(struct brw_context *brw)
{
   GLuint i;

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but it doesn't do anything unless
    * a VE loads from it.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3, IGNORE_CLIPRECTS);
      OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
      OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                BRW_VE0_VALID |
                (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    *
    * This still defines a hardware VB for each input, even if they
    * are interleaved or from the same VBO.  TBD if this makes a
    * performance difference.
    */
   BEGIN_BATCH(1 + brw->vb.nr_enabled * 4, IGNORE_CLIPRECTS);
   OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
             ((1 + brw->vb.nr_enabled * 4) - 2));

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      OUT_BATCH((i << BRW_VB0_INDEX_SHIFT) |
                BRW_VB0_ACCESS_VERTEXDATA |
                (input->stride << BRW_VB0_PITCH_SHIFT));
      OUT_RELOC(input->bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                input->offset);
      if (BRW_IS_IGDNG(brw)) {
         /* IGDNG wants the end address of the buffer here rather than
          * an element count:
          */
         if (input->stride) {
            OUT_RELOC(input->bo,
                      I915_GEM_DOMAIN_VERTEX, 0,
                      input->offset + input->stride * input->count - 1);
         } else {
            assert(input->count == 1);
            OUT_RELOC(input->bo,
                      I915_GEM_DOMAIN_VERTEX, 0,
                      input->offset + input->element_size - 1);
         }
      } else
         OUT_BATCH(input->stride ? input->count : 0);
      OUT_BATCH(0); /* Instance data step rate */
   }
   ADVANCE_BATCH();

   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2, IGNORE_CLIPRECTS);
   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + brw->vb.nr_enabled * 2) - 2));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      /* XXX: glarray and get_surface_type() are classic-driver
       * leftovers; the gallium port should derive this from the pipe
       * vertex element format via brw_translate_surface_format().
       */
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* Missing components get padded out to (0, 0, 0, 1); the
       * fall-through is deliberate.
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;     /* fall through */
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;     /* fall through */
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;     /* fall through */
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      OUT_BATCH((i << BRW_VE0_INDEX_SHIFT) |
                BRW_VE0_VALID |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));

      if (BRW_IS_IGDNG(brw))
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   ADVANCE_BATCH();
}

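/* State atom: vertex buffers and elements must be revalidated and
 * re-emitted whenever the batchbuffer or the bound vertices change.
 */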
const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = 0,
   },
   .prepare = brw_prepare_vertices,
   .emit = brw_emit_vertices,
};

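/* Get the index buffer into a bo the hardware can read: user index
 * arrays and misaligned VBO ranges are copied into a fresh bo, while
 * aligned VBO ranges are used in place, with CMD_3D_PRIM's
 * start_vertex_offset compensating for the start offset.
 */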
static void brw_prepare_indices(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   struct brw_winsys_buffer *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* XXX: _mesa_is_bufferobj(), ctx->Driver.MapBuffer() and
    * intel_bufferobj_buffer() below are classic-driver leftovers that
    * still need porting to gallium interfaces.
    */

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      brw->ib.start_vertex_offset = 0;

      /* Get new bufferobj, offset:
       */
      get_space(brw, ib_size, &bo, &offset);

      /* Straight upload
       */
      brw_bo_subdata(bo, offset, ib_size, index_buffer->ptr);

   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;
      brw->ib.start_vertex_offset = 0;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         get_space(brw, ib_size, &bo, &offset);

         brw_bo_subdata(bo, offset, ib_size, map);

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
                                     INTEL_READ);
         brw->sws->bo_reference(bo);

         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;
         offset = 0;
         ib_size = bo->size;
      }
   }

   if (brw->ib.bo != bo ||
       brw->ib.offset != offset ||
       brw->ib.size != ib_size)
   {
      brw->sws->bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;
      brw->ib.offset = offset;
      brw->ib.size = ib_size;

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      brw->sws->bo_unreference(bo);
   }

   brw_add_validated_bo(brw, brw->ib.bo);
}

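/* State atom: re-run index buffer preparation whenever the bound
 * indices change.
 */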
const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .prepare = brw_prepare_indices,
};

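/* Emit the INDEX_BUFFER packet pointing at the bo chosen by
 * brw_prepare_indices().
 */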
static void brw_emit_index_buffer(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   /* Emit the indexbuffer packet:
    */
   {
      struct brw_indexbuffer ib;

      memset(&ib, 0, sizeof(ib));

      ib.header.bits.opcode = CMD_INDEX_BUFFER;
      ib.header.bits.length = sizeof(ib)/4 - 2;
      ib.header.bits.index_format = get_index_type(index_buffer->type);
      ib.header.bits.cut_index_enable = 0;

      BEGIN_BATCH(4, IGNORE_CLIPRECTS);
      OUT_BATCH( ib.header.dword );
      OUT_RELOC(brw->ib.bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                brw->ib.offset);
      OUT_RELOC(brw->ib.bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                brw->ib.offset + brw->ib.size - 1);
      OUT_BATCH( 0 );
      ADVANCE_BATCH();
   }
}

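/* State atom: the INDEX_BUFFER packet must be re-emitted on a new
 * batchbuffer or when the prepared index bo changes.
 */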
const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};