i965g: more compiling wip
[mesa.git] / src / gallium / drivers / i965 / brw_misc_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_debug.h"
#include "brw_batchbuffer.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_screen.h"


/***********************************************************************
 * Blend color
 */

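/* BRW_CACHED_BATCH_STRUCT() keeps a copy of the last packet emitted and
 * only adds the struct to the batch when its contents have changed.
 */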
static int upload_blend_constant_color(struct brw_context *brw)
{
   BRW_CACHED_BATCH_STRUCT(brw, &brw->curr.bcc);
   return 0;
}

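/* Each brw_tracked_state atom lists the dirty flags which trigger it;
 * the state upload loop calls ->prepare() during buffer validation and
 * ->emit() while the batch is built.
 */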
const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = PIPE_NEW_BLEND_COLOR,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static int upload_drawing_rect(struct brw_context *brw)
{
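   /* 3DSTATE_DRAWING_RECTANGLE: DW1 holds the inclusive top-left corner
    * (clipped to 0,0 here), DW2 the inclusive bottom-right corner (hence
    * width - 1 and height - 1), and DW3 the drawing rectangle origin.
    */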
   BEGIN_BATCH(4, NO_LOOP_CLIPRECTS);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0);
   OUT_BATCH(((brw->curr.fb.width - 1) & 0xffff) |
             ((brw->curr.fb.height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
   return 0;
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = PIPE_NEW_FRAMEBUFFER,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

static int prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.bind_bo);
   brw_add_validated_bo(brw, brw->wm.bind_bo);
   return 0;
}

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static int upload_binding_table_pointers(struct brw_context *brw)
{
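   /* One pointer per stage, in fixed order: VS, GS, CLIP, SF, WM.  A zero
    * entry is used for stages that have no binding table.
    */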
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   if (brw->vs.bind_bo != NULL)
      OUT_RELOC(brw->vs.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* vs */
   else
      OUT_BATCH(0);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* wm/ps */
   ADVANCE_BATCH();
   return 0;
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_binding_table_pointers,
};


/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static int upload_pipelined_state_pointers(struct brw_context *brw)
{
   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
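   /* Bit 0 of the GS and CLIP unit pointers is the unit-enable flag,
    * which is why those relocations OR in a delta of 1.
    */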
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

   brw->state.dirty.brw |= BRW_NEW_PSP;
   return 0;
}


static int prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
   brw_add_validated_bo(brw, brw->cc.state_bo);
   return 0;
}

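/* These three packets are emitted as a group: once new pipelined state
 * pointers go out, the URB fence and constant buffer URB state have to
 * be re-emitted as well.
 */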
static int upload_psp_urb_cbs(struct brw_context *brw)
{
   int ret;

   ret = upload_pipelined_state_pointers(brw);
   if (ret)
      return ret;

   ret = brw_upload_urb_fence(brw);
   if (ret)
      return ret;

   ret = brw_upload_cs_urb_state(brw);
   if (ret)
      return ret;

   return 0;
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};

static int prepare_depthbuffer(struct brw_context *brw)
{
   struct pipe_surface *zsbuf = brw->curr.fb.zsbuf;

   if (zsbuf)
      brw_add_validated_bo(brw, brw_surface_bo(zsbuf));

   return 0;
}

static int emit_depthbuffer(struct brw_context *brw)
{
   struct pipe_surface *surface = brw->curr.fb.zsbuf;
   unsigned int len = (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw)) ? 6 : 5;

   if (surface == NULL) {
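      /* The depth buffer packet still has to be emitted when no zsbuf is
       * bound, with a null surface type, so the hardware sees a
       * consistent (disabled) depth state.
       */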
      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      struct brw_winsys_buffer *bo;
      unsigned int format;
      unsigned int pitch;
      unsigned int cpp;

      switch (surface->format) {
      case PIPE_FORMAT_Z16_UNORM:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         cpp = 2;
         break;
      case PIPE_FORMAT_Z24S8_UNORM:
         format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         cpp = 4;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         format = BRW_DEPTHFORMAT_D32_FLOAT;
         cpp = 4;
         break;
      default:
         assert(0);
         return PIPE_ERROR_BAD_INPUT;
      }

      bo = brw_surface_bo(surface);
      pitch = brw_surface_pitch(surface);

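      /* brw_surface_pitch() returns the pitch in pixels; DW1 below wants
       * it in bytes, minus one, hence the cpp multiply.
       */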
      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((pitch * cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((surface->layout != PIPE_SURFACE_LAYOUT_LINEAR) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                surface->offset);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((pitch - 1) << 6) |
                ((surface->height - 1) << 19));
      OUT_BATCH(0);

      if (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   return 0;
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};



/***********************************************************************
 * Polygon stipple packet
 */

static int upload_polygon_stipple(struct brw_context *brw)
{
   BRW_CACHED_BATCH_STRUCT(brw, &brw->curr.bps);
   return 0;
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = PIPE_NEW_POLYGON_STIPPLE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static int upload_polygon_stipple_offset(struct brw_context *brw)
{
   struct brw_polygon_stipple_offset bpso;

   /* This is invariant state in gallium: surfaces behave like FBOs, so
    * the stipple origin never has to track the window position.
    */
   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4 - 2;
   bpso.bits0.y_offset = 0;
   bpso.bits0.x_offset = 0;

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
   return 0;
}

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static int upload_aa_line_parameters(struct brw_context *brw)
{
   struct brw_aa_line_parameters balp;

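   /* 3DSTATE_AA_LINE_PARAMETERS first appears on G4X; the original 965
    * always uses the legacy coverage computation.
    */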
   if (BRW_IS_965(brw))
      return 0;

   /* use legacy aa line coverage computation */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
   return 0;
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static int upload_line_stipple(struct brw_context *brw)
{
   /* XXX: WIP -- the line stipple packet is not yet plumbed through from
    * the gallium rasterizer state; emitting the NULL struct below would
    * crash, so skip it until that hookup is done.
    */
   struct brw_line_stipple *bls = NULL; /* XXX: brw->curr.rast->bls */

   if (bls)
      BRW_CACHED_BATCH_STRUCT(brw, bls);
   return 0;
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = PIPE_NEW_RAST,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static int upload_invarient_state(struct brw_context *brw)
{
   {
      /* 0x61040000 Pipeline Select */
      /* PipelineSelect : 0 */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      if (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))
         ps.header.opcode = CMD_PIPELINE_SELECT_GM45;
      else
         ps.header.opcode = CMD_PIPELINE_SELECT_965;
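      /* 0 selects the 3D pipeline; 1 would select the media pipeline. */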
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }


   /* 0x61020000 State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

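      /* A system instruction pointer of zero means no exception handler
       * is installed.
       */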
      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      if (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))
         vfs.opcode = CMD_VF_STATISTICS_GM45;
      else
         vfs.opcode = CMD_VF_STATISTICS_965;

      if (BRW_DEBUG & DEBUG_STATS)
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }

   return 0;
}

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations in many places for
 * cached state, and instead emit pointers inside of large, mostly-static
 * state pools. This comes at the expense of memory, and more expensive cache
 * misses.
 */
static int upload_state_base_address(struct brw_context *brw)
{
   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
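   /* Bit 0 of each address dword is its "modify enable" flag, so
    * OUT_BATCH(1) programs a base address (or upper bound) of zero.
    */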
   if (BRW_IS_IGDNG(brw)) {
      BEGIN_BATCH(8, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_BATCH(1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_BATCH(1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }
   return 0;
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .emit = upload_state_base_address
};