/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "radeon_priv.h"
#include "r600d.h"

#include "util/u_memory.h"

static int r600_state_pm4_resource(struct radeon_state *state);
static int r600_state_pm4_cb0(struct radeon_state *state);
static int r600_state_pm4_vgt(struct radeon_state *state);
static int r600_state_pm4_db(struct radeon_state *state);
static int r600_state_pm4_shader(struct radeon_state *state);
static int r600_state_pm4_draw(struct radeon_state *state);
static int r600_state_pm4_config(struct radeon_state *state);
static int r600_state_pm4_generic(struct radeon_state *state);
static int r600_state_pm4_query_begin(struct radeon_state *state);
static int r600_state_pm4_query_end(struct radeon_state *state);
static int r700_state_pm4_config(struct radeon_state *state);
static int r700_state_pm4_cb0(struct radeon_state *state);
static int r700_state_pm4_db(struct radeon_state *state);

#include "r600_states.h"

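/*
 * The SUB_* helpers bind a register name table from r600_states.h to a
 * shader stage (PS/VS/GS/FS) and record the number of entries in that
 * table; SUB_NONE is used for state types that are not per-shader.
 */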
#define SUB_NONE(param) { { 0, R600_names_##param, (sizeof(R600_names_##param)/sizeof(struct radeon_register)) } }
#define SUB_PS(param) { R600_SHADER_PS, R600_names_##param, (sizeof(R600_names_##param)/sizeof(struct radeon_register)) }
#define SUB_VS(param) { R600_SHADER_VS, R600_names_##param, (sizeof(R600_names_##param)/sizeof(struct radeon_register)) }
#define SUB_GS(param) { R600_SHADER_GS, R600_names_##param, (sizeof(R600_names_##param)/sizeof(struct radeon_register)) }
#define SUB_FS(param) { R600_SHADER_FS, R600_names_##param, (sizeof(R600_names_##param)/sizeof(struct radeon_register)) }

/* some of these are overridden at runtime for R700 */
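/*
 * Each entry below: state type, number of instances of that state, register
 * offset stride between instances, PM4 emit callback, and the per-shader
 * register tables built with the SUB_* helpers (column meaning inferred
 * from how the fields are used in this file).
 */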
struct radeon_stype_info r600_stypes[] = {
        { R600_STATE_CONFIG, 1, 0, r600_state_pm4_config, SUB_NONE(CONFIG), },
        { R600_STATE_CB_CNTL, 1, 0, r600_state_pm4_generic, SUB_NONE(CB_CNTL) },
        { R600_STATE_RASTERIZER, 1, 0, r600_state_pm4_generic, SUB_NONE(RASTERIZER) },
        { R600_STATE_VIEWPORT, 1, 0, r600_state_pm4_generic, SUB_NONE(VIEWPORT) },
        { R600_STATE_SCISSOR, 1, 0, r600_state_pm4_generic, SUB_NONE(SCISSOR) },
        { R600_STATE_BLEND, 1, 0, r600_state_pm4_generic, SUB_NONE(BLEND), },
        { R600_STATE_DSA, 1, 0, r600_state_pm4_generic, SUB_NONE(DSA), },
        { R600_STATE_SHADER, 1, 0, r600_state_pm4_shader, { SUB_PS(PS_SHADER), SUB_VS(VS_SHADER) } },
        { R600_STATE_CONSTANT, 256, 0x10, r600_state_pm4_generic, { SUB_PS(PS_CONSTANT), SUB_VS(VS_CONSTANT) } },
        { R600_STATE_RESOURCE, 160, 0x1c, r600_state_pm4_resource, { SUB_PS(PS_RESOURCE), SUB_VS(VS_RESOURCE), SUB_GS(GS_RESOURCE), SUB_FS(FS_RESOURCE)} },
        { R600_STATE_SAMPLER, 18, 0xc, r600_state_pm4_generic, { SUB_PS(PS_SAMPLER), SUB_VS(VS_SAMPLER), SUB_GS(GS_SAMPLER) } },
        { R600_STATE_SAMPLER_BORDER, 18, 0x10, r600_state_pm4_generic, { SUB_PS(PS_SAMPLER_BORDER), SUB_VS(VS_SAMPLER_BORDER), SUB_GS(GS_SAMPLER_BORDER) } },
        { R600_STATE_CB0, 1, 0, r600_state_pm4_cb0, SUB_NONE(CB0) },
        { R600_STATE_CB1, 1, 0, r600_state_pm4_cb0, SUB_NONE(CB1) },
        { R600_STATE_CB2, 1, 0, r600_state_pm4_cb0, SUB_NONE(CB2) },
        { R600_STATE_CB3, 1, 0, r600_state_pm4_cb0, SUB_NONE(CB3) },
        { R600_STATE_CB4, 1, 0, r600_state_pm4_cb0, SUB_NONE(CB4) },
        { R600_STATE_CB5, 1, 0, r600_state_pm4_cb0, SUB_NONE(CB5) },
        { R600_STATE_CB6, 1, 0, r600_state_pm4_cb0, SUB_NONE(CB6) },
        { R600_STATE_CB7, 1, 0, r600_state_pm4_cb0, SUB_NONE(CB7) },
        { R600_STATE_QUERY_BEGIN, 1, 0, r600_state_pm4_query_begin, SUB_NONE(VGT_EVENT) },
        { R600_STATE_QUERY_END, 1, 0, r600_state_pm4_query_end, SUB_NONE(VGT_EVENT) },
        { R600_STATE_DB, 1, 0, r600_state_pm4_db, SUB_NONE(DB) },
        { R600_STATE_UCP, 1, 0, r600_state_pm4_generic, SUB_NONE(UCP) },
        { R600_STATE_VGT, 1, 0, r600_state_pm4_vgt, SUB_NONE(VGT) },
        { R600_STATE_DRAW, 1, 0, r600_state_pm4_draw, SUB_NONE(DRAW) },
};
#define STYPES_SIZE Elements(r600_stypes)

static const struct radeon_register *get_regs(struct radeon_state *state)
{
        return state->stype->reginfo[state->shader_index].regs;
}

/*
 * r600/r700 state functions
 */
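/*
 * Emit a single SET_CONFIG_REG / SET_CONTEXT_REG / SET_ALU_CONST /
 * SET_SAMPLER packet for nreg consecutive registers starting at the given
 * register offset, using the values in state->states[id..id+nreg-1].  For
 * config and context registers, a NOP relocation packet is appended for
 * every register that references a buffer object.
 */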
static int r600_state_pm4_bytecode(struct radeon_state *state, unsigned offset, unsigned id, unsigned nreg)
{
        const struct radeon_register *regs = get_regs(state);
        unsigned i;
        int r;

        if (!offset) {
                fprintf(stderr, "%s invalid register for state %d %d\n",
                        __func__, state->stype->stype, id);
                return -EINVAL;
        }
        if (offset >= R600_CONFIG_REG_OFFSET && offset < R600_CONFIG_REG_END) {
                state->pm4[state->cpm4++] = PKT3(PKT3_SET_CONFIG_REG, nreg);
                state->pm4[state->cpm4++] = (offset - R600_CONFIG_REG_OFFSET) >> 2;
                for (i = 0; i < nreg; i++) {
                        state->pm4[state->cpm4++] = state->states[id + i];
                }
                for (i = 0; i < nreg; i++) {
                        if (regs[id + i].need_reloc) {
                                state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
                                r = radeon_state_reloc(state, state->cpm4, regs[id + i].bo_id);
                                if (r)
                                        return r;
                                state->pm4[state->cpm4++] = state->bo[regs[id + i].bo_id]->handle;
                        }
                }
                return 0;
        }
        if (offset >= R600_CONTEXT_REG_OFFSET && offset < R600_CONTEXT_REG_END) {
                state->pm4[state->cpm4++] = PKT3(PKT3_SET_CONTEXT_REG, nreg);
                state->pm4[state->cpm4++] = (offset - R600_CONTEXT_REG_OFFSET) >> 2;
                for (i = 0; i < nreg; i++) {
                        state->pm4[state->cpm4++] = state->states[id + i];
                }
                for (i = 0; i < nreg; i++) {
                        if (regs[id + i].need_reloc) {
                                state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
                                r = radeon_state_reloc(state, state->cpm4, regs[id + i].bo_id);
                                if (r)
                                        return r;
                                state->pm4[state->cpm4++] = state->bo[regs[id + i].bo_id]->handle;
                        }
                }
                return 0;
        }
        if (offset >= R600_ALU_CONST_OFFSET && offset < R600_ALU_CONST_END) {
                state->pm4[state->cpm4++] = PKT3(PKT3_SET_ALU_CONST, nreg);
                state->pm4[state->cpm4++] = (offset - R600_ALU_CONST_OFFSET) >> 2;
                for (i = 0; i < nreg; i++) {
                        state->pm4[state->cpm4++] = state->states[id + i];
                }
                return 0;
        }
        if (offset >= R600_SAMPLER_OFFSET && offset < R600_SAMPLER_END) {
                state->pm4[state->cpm4++] = PKT3(PKT3_SET_SAMPLER, nreg);
                state->pm4[state->cpm4++] = (offset - R600_SAMPLER_OFFSET) >> 2;
                for (i = 0; i < nreg; i++) {
                        state->pm4[state->cpm4++] = state->states[id + i];
                }
                return 0;
        }
        fprintf(stderr, "%s unsupported offset 0x%08X\n", __func__, offset);
        return -EINVAL;
}

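/*
 * Walk the state's register table, merge registers at consecutive offsets
 * (4 bytes apart) into ranges, and emit each range through
 * r600_state_pm4_bytecode.  state->id * stride shifts the offsets for
 * multi-instance state types such as constants, resources and samplers.
 */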
static int r600_state_pm4_generic(struct radeon_state *state)
{
        const struct radeon_register *regs = get_regs(state);
        unsigned i, offset, nreg, coffset, loffset, soffset;
        unsigned start;
        int r;

        if (!state->nstates)
                return 0;
        soffset = state->id * state->stype->stride;
        offset = loffset = regs[0].offset + soffset;
        start = 0;
        for (i = 1, nreg = 1; i < state->nstates; i++) {
                coffset = regs[i].offset + soffset;
                if (coffset == (loffset + 4)) {
                        nreg++;
                        loffset = coffset;
                } else {
                        r = r600_state_pm4_bytecode(state, offset, start, nreg);
                        if (r) {
                                fprintf(stderr, "%s invalid 0x%08X %d\n", __func__, start, nreg);
                                return r;
                        }
                        offset = loffset = coffset;
                        nreg = 1;
                        start = i;
                }
        }
        return r600_state_pm4_bytecode(state, offset, start, nreg);
}

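/*
 * Build the relocation list by de-duplicating the state's buffer objects,
 * then emit one SURFACE_SYNC packet (with the caller's CP_COHER flags and
 * the BO size in 256-byte units) plus a NOP relocation per unique BO.
 */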
static void r600_state_pm4_with_flush(struct radeon_state *state, u32 flags)
{
        unsigned i, j, add, size;

        state->nreloc = 0;
        for (i = 0; i < state->nbo; i++) {
                for (j = 0, add = 1; j < state->nreloc; j++) {
                        if (state->bo[state->reloc_bo_id[j]] == state->bo[i]) {
                                add = 0;
                                break;
                        }
                }
                if (add) {
                        state->reloc_bo_id[state->nreloc++] = i;
                }
        }
        for (i = 0; i < state->nreloc; i++) {
                size = (state->bo[state->reloc_bo_id[i]]->size + 255) >> 8;
                state->pm4[state->cpm4++] = PKT3(PKT3_SURFACE_SYNC, 3);
                state->pm4[state->cpm4++] = flags;
                state->pm4[state->cpm4++] = size;
                state->pm4[state->cpm4++] = 0x00000000;
                state->pm4[state->cpm4++] = 0x0000000A;
                state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
                state->reloc_pm4_id[i] = state->cpm4;
                state->pm4[state->cpm4++] = state->bo[state->reloc_bo_id[i]]->handle;
        }
}

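/*
 * Colorbuffer and depthbuffer emission: flush the relevant caches via
 * SURFACE_SYNC, write the registers, then issue a SURFACE_BASE_UPDATE
 * packet.  The r700 variants below are identical except that they omit
 * SURFACE_BASE_UPDATE.
 */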
static int r600_state_pm4_cb0(struct radeon_state *state)
{
        int r;

        r600_state_pm4_with_flush(state, S_0085F0_CB_ACTION_ENA(1) |
                                  S_0085F0_CB0_DEST_BASE_ENA(1));
        r = r600_state_pm4_generic(state);
        if (r)
                return r;
        state->pm4[state->cpm4++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0);
        state->pm4[state->cpm4++] = 0x00000002;
        return 0;
}

static int r700_state_pm4_cb0(struct radeon_state *state)
{
        int r;

        r600_state_pm4_with_flush(state, S_0085F0_CB_ACTION_ENA(1) |
                                  S_0085F0_CB0_DEST_BASE_ENA(1));
        r = r600_state_pm4_generic(state);
        if (r)
                return r;
        return 0;
}

static int r600_state_pm4_db(struct radeon_state *state)
{
        int r;

        r600_state_pm4_with_flush(state, S_0085F0_DB_ACTION_ENA(1) |
                                  S_0085F0_DB_DEST_BASE_ENA(1));
        r = r600_state_pm4_generic(state);
        if (r)
                return r;
        state->pm4[state->cpm4++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0);
        state->pm4[state->cpm4++] = 0x00000001;
        return 0;
}

static int r700_state_pm4_db(struct radeon_state *state)
{
        int r;

        r600_state_pm4_with_flush(state, S_0085F0_DB_ACTION_ENA(1) |
                                  S_0085F0_DB_DEST_BASE_ENA(1));
        r = r600_state_pm4_generic(state);
        if (r)
                return r;
        return 0;
}

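/*
 * One-time command stream setup: context control, a cache flush and
 * invalidate event, and an initial SET_CONFIG_REG write, followed by the
 * CONFIG register block itself.  The r700 variant further down is the same
 * minus the START_3D_CMDBUF packet.
 */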
static int r600_state_pm4_config(struct radeon_state *state)
{
        state->pm4[state->cpm4++] = PKT3(PKT3_START_3D_CMDBUF, 0);
        state->pm4[state->cpm4++] = 0x00000000;
        state->pm4[state->cpm4++] = PKT3(PKT3_CONTEXT_CONTROL, 1);
        state->pm4[state->cpm4++] = 0x80000000;
        state->pm4[state->cpm4++] = 0x80000000;
        state->pm4[state->cpm4++] = PKT3(PKT3_EVENT_WRITE, 0);
        state->pm4[state->cpm4++] = EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT;
        state->pm4[state->cpm4++] = PKT3(PKT3_SET_CONFIG_REG, 1);
        state->pm4[state->cpm4++] = 0x00000010;
        state->pm4[state->cpm4++] = 0x00028000;
        return r600_state_pm4_generic(state);
}

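/*
 * Query begin and end both emit a ZPASS_DONE event: the GPU writes the
 * Z-pass (occlusion) counters to the query buffer at the offset given in
 * states[0], and a relocation is recorded for that buffer.  Both reset cpm4
 * first, so the packet is always built from the start of the buffer.
 */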
static int r600_state_pm4_query_begin(struct radeon_state *state)
{
        int r;

        state->cpm4 = 0;
        state->pm4[state->cpm4++] = PKT3(PKT3_EVENT_WRITE, 2);
        state->pm4[state->cpm4++] = EVENT_TYPE_ZPASS_DONE;
        state->pm4[state->cpm4++] = state->states[0];
        state->pm4[state->cpm4++] = 0x0;
        state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
        r = radeon_state_reloc(state, state->cpm4, 0);
        if (r)
                return r;
        state->pm4[state->cpm4++] = state->bo[0]->handle;
        return 0;
}

static int r600_state_pm4_query_end(struct radeon_state *state)
{
        int r;

        state->cpm4 = 0;
        state->pm4[state->cpm4++] = PKT3(PKT3_EVENT_WRITE, 2);
        state->pm4[state->cpm4++] = EVENT_TYPE_ZPASS_DONE;
        state->pm4[state->cpm4++] = state->states[0];
        state->pm4[state->cpm4++] = 0x0;
        state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
        r = radeon_state_reloc(state, state->cpm4, 0);
        if (r)
                return r;
        state->pm4[state->cpm4++] = state->bo[0]->handle;
        return 0;
}

static int r700_state_pm4_config(struct radeon_state *state)
{
        state->pm4[state->cpm4++] = PKT3(PKT3_CONTEXT_CONTROL, 1);
        state->pm4[state->cpm4++] = 0x80000000;
        state->pm4[state->cpm4++] = 0x80000000;
        state->pm4[state->cpm4++] = PKT3(PKT3_EVENT_WRITE, 0);
        state->pm4[state->cpm4++] = EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT;
        state->pm4[state->cpm4++] = PKT3(PKT3_SET_CONFIG_REG, 1);
        state->pm4[state->cpm4++] = 0x00000010;
        state->pm4[state->cpm4++] = 0x00028000;
        return r600_state_pm4_generic(state);
}

static int r600_state_pm4_shader(struct radeon_state *state)
{
        r600_state_pm4_with_flush(state, S_0085F0_SH_ACTION_ENA(1));
        return r600_state_pm4_generic(state);
}

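/*
 * Emit the VGT draw setup: max/min vertex index, index offset, primitive
 * reset index and primitive type registers, followed by the INDEX_TYPE and
 * NUM_INSTANCES packets.
 */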
static int r600_state_pm4_vgt(struct radeon_state *state)
{
        int r;

        r = r600_state_pm4_bytecode(state, R_028400_VGT_MAX_VTX_INDX, R600_VGT__VGT_MAX_VTX_INDX, 1);
        if (r)
                return r;
        r = r600_state_pm4_bytecode(state, R_028404_VGT_MIN_VTX_INDX, R600_VGT__VGT_MIN_VTX_INDX, 1);
        if (r)
                return r;
        r = r600_state_pm4_bytecode(state, R_028408_VGT_INDX_OFFSET, R600_VGT__VGT_INDX_OFFSET, 1);
        if (r)
                return r;
        r = r600_state_pm4_bytecode(state, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, R600_VGT__VGT_MULTI_PRIM_IB_RESET_INDX, 1);
        if (r)
                return r;
        r = r600_state_pm4_bytecode(state, R_008958_VGT_PRIMITIVE_TYPE, R600_VGT__VGT_PRIMITIVE_TYPE, 1);
        if (r)
                return r;
        state->pm4[state->cpm4++] = PKT3(PKT3_INDEX_TYPE, 0);
        state->pm4[state->cpm4++] = state->states[R600_VGT__VGT_DMA_INDEX_TYPE];
        state->pm4[state->cpm4++] = PKT3(PKT3_NUM_INSTANCES, 0);
        state->pm4[state->cpm4++] = state->states[R600_VGT__VGT_DMA_NUM_INSTANCES];
        return 0;
}

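/*
 * Emit the draw packet: DRAW_INDEX plus a relocation for the index buffer
 * when one is bound, DRAW_INDEX_AUTO otherwise, followed by a cache flush
 * and invalidate event.
 */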
static int r600_state_pm4_draw(struct radeon_state *state)
{
        int r;

        if (state->nbo) {
                state->pm4[state->cpm4++] = PKT3(PKT3_DRAW_INDEX, 3);
                state->pm4[state->cpm4++] = state->states[R600_DRAW__VGT_DMA_BASE];
                state->pm4[state->cpm4++] = state->states[R600_DRAW__VGT_DMA_BASE_HI];
                state->pm4[state->cpm4++] = state->states[R600_DRAW__VGT_NUM_INDICES];
                state->pm4[state->cpm4++] = state->states[R600_DRAW__VGT_DRAW_INITIATOR];
                state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
                r = radeon_state_reloc(state, state->cpm4, 0);
                if (r)
                        return r;
                state->pm4[state->cpm4++] = state->bo[0]->handle;
        } else {
                state->pm4[state->cpm4++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1);
                state->pm4[state->cpm4++] = state->states[R600_DRAW__VGT_NUM_INDICES];
                state->pm4[state->cpm4++] = state->states[R600_DRAW__VGT_DRAW_INITIATOR];
        }
        state->pm4[state->cpm4++] = PKT3(PKT3_EVENT_WRITE, 0);
        state->pm4[state->cpm4++] = EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT;
        return 0;
}

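/*
 * Emit a SET_RESOURCE packet (7 dwords).  The resource type comes from word
 * 6: type 2 appears to be a texture (texture-cache flush, two BOs), type 3 a
 * vertex buffer (vertex-cache flush, one BO); anything else is silently
 * ignored.  A relocation is emitted for each expected BO.
 */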
static int r600_state_pm4_resource(struct radeon_state *state)
{
        u32 flags, type, nbo, offset, soffset;
        int r;
        const struct radeon_register *regs = get_regs(state);

        soffset = state->id * state->stype->stride;
        type = G_038018_TYPE(state->states[6]);
        switch (type) {
        case 2:
                flags = S_0085F0_TC_ACTION_ENA(1);
                nbo = 2;
                break;
        case 3:
                flags = S_0085F0_VC_ACTION_ENA(1);
                nbo = 1;
                break;
        default:
                return 0;
        }
        if (state->nbo != nbo) {
                fprintf(stderr, "%s need %d bo got %d\n", __func__, nbo, state->nbo);
                return -EINVAL;
        }
        r600_state_pm4_with_flush(state, flags);
        offset = regs[0].offset + soffset;
        state->pm4[state->cpm4++] = PKT3(PKT3_SET_RESOURCE, 7);
        state->pm4[state->cpm4++] = (offset - R_038000_SQ_TEX_RESOURCE_WORD0_0) >> 2;
        state->pm4[state->cpm4++] = state->states[0];
        state->pm4[state->cpm4++] = state->states[1];
        state->pm4[state->cpm4++] = state->states[2];
        state->pm4[state->cpm4++] = state->states[3];
        state->pm4[state->cpm4++] = state->states[4];
        state->pm4[state->cpm4++] = state->states[5];
        state->pm4[state->cpm4++] = state->states[6];
        state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
        r = radeon_state_reloc(state, state->cpm4, 0);
        if (r)
                return r;
        state->pm4[state->cpm4++] = state->bo[0]->handle;
        if (type == 2) {
                state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
                r = radeon_state_reloc(state, state->cpm4, 1);
                if (r)
                        return r;
                state->pm4[state->cpm4++] = state->bo[1]->handle;
        }
        return 0;
}

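/*
 * On R7xx (RV710/RV730/RV740/RV770) swap in the r700 emit callbacks for the
 * state types that differ from R600.
 */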
static void r600_modify_type_array(struct radeon *radeon)
{
        int i;

        switch (radeon->family) {
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
                break;
        default:
                return;
        }

        /* r700 needs some mods */
        for (i = 0; i < radeon->nstype; i++) {
                struct radeon_stype_info *info = &radeon->stype[i];

                switch (info->stype) {
                case R600_STATE_CONFIG:
                        info->pm4 = r700_state_pm4_config;
                        break;
                case R600_STATE_CB0:
                        info->pm4 = r700_state_pm4_cb0;
                        break;
                case R600_STATE_DB:
                        info->pm4 = r700_state_pm4_db;
                        break;
                }
        }
}

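/*
 * Assign every state type its base id in the global state id space and a
 * default pm4 buffer size; state types bound to shaders get one block of
 * ids per shader stage they define.  The table is then registered with the
 * radeon struct and the R7xx overrides are applied.
 */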
static void r600_build_types_array(struct radeon *radeon)
{
        int i, j;
        int id = 0;

        for (i = 0; i < STYPES_SIZE; i++) {
                r600_stypes[i].base_id = id;
                r600_stypes[i].npm4 = 128;
                if (r600_stypes[i].reginfo[0].shader_type == 0) {
                        id += r600_stypes[i].num;
                } else {
                        for (j = 0; j < R600_SHADER_MAX; j++) {
                                if (r600_stypes[i].reginfo[j].shader_type)
                                        id += r600_stypes[i].num;
                        }
                }
        }
        radeon->stype = r600_stypes;
        radeon->nstype = STYPES_SIZE;

        r600_modify_type_array(radeon);
}

int r600_init(struct radeon *radeon)
{
        r600_build_types_array(radeon);
        return 0;
}