Code reorganization: update build.
[mesa.git] / src / gallium / drivers / cell / ppu / cell_spu.c
1 /**************************************************************************
2 *
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include <pthread.h>
30
31 #include "cell_spu.h"
32 #include "pipe/p_format.h"
33 #include "pipe/p_state.h"
34 #include "cell/common.h"
35
36
37 /*
38 helpful headers:
39 /opt/ibm/cell-sdk/prototype/src/include/ppu/cbe_mfc.h
40 */
41
42
/** Global driver state shared by the PPU code and all SPU threads:
 *  per-SPU contexts, thread handles, init blocks and command buffers.
 */
struct cell_global_info cell_global;
44
45
46 /**
47 * Write a 1-word message to the given SPE mailbox.
48 */
49 void
50 send_mbox_message(spe_context_ptr_t ctx, unsigned int msg)
51 {
52 spe_in_mbox_write(ctx, &msg, 1, SPE_MBOX_ALL_BLOCKING);
53 }
54
55
56 /**
57 * Wait for a 1-word message to arrive in given mailbox.
58 */
59 uint
60 wait_mbox_message(spe_context_ptr_t ctx)
61 {
62 do {
63 unsigned data;
64 int count = spe_out_mbox_read(ctx, &data, 1);
65
66 if (count == 1) {
67 return data;
68 }
69
70 if (count < 0) {
71 /* error */ ;
72 }
73 } while (1);
74 }
75
76
77 static void *cell_thread_function(void *arg)
78 {
79 struct cell_init_info *init = (struct cell_init_info *) arg;
80 unsigned entry = SPE_DEFAULT_ENTRY;
81
82 ASSERT_ALIGN16(init);
83
84 if (spe_context_run(cell_global.spe_contexts[init->id], &entry, 0,
85 init, NULL, NULL) < 0) {
86 fprintf(stderr, "spe_context_run() failed\n");
87 exit(1);
88 }
89
90 pthread_exit(NULL);
91 }
92
93
94 /**
95 * Create the SPU threads
96 */
97 void
98 cell_start_spus(struct cell_context *cell)
99 {
100 uint i, j;
101
102 assert(cell->num_spus <= MAX_SPUS);
103
104 ASSERT_ALIGN16(&cell_global.command[0]);
105 ASSERT_ALIGN16(&cell_global.command[1]);
106
107 ASSERT_ALIGN16(&cell_global.inits[0]);
108 ASSERT_ALIGN16(&cell_global.inits[1]);
109
110 for (i = 0; i < cell->num_spus; i++) {
111 cell_global.inits[i].id = i;
112 cell_global.inits[i].num_spus = cell->num_spus;
113 cell_global.inits[i].cmd = &cell_global.command[i];
114 for (j = 0; j < CELL_NUM_BUFFERS; j++) {
115 cell_global.inits[i].buffers[j] = cell->buffer[j];
116 }
117 cell_global.inits[i].buffer_status = &cell->buffer_status[0][0][0];
118
119 cell_global.spe_contexts[i] = spe_context_create(0, NULL);
120 if (!cell_global.spe_contexts[i]) {
121 fprintf(stderr, "spe_context_create() failed\n");
122 exit(1);
123 }
124
125 if (spe_program_load(cell_global.spe_contexts[i], &g3d_spu)) {
126 fprintf(stderr, "spe_program_load() failed\n");
127 exit(1);
128 }
129
130 pthread_create(&cell_global.spe_threads[i], NULL, &cell_thread_function,
131 &cell_global.inits[i]);
132 }
133 }
134
135
136 /**
137 * Tell all the SPUs to stop/exit.
138 */
139 void
140 cell_spu_exit(struct cell_context *cell)
141 {
142 uint i;
143
144 for (i = 0; i < cell->num_spus; i++) {
145 send_mbox_message(cell_global.spe_contexts[i], CELL_CMD_EXIT);
146 }
147
148 /* wait for threads to exit */
149 for (i = 0; i < cell->num_spus; i++) {
150 void *value;
151 pthread_join(cell_global.spe_threads[i], &value);
152 cell_global.spe_threads[i] = 0;
153 cell_global.spe_contexts[i] = 0;
154 }
155 }