run copyright.sh for 2011.
[binutils-gdb.git] / gdb / spu-tdep.c
1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4
5 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
6 Based on a port by Sid Manning <sid@us.ibm.com>.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "arch-utils.h"
25 #include "gdbtypes.h"
26 #include "gdbcmd.h"
27 #include "gdbcore.h"
28 #include "gdb_string.h"
29 #include "gdb_assert.h"
30 #include "frame.h"
31 #include "frame-unwind.h"
32 #include "frame-base.h"
33 #include "trad-frame.h"
34 #include "symtab.h"
35 #include "symfile.h"
36 #include "value.h"
37 #include "inferior.h"
38 #include "dis-asm.h"
39 #include "objfiles.h"
40 #include "language.h"
41 #include "regcache.h"
42 #include "reggroups.h"
43 #include "floatformat.h"
44 #include "block.h"
45 #include "observer.h"
46 #include "infcall.h"
47
48 #include "spu-tdep.h"
49
50
/* Command lists holding the "set spu" and "show spu" sub-commands.  */
static struct cmd_list_element *setspucmdlist = NULL;
static struct cmd_list_element *showspucmdlist = NULL;

/* Non-zero if GDB should stop when a new SPE context is loaded.  */
static int spu_stop_on_load_p = 0;
/* Non-zero if the SW-managed cache is flushed automatically.  */
static int spu_auto_flush_cache_p = 1;
59
60
/* The tdep structure: per-architecture data for the SPU target.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space; -1 if unknown
     (see spu_gdbarch_id for the standalone-executable fallback).  */
  int id;

  /* SPU-specific vector type, built lazily by spu_builtin_type_vec128.  */
  struct type *spu_builtin_type_vec128;
};
70
71
72 /* SPU-specific vector type. */
73 static struct type *
74 spu_builtin_type_vec128 (struct gdbarch *gdbarch)
75 {
76 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
77
78 if (!tdep->spu_builtin_type_vec128)
79 {
80 const struct builtin_type *bt = builtin_type (gdbarch);
81 struct type *t;
82
83 t = arch_composite_type (gdbarch,
84 "__spu_builtin_type_vec128", TYPE_CODE_UNION);
85 append_composite_type_field (t, "uint128", bt->builtin_int128);
86 append_composite_type_field (t, "v2_int64",
87 init_vector_type (bt->builtin_int64, 2));
88 append_composite_type_field (t, "v4_int32",
89 init_vector_type (bt->builtin_int32, 4));
90 append_composite_type_field (t, "v8_int16",
91 init_vector_type (bt->builtin_int16, 8));
92 append_composite_type_field (t, "v16_int8",
93 init_vector_type (bt->builtin_int8, 16));
94 append_composite_type_field (t, "v2_double",
95 init_vector_type (bt->builtin_double, 2));
96 append_composite_type_field (t, "v4_float",
97 init_vector_type (bt->builtin_float, 4));
98
99 TYPE_VECTOR (t) = 1;
100 TYPE_NAME (t) = "spu_builtin_type_vec128";
101
102 tdep->spu_builtin_type_vec128 = t;
103 }
104
105 return tdep->spu_builtin_type_vec128;
106 }
107
108
/* Command list holding the "info spu" sub-commands.  */
static struct cmd_list_element *infospucmdlist = NULL;
111
112 /* Registers. */
113
/* Return the name of register REG_NR, or NULL if it is out of range.
   Registers 0..127 are the SPU general-purpose registers; the trailing
   entries name the special-purpose and pseudo registers.  */
static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  /* The table holds string literals, which are immutable; declare it
     fully const so it can live in read-only storage.  */
  static const char *const register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= sizeof register_names / sizeof *register_names)
    return NULL;

  return register_names[reg_nr];
}
145
146 static struct type *
147 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
148 {
149 if (reg_nr < SPU_NUM_GPRS)
150 return spu_builtin_type_vec128 (gdbarch);
151
152 switch (reg_nr)
153 {
154 case SPU_ID_REGNUM:
155 return builtin_type (gdbarch)->builtin_uint32;
156
157 case SPU_PC_REGNUM:
158 return builtin_type (gdbarch)->builtin_func_ptr;
159
160 case SPU_SP_REGNUM:
161 return builtin_type (gdbarch)->builtin_data_ptr;
162
163 case SPU_FPSCR_REGNUM:
164 return builtin_type (gdbarch)->builtin_uint128;
165
166 case SPU_SRR0_REGNUM:
167 return builtin_type (gdbarch)->builtin_uint32;
168
169 case SPU_LSLR_REGNUM:
170 return builtin_type (gdbarch)->builtin_uint32;
171
172 case SPU_DECR_REGNUM:
173 return builtin_type (gdbarch)->builtin_uint32;
174
175 case SPU_DECR_STATUS_REGNUM:
176 return builtin_type (gdbarch)->builtin_uint32;
177
178 default:
179 internal_error (__FILE__, __LINE__, "invalid regnum");
180 }
181 }
182
183 /* Pseudo registers for preferred slots - stack pointer. */
184
185 static void
186 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
187 gdb_byte *buf)
188 {
189 struct gdbarch *gdbarch = get_regcache_arch (regcache);
190 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
191 gdb_byte reg[32];
192 char annex[32];
193 ULONGEST id;
194
195 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
196 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
197 memset (reg, 0, sizeof reg);
198 target_read (&current_target, TARGET_OBJECT_SPU, annex,
199 reg, 0, sizeof reg);
200
201 store_unsigned_integer (buf, 4, byte_order, strtoulst (reg, NULL, 16));
202 }
203
204 static void
205 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
206 int regnum, gdb_byte *buf)
207 {
208 gdb_byte reg[16];
209 char annex[32];
210 ULONGEST id;
211
212 switch (regnum)
213 {
214 case SPU_SP_REGNUM:
215 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
216 memcpy (buf, reg, 4);
217 break;
218
219 case SPU_FPSCR_REGNUM:
220 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
221 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
222 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
223 break;
224
225 case SPU_SRR0_REGNUM:
226 spu_pseudo_register_read_spu (regcache, "srr0", buf);
227 break;
228
229 case SPU_LSLR_REGNUM:
230 spu_pseudo_register_read_spu (regcache, "lslr", buf);
231 break;
232
233 case SPU_DECR_REGNUM:
234 spu_pseudo_register_read_spu (regcache, "decr", buf);
235 break;
236
237 case SPU_DECR_STATUS_REGNUM:
238 spu_pseudo_register_read_spu (regcache, "decr_status", buf);
239 break;
240
241 default:
242 internal_error (__FILE__, __LINE__, _("invalid regnum"));
243 }
244 }
245
246 static void
247 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
248 const gdb_byte *buf)
249 {
250 struct gdbarch *gdbarch = get_regcache_arch (regcache);
251 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
252 gdb_byte reg[32];
253 char annex[32];
254 ULONGEST id;
255
256 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
257 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
258 xsnprintf (reg, sizeof reg, "0x%s",
259 phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
260 target_write (&current_target, TARGET_OBJECT_SPU, annex,
261 reg, 0, strlen (reg));
262 }
263
264 static void
265 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
266 int regnum, const gdb_byte *buf)
267 {
268 gdb_byte reg[16];
269 char annex[32];
270 ULONGEST id;
271
272 switch (regnum)
273 {
274 case SPU_SP_REGNUM:
275 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
276 memcpy (reg, buf, 4);
277 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
278 break;
279
280 case SPU_FPSCR_REGNUM:
281 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
282 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
283 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
284 break;
285
286 case SPU_SRR0_REGNUM:
287 spu_pseudo_register_write_spu (regcache, "srr0", buf);
288 break;
289
290 case SPU_LSLR_REGNUM:
291 spu_pseudo_register_write_spu (regcache, "lslr", buf);
292 break;
293
294 case SPU_DECR_REGNUM:
295 spu_pseudo_register_write_spu (regcache, "decr", buf);
296 break;
297
298 case SPU_DECR_STATUS_REGNUM:
299 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
300 break;
301
302 default:
303 internal_error (__FILE__, __LINE__, _("invalid regnum"));
304 }
305 }
306
307 /* Value conversion -- access scalar values at the preferred slot. */
308
309 static struct value *
310 spu_value_from_register (struct type *type, int regnum,
311 struct frame_info *frame)
312 {
313 struct value *value = default_value_from_register (type, regnum, frame);
314 int len = TYPE_LENGTH (type);
315
316 if (regnum < SPU_NUM_GPRS && len < 16)
317 {
318 int preferred_slot = len < 4 ? 4 - len : 0;
319 set_value_offset (value, preferred_slot);
320 }
321
322 return value;
323 }
324
325 /* Register groups. */
326
327 static int
328 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
329 struct reggroup *group)
330 {
331 /* Registers displayed via 'info regs'. */
332 if (group == general_reggroup)
333 return 1;
334
335 /* Registers displayed via 'info float'. */
336 if (group == float_reggroup)
337 return 0;
338
339 /* Registers that need to be saved/restored in order to
340 push or pop frames. */
341 if (group == save_reggroup || group == restore_reggroup)
342 return 1;
343
344 return default_register_reggroup_p (gdbarch, regnum, group);
345 }
346
347
/* Address handling.  */

/* Return the spufs ID for GDBARCH, falling back to the relocated
   address range of a standalone SPU executable when the tdep does
   not carry one.  */
static int
spu_gdbarch_id (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int id = tdep->id;

  /* The objfile architecture of a standalone SPU executable does not
     provide an SPU ID.  Retrieve it from the objfile's relocated
     address range in this special case.  */
  if (id == -1
      && symfile_objfile && symfile_objfile->obfd
      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
      && symfile_objfile->sections != symfile_objfile->sections_end)
    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));

  return id;
}
367
368 static int
369 spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
370 {
371 if (dwarf2_addr_class == 1)
372 return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
373 else
374 return 0;
375 }
376
377 static const char *
378 spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
379 {
380 if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
381 return "__ea";
382 else
383 return NULL;
384 }
385
386 static int
387 spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
388 const char *name, int *type_flags_ptr)
389 {
390 if (strcmp (name, "__ea") == 0)
391 {
392 *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
393 return 1;
394 }
395 else
396 return 0;
397 }
398
399 static void
400 spu_address_to_pointer (struct gdbarch *gdbarch,
401 struct type *type, gdb_byte *buf, CORE_ADDR addr)
402 {
403 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
404 store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
405 SPUADDR_ADDR (addr));
406 }
407
408 static CORE_ADDR
409 spu_pointer_to_address (struct gdbarch *gdbarch,
410 struct type *type, const gdb_byte *buf)
411 {
412 int id = spu_gdbarch_id (gdbarch);
413 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
414 ULONGEST addr
415 = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);
416
417 /* Do not convert __ea pointers. */
418 if (TYPE_ADDRESS_CLASS_1 (type))
419 return addr;
420
421 return addr? SPUADDR (id, addr) : 0;
422 }
423
424 static CORE_ADDR
425 spu_integer_to_address (struct gdbarch *gdbarch,
426 struct type *type, const gdb_byte *buf)
427 {
428 int id = spu_gdbarch_id (gdbarch);
429 ULONGEST addr = unpack_long (type, buf);
430
431 return SPUADDR (id, addr);
432 }
433
434
/* Decoding SPU instructions.  */

/* Opcode values for the instructions the prologue/epilogue scanners
   and the branch decoder care about.  */
enum
  {
    /* Quadword loads.  */
    op_lqd = 0x34,
    op_lqx = 0x3c4,
    op_lqa = 0x61,
    op_lqr = 0x67,
    /* Quadword stores.  */
    op_stqd = 0x24,
    op_stqx = 0x144,
    op_stqa = 0x41,
    op_stqr = 0x47,

    /* Immediate loads and adds, used for stack-frame setup.  */
    op_il = 0x081,
    op_ila = 0x21,
    op_a = 0x0c0,
    op_ai = 0x1c,

    /* Select bits; _start uses this to set up the stack pointer.  */
    op_selb = 0x4,

    /* PC-relative and absolute branches.  */
    op_br = 0x64,
    op_bra = 0x60,
    op_brsl = 0x66,
    op_brasl = 0x62,
    op_brnz = 0x42,
    op_brz = 0x40,
    op_brhnz = 0x46,
    op_brhz = 0x44,
    /* Indirect branches through a register.  */
    op_bi = 0x1a8,
    op_bisl = 0x1a9,
    op_biz = 0x128,
    op_binz = 0x129,
    op_bihz = 0x12a,
    op_bihnz = 0x12b,
  };
470
/* If INSN is an RR-format instruction with opcode OP, extract the
   target and source register numbers into *RT, *RA, *RB and return
   non-zero; otherwise return zero.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  return 1;
}
484
/* If INSN is an RRR-format instruction with opcode OP, extract the
   four register operands into *RT, *RA, *RB, *RC and return non-zero;
   otherwise return zero.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) != op)
    return 0;

  *rt = (insn >> 21) & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  *rc = insn & 127;
  return 1;
}
499
/* If INSN is an RI7-format instruction with opcode OP, extract the
   registers into *RT, *RA and the sign-extended 7-bit immediate into
   *I7, returning non-zero; otherwise return zero.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick sign-extends the 7-bit field.  */
  *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
  return 1;
}
513
/* If INSN is an RI10-format instruction with opcode OP, extract the
   registers into *RT, *RA and the sign-extended 10-bit immediate into
   *I10, returning non-zero; otherwise return zero.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick sign-extends the 10-bit field.  */
  *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
  return 1;
}
527
/* If INSN is an RI16-format instruction with opcode OP, extract the
   register into *RT and the sign-extended 16-bit immediate into *I16,
   returning non-zero; otherwise return zero.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick sign-extends the 16-bit field.  */
  *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
  return 1;
}
540
/* If INSN is an RI18-format instruction with opcode OP, extract the
   register into *RT and the sign-extended 18-bit immediate into *I18,
   returning non-zero; otherwise return zero.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick sign-extends the 18-bit field.  */
  *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
  return 1;
}
553
554 static int
555 is_branch (unsigned int insn, int *offset, int *reg)
556 {
557 int rt, i7, i16;
558
559 if (is_ri16 (insn, op_br, &rt, &i16)
560 || is_ri16 (insn, op_brsl, &rt, &i16)
561 || is_ri16 (insn, op_brnz, &rt, &i16)
562 || is_ri16 (insn, op_brz, &rt, &i16)
563 || is_ri16 (insn, op_brhnz, &rt, &i16)
564 || is_ri16 (insn, op_brhz, &rt, &i16))
565 {
566 *reg = SPU_PC_REGNUM;
567 *offset = i16 << 2;
568 return 1;
569 }
570
571 if (is_ri16 (insn, op_bra, &rt, &i16)
572 || is_ri16 (insn, op_brasl, &rt, &i16))
573 {
574 *reg = -1;
575 *offset = i16 << 2;
576 return 1;
577 }
578
579 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
580 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
581 || is_ri7 (insn, op_biz, &rt, reg, &i7)
582 || is_ri7 (insn, op_binz, &rt, reg, &i7)
583 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
584 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
585 {
586 *offset = 0;
587 return 1;
588 }
589
590 return 0;
591 }
592
593
/* Prolog parsing.  */

/* Result of analyzing a function prologue (see spu_analyze_prologue).  */
struct spu_prologue_data
  {
    /* Stack frame size.  -1 if analysis was unsuccessful.  */
    int size;

    /* How to find the CFA.  The CFA is equal to SP at function entry.
       CFA_REG is -1 when the CFA could not be tracked.  */
    int cfa_reg;
    int cfa_offset;

    /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
    int reg_offset[SPU_NUM_GPRS];
  };
608
/* Analyze the prologue of the function starting at START_PC, scanning
   no further than END_PC, and fill in DATA with the frame size, CFA
   location, and register save slots found.  Return the address of the
   first instruction past the prologue, or START_PC if no prologue
   instruction was recognized.  */
static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
		      CORE_ADDR start_pc, CORE_ADDR end_pc,
		      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int found_bc = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
    reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

	- The first instruction to set up the stack pointer.
	- The first instruction to set up the frame pointer.
	- The first instruction to save the link register.
	- The first instruction to save the backchain.

     We return the instruction after the latest of these four,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

      - Any instruction adding to the current frame pointer.
      - Any instruction loading an immediate constant into a register.
      - Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      if (target_read_memory (pc, buf, 4))
	break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
	 It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    data->cfa_offset -= immed;

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      data->size = -immed;
	    }
	  else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
		   && !found_fp)
	    {
	      found_fp = 1;
	      prolog_pc = pc + 4;

	      data->cfa_reg = SPU_FP_REGNUM;
	      data->cfa_offset -= immed;
	    }
	}

      /* A is used to set up stack frames of size >= 512 bytes.
	 If we have tracked the contents of the addend register,
	 we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    {
	      if (reg_immed[rb] != 0)
		data->cfa_offset -= reg_immed[rb];
	      else
		data->cfa_reg = -1;  /* We don't know the CFA any more.  */
	    }

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      if (reg_immed[rb] != 0)
		data->size = -reg_immed[rb];
	    }
	}

      /* We need to track IL and ILA used to load immediate constants
	 in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
	{
	  reg_immed[rt] = immed;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      else if (is_ri18 (insn, op_ila, &rt, &immed))
	{
	  reg_immed[rt] = immed & 0x3ffff;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
	{
	  if (ra == data->cfa_reg)
	    data->reg_offset[rt] = data->cfa_offset - (immed << 4);

	  if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
	      && !found_lr)
	    {
	      found_lr = 1;
	      prolog_pc = pc + 4;
	    }

	  if (ra == SPU_RAW_SP_REGNUM
	      && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
	      && !found_bc)
	    {
	      found_bc = 1;
	      prolog_pc = pc + 4;
	    }
	}

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
	{
	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
	break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}
792
793 /* Return the first instruction after the prologue starting at PC. */
794 static CORE_ADDR
795 spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
796 {
797 struct spu_prologue_data data;
798 return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
799 }
800
801 /* Return the frame pointer in use at address PC. */
802 static void
803 spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
804 int *reg, LONGEST *offset)
805 {
806 struct spu_prologue_data data;
807 spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
808
809 if (data.size != -1 && data.cfa_reg != -1)
810 {
811 /* The 'frame pointer' address is CFA minus frame size. */
812 *reg = data.cfa_reg;
813 *offset = data.cfa_offset - data.size;
814 }
815 else
816 {
817 /* ??? We don't really know ... */
818 *reg = SPU_SP_REGNUM;
819 *offset = 0;
820 }
821 }
822
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
       a) If you find an instruction that modifies the stack pointer
	  or transfers control (except a return), execution is not in
	  an epilogue, return.
       b) Stop scanning if you find a return instruction or reach the
	  end of the function or reach the hard limit for the size of
	  an epilogue.
   2) scan backward from the point of execution:
	a) If you find an instruction that modifies the stack pointer,
	   execution *is* in an epilogue, return.
	b) Stop scanning if you reach an instruction that transfers
	   control or the beginning of the function or reach the hard
	   limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, rc, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	{
	  /* A return is an indirect branch through the link register
	     with no offset; any other branch rules out an epilogue.  */
	  if (immed == 0 && ra == SPU_LR_REGNUM)
	    break;

	  return 0;
	}

      /* Any SP adjustment or stack access before the return means we
	 are not past the frame teardown yet.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 0;
	}
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	return 0;

      /* An earlier instruction writing the raw SP is the frame
	 teardown; we are in the epilogue.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 1;
	}
    }

  return 0;
}
917
918
/* Normal stack frames.  */

/* Per-frame unwind cache, built by spu_frame_unwind_cache.  */
struct spu_unwind_cache
{
  /* Start address of the function this frame belongs to (or the frame
     PC when the function could not be identified).  */
  CORE_ADDR func;
  /* The CFA (SPU-tagged address); 0 if the frame was not found.  */
  CORE_ADDR frame_base;
  /* Base of the local variable area: CFA minus frame size.  */
  CORE_ADDR local_base;

  /* Locations of registers saved by this frame.  */
  struct trad_frame_saved_reg *saved_regs;
};
929
/* Build (or return the cached) spu_unwind_cache for THIS_FRAME.  Uses
   prologue analysis when possible, and falls back to following the
   backchain link stored at the stack pointer otherwise.  */
static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
			void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
			  &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
	if (i == SPU_LR_REGNUM
	    || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
	  if (data.reg_offset[i] != -1)
	    info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      ULONGEST lslr;
      int status;

      /* Get local store limit.  */
      lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
      if (!lslr)
	lslr = (ULONGEST) -1;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
					 &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
	 check against the local store size limit.  */
      if (status && backchain > 0 && backchain <= lslr)
	{
	  /* Assume the link register is saved into its slot.  */
	  if (backchain + 16 <= lslr)
	    info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id, backchain + 16);

	  /* Frame bases.  */
	  info->frame_base = SPUADDR (id, backchain);
	  info->local_base = SPUADDR (id, reg);
	}
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
			SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf, 4, byte_order));

  return info;
}
1048
1049 static void
1050 spu_frame_this_id (struct frame_info *this_frame,
1051 void **this_prologue_cache, struct frame_id *this_id)
1052 {
1053 struct spu_unwind_cache *info =
1054 spu_frame_unwind_cache (this_frame, this_prologue_cache);
1055
1056 if (info->frame_base == 0)
1057 return;
1058
1059 *this_id = frame_id_build (info->frame_base, info->func);
1060 }
1061
1062 static struct value *
1063 spu_frame_prev_register (struct frame_info *this_frame,
1064 void **this_prologue_cache, int regnum)
1065 {
1066 struct spu_unwind_cache *info
1067 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
1068
1069 /* Special-case the stack pointer. */
1070 if (regnum == SPU_RAW_SP_REGNUM)
1071 regnum = SPU_SP_REGNUM;
1072
1073 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
1074 }
1075
/* Unwinder for normal SPU stack frames, backed by the prologue
   analyzer and backchain reader above.  */
static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,
  default_frame_sniffer
};
1083
1084 static CORE_ADDR
1085 spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
1086 {
1087 struct spu_unwind_cache *info
1088 = spu_frame_unwind_cache (this_frame, this_cache);
1089 return info->local_base;
1090 }
1091
/* Frame base handler: frame, locals and arguments all use the local
   base computed by the unwinder.  */
static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};
1098
1099 static CORE_ADDR
1100 spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
1101 {
1102 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1103 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
1104 /* Mask off interrupt enable bit. */
1105 return SPUADDR (tdep->id, pc & -4);
1106 }
1107
1108 static CORE_ADDR
1109 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1110 {
1111 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1112 CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1113 return SPUADDR (tdep->id, sp);
1114 }
1115
1116 static CORE_ADDR
1117 spu_read_pc (struct regcache *regcache)
1118 {
1119 struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
1120 ULONGEST pc;
1121 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1122 /* Mask off interrupt enable bit. */
1123 return SPUADDR (tdep->id, pc & -4);
1124 }
1125
1126 static void
1127 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1128 {
1129 /* Keep interrupt enabled state unchanged. */
1130 ULONGEST old_pc;
1131 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1132 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1133 (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
1134 }
1135
1136
1137 /* Cell/B.E. cross-architecture unwinder support. */
1138
/* Cache for the SPU-to-PPU linkage frame: the linkage frame's ID plus
   a snapshot of the PPU-side register state at the transition.  */
struct spu2ppu_cache
{
  struct frame_id frame_id;
  struct regcache *regcache;	/* Owned; freed in spu2ppu_dealloc_cache.  */
};
1144
/* Implement the "prev_arch" frame_unwind method: the architecture of
   the previous frame is that of the saved PPU register cache.  */
static struct gdbarch *
spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
{
  struct spu2ppu_cache *cache = *this_cache;
  return get_regcache_arch (cache->regcache);
}
1151
/* Implement the "this_id" frame_unwind method: return the frame ID
   computed by spu2ppu_sniffer when the cache was built.  */
static void
spu2ppu_this_id (struct frame_info *this_frame,
		 void **this_cache, struct frame_id *this_id)
{
  struct spu2ppu_cache *cache = *this_cache;
  *this_id = cache->frame_id;
}
1159
1160 static struct value *
1161 spu2ppu_prev_register (struct frame_info *this_frame,
1162 void **this_cache, int regnum)
1163 {
1164 struct spu2ppu_cache *cache = *this_cache;
1165 struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
1166 gdb_byte *buf;
1167
1168 buf = alloca (register_size (gdbarch, regnum));
1169 regcache_cooked_read (cache->regcache, regnum, buf);
1170 return frame_unwind_got_bytes (this_frame, regnum, buf);
1171 }
1172
/* Sniffer for the SPU-to-PPU linkage frame.  When combined Cell/B.E.
   debugging is in effect, the end of an SPU-side call chain is marked
   by a zero back-chain word; at that point we synthesize a frame that
   crosses over to the saved PPU register state.  */
static int
spu2ppu_sniffer (const struct frame_unwind *self,
		 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  /* If the target itself is pure SPU, there is no PPU side to cross
     over to; this unwinder only applies in combined debugging.  */
  if (gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  /* A zero back-chain word marks the outermost SPU frame.  */
  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
	= FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      /* Walk frames in the "next" direction looking for one that is
	 already on a non-SPU architecture, and save its registers.  */
      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
	if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
	  break;

      if (fi)
	{
	  cache->regcache = frame_save_as_regcache (fi);
	  *this_prologue_cache = cache;
	  return 1;
	}
      else
	{
	  /* No such frame; fall back to a copy of the thread's current
	     PPU register state.  */
	  struct regcache *regcache;
	  regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
	  cache->regcache = regcache_dup (regcache);
	  *this_prologue_cache = cache;
	  return 1;
	}
    }

  return 0;
}
1222
/* Implement the "dealloc_cache" frame_unwind method: release the PPU
   register cache owned by the spu2ppu frame cache.  */
static void
spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct spu2ppu_cache *cache = this_cache;
  regcache_xfree (cache->regcache);
}
1229
/* Cross-architecture unwinder from SPU frames to the PPU context.  */
static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,				/* unwind_data  */
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};
1239
1240
1241 /* Function calling convention. */
1242
1243 static CORE_ADDR
1244 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1245 {
1246 return sp & ~15;
1247 }
1248
1249 static CORE_ADDR
1250 spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1251 struct value **args, int nargs, struct type *value_type,
1252 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1253 struct regcache *regcache)
1254 {
1255 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1256 sp = (sp - 4) & ~15;
1257 /* Store the address of that breakpoint */
1258 *bp_addr = sp;
1259 /* The call starts at the callee's entry point. */
1260 *real_pc = funaddr;
1261
1262 return sp;
1263 }
1264
1265 static int
1266 spu_scalar_value_p (struct type *type)
1267 {
1268 switch (TYPE_CODE (type))
1269 {
1270 case TYPE_CODE_INT:
1271 case TYPE_CODE_ENUM:
1272 case TYPE_CODE_RANGE:
1273 case TYPE_CODE_CHAR:
1274 case TYPE_CODE_BOOL:
1275 case TYPE_CODE_PTR:
1276 case TYPE_CODE_REF:
1277 return TYPE_LENGTH (type) <= 16;
1278
1279 default:
1280 return 0;
1281 }
1282 }
1283
1284 static void
1285 spu_value_to_regcache (struct regcache *regcache, int regnum,
1286 struct type *type, const gdb_byte *in)
1287 {
1288 int len = TYPE_LENGTH (type);
1289
1290 if (spu_scalar_value_p (type))
1291 {
1292 int preferred_slot = len < 4 ? 4 - len : 0;
1293 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1294 }
1295 else
1296 {
1297 while (len >= 16)
1298 {
1299 regcache_cooked_write (regcache, regnum++, in);
1300 in += 16;
1301 len -= 16;
1302 }
1303
1304 if (len > 0)
1305 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1306 }
1307 }
1308
1309 static void
1310 spu_regcache_to_value (struct regcache *regcache, int regnum,
1311 struct type *type, gdb_byte *out)
1312 {
1313 int len = TYPE_LENGTH (type);
1314
1315 if (spu_scalar_value_p (type))
1316 {
1317 int preferred_slot = len < 4 ? 4 - len : 0;
1318 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1319 }
1320 else
1321 {
1322 while (len >= 16)
1323 {
1324 regcache_cooked_read (regcache, regnum++, out);
1325 out += 16;
1326 len -= 16;
1327 }
1328
1329 if (len > 0)
1330 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1331 }
1332 }
1333
/* Implement the "push_dummy_call" gdbarch method: set up registers
   and stack for an inferior function call.  Arguments are passed in
   quadword registers SPU_ARG1_REGNUM..SPU_ARGN_REGNUM; once one
   argument no longer fits, it and all following arguments go to the
   stack in 16-byte slots.  Returns the new stack pointer.  */
static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr,
		     int nargs, struct value **args, CORE_ADDR sp,
		     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;		/* Index of first stack-passed argument.  */
  gdb_byte buf[16];

  /* Set the return address (the breakpoint set up by push_dummy_code).  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int len = TYPE_LENGTH (type);
      int n_regs = align_up (len, 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
	 all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
	{
	  stack_arg = i;
	  break;
	}

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
	{
	  struct type *type = check_typedef (value_type (args[i]));
	  sp -= align_up (TYPE_LENGTH (type), 16);
	}

      /* Fill in stack arguments.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
	{
	  struct value *arg = args[i];
	  struct type *type = check_typedef (value_type (arg));
	  int len = TYPE_LENGTH (type);
	  int preferred_slot;

	  /* Scalars sit in the preferred slot of their 16-byte stack
	     slot, mirroring the in-register layout.  */
	  if (spu_scalar_value_p (type))
	    preferred_slot = len < 4 ? 4 - len : 0;
	  else
	    preferred_slot = 0;

	  target_write_memory (ap + preferred_slot, value_contents (arg), len);
	  ap += align_up (TYPE_LENGTH (type), 16);
	}
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register.  The raw SP is a
     128-bit register; apply the same delta to each of its four 32-bit
     words so they stay consistent.  */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}
1432
1433 static struct frame_id
1434 spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1435 {
1436 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1437 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1438 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1439 return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
1440 }
1441
1442 /* Function return value access. */
1443
1444 static enum return_value_convention
1445 spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
1446 struct type *type, struct regcache *regcache,
1447 gdb_byte *out, const gdb_byte *in)
1448 {
1449 enum return_value_convention rvc;
1450
1451 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1452 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1453 else
1454 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1455
1456 if (in)
1457 {
1458 switch (rvc)
1459 {
1460 case RETURN_VALUE_REGISTER_CONVENTION:
1461 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1462 break;
1463
1464 case RETURN_VALUE_STRUCT_CONVENTION:
1465 error ("Cannot set function return value.");
1466 break;
1467 }
1468 }
1469 else if (out)
1470 {
1471 switch (rvc)
1472 {
1473 case RETURN_VALUE_REGISTER_CONVENTION:
1474 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1475 break;
1476
1477 case RETURN_VALUE_STRUCT_CONVENTION:
1478 error ("Function return value unknown.");
1479 break;
1480 }
1481 }
1482
1483 return rvc;
1484 }
1485
1486
1487 /* Breakpoints. */
1488
/* Implement the "breakpoint_from_pc" gdbarch method: return the
   software breakpoint instruction to insert at *PCPTR (unchanged),
   setting *LENPTR to its length.  */
static const gdb_byte *
spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
{
  /* Instruction pattern used by GDB as the SPU breakpoint.  */
  static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };

  *lenptr = sizeof breakpoint;
  return breakpoint;
}
1497
/* Implement the "memory_remove_breakpoint" gdbarch method.  */
static int
spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
			      struct bp_target_info *bp_tgt)
{
  /* We work around a problem in combined Cell/B.E. debugging here.  Consider
     that in a combined application, we have some breakpoints inserted in SPU
     code, and now the application forks (on the PPU side).  GDB common code
     will assume that the fork system call copied all breakpoints into the new
     process' address space, and that all those copies now need to be removed
     (see breakpoint.c:detach_breakpoints).

     While this is certainly true for PPU side breakpoints, it is not true
     for SPU side breakpoints.  fork will clone the SPU context file
     descriptors, so that all the existing SPU contexts are in accessible
     in the new process.  However, the contents of the SPU contexts themselves
     are *not* cloned.  Therefore the effect of detach_breakpoints is to
     remove SPU breakpoints from the *original* SPU context's local store
     -- this is not the correct behaviour.

     The workaround is to check whether the PID we are asked to remove this
     breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
     PID of the current inferior (i.e. current_inferior ()->pid).  This is only
     true in the context of detach_breakpoints.  If so, we simply do nothing.
     [ Note that for the fork child process, it does not matter if breakpoints
     remain inserted, because those SPU contexts are not runnable anyway --
     the Linux kernel allows only the original process to invoke spu_run.  */

  if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
    return 0;

  return default_memory_remove_breakpoint (gdbarch, bp_tgt);
}
1530
1531
1532 /* Software single-stepping support. */
1533
/* Implement software single-step: insert a single-step breakpoint on
   the next sequential instruction and, if the current instruction is
   a branch, on the branch target as well.  Always returns 1.  */
static int
spu_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];
  ULONGEST lslr;

  pc = get_frame_pc (frame);

  /* If we cannot read the current instruction, just report success so
     the caller resumes; there is nothing useful we can insert.  */
  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4, byte_order);

  /* Get local store limit.  A zero LSLR means "no limit set"; use an
     all-ones mask in that case.  */
  lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
  if (!lslr)
    lslr = (ULONGEST) -1;

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side.  */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
  else
    next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;

  insert_single_step_breakpoint (gdbarch,
				 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));

  if (is_branch (insn, &offset, &reg))
    {
      CORE_ADDR target = offset;

      /* REG == SPU_PC_REGNUM marks a PC-relative branch; any other
	 valid register supplies an indirect target; REG == -1 leaves
	 OFFSET as an absolute address.  */
      if (reg == SPU_PC_REGNUM)
	target += SPUADDR_ADDR (pc);
      else if (reg != -1)
	{
	  get_frame_register_bytes (frame, reg, 0, 4, buf);
	  target += extract_unsigned_integer (buf, 4, byte_order) & -4;
	}

      target = target & lslr;
      /* Don't insert a second breakpoint at the same address.  */
      if (target != next_pc)
	insert_single_step_breakpoint (gdbarch, aspace,
				       SPUADDR (SPUADDR_SPU (pc), target));
    }

  return 1;
}
1588
1589
1590 /* Longjmp support. */
1591
1592 static int
1593 spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1594 {
1595 struct gdbarch *gdbarch = get_frame_arch (frame);
1596 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1597 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1598 gdb_byte buf[4];
1599 CORE_ADDR jb_addr;
1600
1601 /* Jump buffer is pointed to by the argument register $r3. */
1602 get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf);
1603 jb_addr = extract_unsigned_integer (buf, 4, byte_order);
1604 if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
1605 return 0;
1606
1607 *pc = extract_unsigned_integer (buf, 4, byte_order);
1608 *pc = SPUADDR (tdep->id, *pc);
1609 return 1;
1610 }
1611
1612
1613 /* Disassembler. */
1614
/* State passed to the disassembler's print_address callback via
   disassemble_info->application_data.  */
struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;			/* SPU context ID to re-encode into addresses.  */
};
1620
/* print_address_func callback: re-attach the SPU context ID (stripped
   by the opcodes disassembler's address arithmetic) before printing.  */
static void
spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
{
  struct spu_dis_asm_data *data = info->application_data;
  print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
}
1627
/* Disassemble one SPU instruction at MEMADDR, printing via INFO.  */
static int
gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
{
  /* The opcodes disassembler does 18-bit address arithmetic.  Make sure the
     SPU ID encoded in the high bits is added back when we call print_address.  */
  struct disassemble_info spu_info = *info;
  struct spu_dis_asm_data data;
  /* NOTE(review): assumes the caller stored the gdbarch in
     application_data -- confirm against the gdbarch print_insn setup.  */
  data.gdbarch = info->application_data;
  data.id = SPUADDR_SPU (memaddr);

  /* Use a local copy of INFO so our callback and data pointer don't
     leak back to the caller.  */
  spu_info.application_data = &data;
  spu_info.print_address_func = spu_dis_asm_print_address;
  return print_insn_spu (memaddr, &spu_info);
}
1642
1643
1644 /* Target overlays for the SPU overlay manager.
1645
1646 See the documentation of simple_overlay_update for how the
1647 interface is supposed to work.
1648
1649 Data structures used by the overlay manager:
1650
1651 struct ovly_table
1652 {
1653 u32 vma;
1654 u32 size;
1655 u32 pos;
1656 u32 buf;
1657 } _ovly_table[]; -- one entry per overlay section
1658
1659 struct ovly_buf_table
1660 {
1661 u32 mapped;
1662 } _ovly_buf_table[]; -- one entry per overlay buffer
1663
1664 _ovly_table should never change.
1665
1666 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1667 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1668 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1669
1670 mapped is an index into _ovly_table. Both the mapped and buf indices start
1671 from one to reference the first entry in their respective tables. */
1672
1673 /* Using the per-objfile private data mechanism, we store for each
1674 objfile an array of "struct spu_overlay_table" structures, one
1675 for each obj_section of the objfile. This structure holds two
1676 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1677 is *not* an overlay section. If it is non-zero, it represents
1678 a target address. The overlay section is mapped iff the target
1679 integer at this location equals MAPPED_VAL. */
1680
/* Key for the per-objfile array of spu_overlay_table entries (one per
   obj_section), attached via the objfile_data mechanism.  */
static const struct objfile_data *spu_overlay_data;

struct spu_overlay_table
  {
    CORE_ADDR mapped_ptr;	/* Target address of the section's
				   _ovly_buf_table "mapped" word, or zero
				   for non-overlay sections.  */
    CORE_ADDR mapped_val;	/* Value of that word when mapped.  */
  };
1688
/* Retrieve the overlay table for OBJFILE.  If not already cached, read
   the _ovly_table data structure from the target and initialize the
   spu_overlay_table data structure from it.  Returns NULL if OBJFILE
   does not use the SPU overlay manager.  */
static struct spu_overlay_table *
spu_get_overlay_table (struct objfile *objfile)
{
  enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
  CORE_ADDR ovly_table_base, ovly_buf_table_base;
  unsigned ovly_table_size, ovly_buf_table_size;
  struct spu_overlay_table *tbl;
  struct obj_section *osect;
  char *ovly_table;
  int i;

  /* Return the cached table if we have built it before.  */
  tbl = objfile_data (objfile, spu_overlay_data);
  if (tbl)
    return tbl;

  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
  if (!ovly_table_msym)
    return NULL;

  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
  if (!ovly_buf_table_msym)
    return NULL;

  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);

  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);

  /* Read the target's _ovly_table: one 16-byte entry (vma, size, pos,
     buf) per overlay section -- see the comment above.  */
  ovly_table = xmalloc (ovly_table_size);
  read_memory (ovly_table_base, ovly_table, ovly_table_size);

  /* One zero-initialized entry per obj_section; mapped_ptr stays zero
     for sections that are not overlays.  */
  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
			objfile->sections_end - objfile->sections,
			struct spu_overlay_table);

  for (i = 0; i < ovly_table_size / 16; i++)
    {
      CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0,
						4, byte_order);
      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
						 4, byte_order);
      CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8,
						4, byte_order);
      CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12,
						4, byte_order);

      /* The buf index is 1-based; skip out-of-range entries.  */
      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
	continue;

      /* Match this _ovly_table entry against an obj_section by VMA
	 and file position.  */
      ALL_OBJFILE_OSECTIONS (objfile, osect)
	if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
	    && pos == osect->the_bfd_section->filepos)
	  {
	    int ndx = osect - objfile->sections;
	    tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
	    tbl[ndx].mapped_val = i + 1;
	    break;
	  }
    }

  xfree (ovly_table);
  set_objfile_data (objfile, spu_overlay_data, tbl);
  return tbl;
}
1759
/* Read the _ovly_buf_table entry from the target to determine whether
   OSECT is currently mapped, and update its mapped state.  */
static void
spu_overlay_update_osect (struct obj_section *osect)
{
  enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct spu_overlay_table *ovly_table;
  CORE_ADDR id, val;

  ovly_table = spu_get_overlay_table (osect->objfile);
  if (!ovly_table)
    return;

  /* Index the per-section table; mapped_ptr == 0 means OSECT is not
     an overlay section at all.  */
  ovly_table += osect - osect->objfile->sections;
  if (ovly_table->mapped_ptr == 0)
    return;

  /* OSECT is mapped iff the target word at mapped_ptr equals this
     section's mapped_val.  */
  id = SPUADDR_SPU (obj_section_addr (osect));
  val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
				      4, byte_order);
  osect->ovly_mapped = (val == ovly_table->mapped_val);
}
1783
1784 /* If OSECT is NULL, then update all sections' mapped state.
1785 If OSECT is non-NULL, then update only OSECT's mapped state. */
1786 static void
1787 spu_overlay_update (struct obj_section *osect)
1788 {
1789 /* Just one section. */
1790 if (osect)
1791 spu_overlay_update_osect (osect);
1792
1793 /* All sections. */
1794 else
1795 {
1796 struct objfile *objfile;
1797
1798 ALL_OBJSECTIONS (objfile, osect)
1799 if (section_is_overlay (osect))
1800 spu_overlay_update_osect (osect);
1801 }
1802 }
1803
/* Whenever a new objfile is loaded, read the target's _ovly_table.
   If there is one, go through all sections and make sure for non-
   overlay sections LMA equals VMA, while for overlay sections LMA
   is larger than SPU_OVERLAY_LMA.  */
static void
spu_overlay_new_objfile (struct objfile *objfile)
{
  struct spu_overlay_table *ovly_table;
  struct obj_section *osect;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
    return;

  /* Consider only SPU objfiles.  */
  if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* Check if this objfile has overlays.  */
  ovly_table = spu_get_overlay_table (objfile);
  if (!ovly_table)
    return;

  /* Now go and fiddle with all the LMAs.  */
  ALL_OBJFILE_OSECTIONS (objfile, osect)
    {
      bfd *obfd = objfile->obfd;
      asection *bsect = osect->the_bfd_section;
      int ndx = osect - objfile->sections;

      /* Non-overlay sections (mapped_ptr == 0) get LMA == VMA so the
	 generic overlay machinery treats them as always mapped;
	 overlay sections get a distinct LMA above SPU_OVERLAY_LMA.  */
      if (ovly_table[ndx].mapped_ptr == 0)
	bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
      else
	bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
    }
}
1840
1841
/* Insert temporary breakpoint on "main" function of newly loaded
   SPE context OBJFILE, if "set spu stop-on-load" is on.  */
static void
spu_catch_start (struct objfile *objfile)
{
  struct minimal_symbol *minsym;
  struct symtab *symtab;
  CORE_ADDR pc;
  char buf[32];

  /* Do this only if requested by "set spu stop-on-load on".  */
  if (!spu_stop_on_load_p)
    return;

  /* Consider only SPU objfiles.  */
  if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* The main objfile is handled differently.  */
  if (objfile == symfile_objfile)
    return;

  /* There can be multiple symbols named "main".  Search for the
     "main" in *this* objfile.  */
  minsym = lookup_minimal_symbol ("main", NULL, objfile);
  if (!minsym)
    return;

  /* If we have debugging information, try to use it -- this
     will allow us to properly skip the prologue.  */
  pc = SYMBOL_VALUE_ADDRESS (minsym);
  symtab = find_pc_sect_symtab (pc, SYMBOL_OBJ_SECTION (minsym));
  if (symtab != NULL)
    {
      struct blockvector *bv = BLOCKVECTOR (symtab);
      struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
      struct symbol *sym;
      struct symtab_and_line sal;

      sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
      if (sym)
	{
	  fixup_symbol_section (sym, objfile);
	  /* Prefer the post-prologue address of "main".  */
	  sal = find_function_start_sal (sym, 1);
	  pc = sal.pc;
	}
    }

  /* Use a numerical address for the set_breakpoint command to avoid having
     the breakpoint re-set incorrectly.  */
  xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
  create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
		     NULL /* cond_string */, -1 /* thread */,
		     0 /* parse_condition_and_thread */, 1 /* tempflag */,
		     bp_breakpoint /* type_wanted */,
		     0 /* ignore_count */,
		     AUTO_BOOLEAN_FALSE /* pending_break_support */,
		     NULL /* ops */, 0 /* from_tty */, 1 /* enabled */,
		     0 /* internal */);
}
1902
1903
/* Look up OBJFILE loaded into FRAME's SPU context.  Returns NULL if
   FRAME is not an SPU frame or no matching objfile exists.  */
static struct objfile *
spu_objfile_from_frame (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct objfile *obj;

  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    return NULL;

  ALL_OBJFILES (obj)
    {
      /* The SPU context ID is encoded in the high bits of each
	 section address; compare against this frame's context.  */
      if (obj->sections != obj->sections_end
	  && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
	return obj;
    }

  return NULL;
}
1924
/* Flush cache for ea pointer access if available: call the inferior
   function __cache_flush in the current frame's SPU context, if that
   context's objfile provides one.  */
static void
flush_ea_cache (void)
{
  struct minimal_symbol *msymbol;
  struct objfile *obj;

  if (!has_stack_frames ())
    return;

  obj = spu_objfile_from_frame (get_current_frame ());
  if (obj == NULL)
    return;

  /* Lookup inferior function __cache_flush.  */
  msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
  if (msymbol != NULL)
    {
      struct type *type;
      CORE_ADDR addr;

      /* Build a "pointer to function returning void" type so the
	 call machinery knows the target's signature.  */
      type = objfile_type (obj)->builtin_void;
      type = lookup_function_type (type);
      type = lookup_pointer_type (type);
      addr = SYMBOL_VALUE_ADDRESS (msymbol);

      call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
    }
}
1954
/* normal_stop observer: called whenever the inferior has stopped.
   If the SW-managed EA cache is in use, flush it so PPU-side views
   of memory are consistent.  */
static void
spu_attach_normal_stop (struct bpstats *bs, int print_frame)
{
  if (!spu_auto_flush_cache_p)
    return;

  /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
     re-entering this function when __cache_flush stops.  */
  spu_auto_flush_cache_p = 0;
  flush_ea_cache ();
  spu_auto_flush_cache_p = 1;
}
1969
1970
1971 /* "info spu" commands. */
1972
1973 static void
1974 info_spu_event_command (char *args, int from_tty)
1975 {
1976 struct frame_info *frame = get_selected_frame (NULL);
1977 ULONGEST event_status = 0;
1978 ULONGEST event_mask = 0;
1979 struct cleanup *chain;
1980 gdb_byte buf[100];
1981 char annex[32];
1982 LONGEST len;
1983 int rc, id;
1984
1985 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1986 error (_("\"info spu\" is only supported on the SPU architecture."));
1987
1988 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1989
1990 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1991 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1992 buf, 0, (sizeof (buf) - 1));
1993 if (len <= 0)
1994 error (_("Could not read event_status."));
1995 buf[len] = '\0';
1996 event_status = strtoulst (buf, NULL, 16);
1997
1998 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1999 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2000 buf, 0, (sizeof (buf) - 1));
2001 if (len <= 0)
2002 error (_("Could not read event_mask."));
2003 buf[len] = '\0';
2004 event_mask = strtoulst (buf, NULL, 16);
2005
2006 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
2007
2008 if (ui_out_is_mi_like_p (uiout))
2009 {
2010 ui_out_field_fmt (uiout, "event_status",
2011 "0x%s", phex_nz (event_status, 4));
2012 ui_out_field_fmt (uiout, "event_mask",
2013 "0x%s", phex_nz (event_mask, 4));
2014 }
2015 else
2016 {
2017 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
2018 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
2019 }
2020
2021 do_cleanups (chain);
2022 }
2023
2024 static void
2025 info_spu_signal_command (char *args, int from_tty)
2026 {
2027 struct frame_info *frame = get_selected_frame (NULL);
2028 struct gdbarch *gdbarch = get_frame_arch (frame);
2029 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2030 ULONGEST signal1 = 0;
2031 ULONGEST signal1_type = 0;
2032 int signal1_pending = 0;
2033 ULONGEST signal2 = 0;
2034 ULONGEST signal2_type = 0;
2035 int signal2_pending = 0;
2036 struct cleanup *chain;
2037 char annex[32];
2038 gdb_byte buf[100];
2039 LONGEST len;
2040 int rc, id;
2041
2042 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2043 error (_("\"info spu\" is only supported on the SPU architecture."));
2044
2045 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2046
2047 xsnprintf (annex, sizeof annex, "%d/signal1", id);
2048 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2049 if (len < 0)
2050 error (_("Could not read signal1."));
2051 else if (len == 4)
2052 {
2053 signal1 = extract_unsigned_integer (buf, 4, byte_order);
2054 signal1_pending = 1;
2055 }
2056
2057 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
2058 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2059 buf, 0, (sizeof (buf) - 1));
2060 if (len <= 0)
2061 error (_("Could not read signal1_type."));
2062 buf[len] = '\0';
2063 signal1_type = strtoulst (buf, NULL, 16);
2064
2065 xsnprintf (annex, sizeof annex, "%d/signal2", id);
2066 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2067 if (len < 0)
2068 error (_("Could not read signal2."));
2069 else if (len == 4)
2070 {
2071 signal2 = extract_unsigned_integer (buf, 4, byte_order);
2072 signal2_pending = 1;
2073 }
2074
2075 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
2076 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2077 buf, 0, (sizeof (buf) - 1));
2078 if (len <= 0)
2079 error (_("Could not read signal2_type."));
2080 buf[len] = '\0';
2081 signal2_type = strtoulst (buf, NULL, 16);
2082
2083 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
2084
2085 if (ui_out_is_mi_like_p (uiout))
2086 {
2087 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
2088 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
2089 ui_out_field_int (uiout, "signal1_type", signal1_type);
2090 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
2091 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
2092 ui_out_field_int (uiout, "signal2_type", signal2_type);
2093 }
2094 else
2095 {
2096 if (signal1_pending)
2097 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
2098 else
2099 printf_filtered (_("Signal 1 not pending "));
2100
2101 if (signal1_type)
2102 printf_filtered (_("(Type Or)\n"));
2103 else
2104 printf_filtered (_("(Type Overwrite)\n"));
2105
2106 if (signal2_pending)
2107 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
2108 else
2109 printf_filtered (_("Signal 2 not pending "));
2110
2111 if (signal2_type)
2112 printf_filtered (_("(Type Or)\n"));
2113 else
2114 printf_filtered (_("(Type Overwrite)\n"));
2115 }
2116
2117 do_cleanups (chain);
2118 }
2119
2120 static void
2121 info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
2122 const char *field, const char *msg)
2123 {
2124 struct cleanup *chain;
2125 int i;
2126
2127 if (nr <= 0)
2128 return;
2129
2130 chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
2131
2132 ui_out_table_header (uiout, 32, ui_left, field, msg);
2133 ui_out_table_body (uiout);
2134
2135 for (i = 0; i < nr; i++)
2136 {
2137 struct cleanup *val_chain;
2138 ULONGEST val;
2139 val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
2140 val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
2141 ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
2142 do_cleanups (val_chain);
2143
2144 if (!ui_out_is_mi_like_p (uiout))
2145 printf_filtered ("\n");
2146 }
2147
2148 do_cleanups (chain);
2149 }
2150
2151 static void
2152 info_spu_mailbox_command (char *args, int from_tty)
2153 {
2154 struct frame_info *frame = get_selected_frame (NULL);
2155 struct gdbarch *gdbarch = get_frame_arch (frame);
2156 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2157 struct cleanup *chain;
2158 char annex[32];
2159 gdb_byte buf[1024];
2160 LONGEST len;
2161 int i, id;
2162
2163 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2164 error (_("\"info spu\" is only supported on the SPU architecture."));
2165
2166 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2167
2168 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
2169
2170 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
2171 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2172 buf, 0, sizeof buf);
2173 if (len < 0)
2174 error (_("Could not read mbox_info."));
2175
2176 info_spu_mailbox_list (buf, len / 4, byte_order,
2177 "mbox", "SPU Outbound Mailbox");
2178
2179 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
2180 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2181 buf, 0, sizeof buf);
2182 if (len < 0)
2183 error (_("Could not read ibox_info."));
2184
2185 info_spu_mailbox_list (buf, len / 4, byte_order,
2186 "ibox", "SPU Outbound Interrupt Mailbox");
2187
2188 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
2189 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2190 buf, 0, sizeof buf);
2191 if (len < 0)
2192 error (_("Could not read wbox_info."));
2193
2194 info_spu_mailbox_list (buf, len / 4, byte_order,
2195 "wbox", "SPU Inbound Mailbox");
2196
2197 do_cleanups (chain);
2198 }
2199
2200 static ULONGEST
2201 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
2202 {
2203 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
2204 return (word >> (63 - last)) & mask;
2205 }
2206
/* Display the contents of an MFC command queue as a ui_out table.
   BUF holds NR queue entries of 32 bytes each, in BYTE_ORDER.
   Entries are emitted in dependency order -- an entry appears only
   after every entry it depends on -- rather than in queue-slot
   order.  */
static void
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
{
  /* Mnemonic for each MFC command opcode; NULL marks opcodes without
     a defined mnemonic, which are displayed numerically instead.  */
  static char *spu_mfc_opcode[256] =
  {
  /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
	   "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
  /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
	   "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
  /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
  /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
	   "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
  /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  };

  int *seq = alloca (nr * sizeof (int));
  int done = 0;		/* Bitmask of queue slots already placed in SEQ.  */
  struct cleanup *chain;
  int i, j;


  /* Determine sequence in which to display (valid) entries.  */
  for (i = 0; i < nr; i++)
    {
      /* Search for the first valid entry all of whose
	 dependencies are met.  */
      for (j = 0; j < nr; j++)
	{
	  ULONGEST mfc_cq_dw3;
	  ULONGEST dependencies;

	  /* Skip slots already scheduled into SEQ.  */
	  if (done & (1 << (nr - 1 - j)))
	    continue;

	  mfc_cq_dw3
	    = extract_unsigned_integer (buf + 32*j + 24,8, byte_order);
	  /* Bit 16 of DW3 flags a valid queue entry.  */
	  if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
	    continue;

	  /* The top NR bits of DW3 form the dependency mask; every
	     dependency must already have been scheduled.  */
	  dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
	  if ((dependencies & done) != dependencies)
	    continue;

	  seq[i] = j;
	  done |= 1 << (nr - 1 - j);
	  break;
	}

      /* No further displayable entry was found; stop scheduling.  */
      if (j == nr)
	break;
    }

  /* Only the first I slots of SEQ were filled in.  */
  nr = i;


  chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");

  ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
  ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
  ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
  ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
  ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
  ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
  ui_out_table_header (uiout, 7, ui_left, "size", "Size");
  ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
  ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
  ui_out_table_header (uiout, 1, ui_left, "error_p", "E");

  ui_out_table_body (uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *cmd_chain;
      ULONGEST mfc_cq_dw0;
      ULONGEST mfc_cq_dw1;
      ULONGEST mfc_cq_dw2;
      int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
      int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
      ULONGEST mfc_ea;
      int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;

      /* Decode contents of MFC Command Queue Context Save/Restore Registers.
	 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1.  */

      mfc_cq_dw0
	= extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
      mfc_cq_dw1
	= extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
      mfc_cq_dw2
	= extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);

      list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
      list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
      mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
      mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
      list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
      rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
      tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);

      /* The effective address is split across DW1 and DW2.  */
      mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
	       | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);

      mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
      mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
      noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
      qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
      ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
      cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);

      cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");

      /* Show the mnemonic if the opcode has one, else the raw value.  */
      if (spu_mfc_opcode[mfc_cmd_opcode])
	ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
      else
	ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);

      ui_out_field_int (uiout, "tag", mfc_cmd_tag);
      ui_out_field_int (uiout, "tid", tclass_id);
      ui_out_field_int (uiout, "rid", rclass_id);

      if (ea_valid_p)
	ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
      else
	ui_out_field_skip (uiout, "ea");

      /* LSA and (quadword-aligned) size are stored scaled down by 16.  */
      ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
      if (qw_valid_p)
	ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
      else
	ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);

      if (list_valid_p)
	{
	  /* List address and size are stored scaled down by 8.  */
	  ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
	  ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
	}
      else
	{
	  ui_out_field_skip (uiout, "lstaddr");
	  ui_out_field_skip (uiout, "lstsize");
	}

      if (cmd_error_p)
	ui_out_field_string (uiout, "error_p", "*");
      else
	ui_out_field_skip (uiout, "error_p");

      do_cleanups (cmd_chain);

      /* In CLI mode, terminate each table row explicitly.  */
      if (!ui_out_is_mi_like_p (uiout))
	printf_filtered ("\n");
    }

  do_cleanups (chain);
}
2386
2387 static void
2388 info_spu_dma_command (char *args, int from_tty)
2389 {
2390 struct frame_info *frame = get_selected_frame (NULL);
2391 struct gdbarch *gdbarch = get_frame_arch (frame);
2392 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2393 ULONGEST dma_info_type;
2394 ULONGEST dma_info_mask;
2395 ULONGEST dma_info_status;
2396 ULONGEST dma_info_stall_and_notify;
2397 ULONGEST dma_info_atomic_command_status;
2398 struct cleanup *chain;
2399 char annex[32];
2400 gdb_byte buf[1024];
2401 LONGEST len;
2402 int i, id;
2403
2404 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2405 error (_("\"info spu\" is only supported on the SPU architecture."));
2406
2407 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2408
2409 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2410 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2411 buf, 0, 40 + 16 * 32);
2412 if (len <= 0)
2413 error (_("Could not read dma_info."));
2414
2415 dma_info_type
2416 = extract_unsigned_integer (buf, 8, byte_order);
2417 dma_info_mask
2418 = extract_unsigned_integer (buf + 8, 8, byte_order);
2419 dma_info_status
2420 = extract_unsigned_integer (buf + 16, 8, byte_order);
2421 dma_info_stall_and_notify
2422 = extract_unsigned_integer (buf + 24, 8, byte_order);
2423 dma_info_atomic_command_status
2424 = extract_unsigned_integer (buf + 32, 8, byte_order);
2425
2426 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
2427
2428 if (ui_out_is_mi_like_p (uiout))
2429 {
2430 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
2431 phex_nz (dma_info_type, 4));
2432 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
2433 phex_nz (dma_info_mask, 4));
2434 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
2435 phex_nz (dma_info_status, 4));
2436 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
2437 phex_nz (dma_info_stall_and_notify, 4));
2438 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
2439 phex_nz (dma_info_atomic_command_status, 4));
2440 }
2441 else
2442 {
2443 const char *query_msg = _("no query pending");
2444
2445 if (dma_info_type & 4)
2446 switch (dma_info_type & 3)
2447 {
2448 case 1: query_msg = _("'any' query pending"); break;
2449 case 2: query_msg = _("'all' query pending"); break;
2450 default: query_msg = _("undefined query type"); break;
2451 }
2452
2453 printf_filtered (_("Tag-Group Status 0x%s\n"),
2454 phex (dma_info_status, 4));
2455 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2456 phex (dma_info_mask, 4), query_msg);
2457 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2458 phex (dma_info_stall_and_notify, 4));
2459 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2460 phex (dma_info_atomic_command_status, 4));
2461 printf_filtered ("\n");
2462 }
2463
2464 info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2465 do_cleanups (chain);
2466 }
2467
2468 static void
2469 info_spu_proxydma_command (char *args, int from_tty)
2470 {
2471 struct frame_info *frame = get_selected_frame (NULL);
2472 struct gdbarch *gdbarch = get_frame_arch (frame);
2473 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2474 ULONGEST dma_info_type;
2475 ULONGEST dma_info_mask;
2476 ULONGEST dma_info_status;
2477 struct cleanup *chain;
2478 char annex[32];
2479 gdb_byte buf[1024];
2480 LONGEST len;
2481 int i, id;
2482
2483 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2484 error (_("\"info spu\" is only supported on the SPU architecture."));
2485
2486 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2487
2488 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2489 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2490 buf, 0, 24 + 8 * 32);
2491 if (len <= 0)
2492 error (_("Could not read proxydma_info."));
2493
2494 dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2495 dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2496 dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2497
2498 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2499
2500 if (ui_out_is_mi_like_p (uiout))
2501 {
2502 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2503 phex_nz (dma_info_type, 4));
2504 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2505 phex_nz (dma_info_mask, 4));
2506 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2507 phex_nz (dma_info_status, 4));
2508 }
2509 else
2510 {
2511 const char *query_msg;
2512
2513 switch (dma_info_type & 3)
2514 {
2515 case 0: query_msg = _("no query pending"); break;
2516 case 1: query_msg = _("'any' query pending"); break;
2517 case 2: query_msg = _("'all' query pending"); break;
2518 default: query_msg = _("undefined query type"); break;
2519 }
2520
2521 printf_filtered (_("Tag-Group Status 0x%s\n"),
2522 phex (dma_info_status, 4));
2523 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2524 phex (dma_info_mask, 4), query_msg);
2525 printf_filtered ("\n");
2526 }
2527
2528 info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2529 do_cleanups (chain);
2530 }
2531
2532 static void
2533 info_spu_command (char *args, int from_tty)
2534 {
2535 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2536 help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
2537 }
2538
2539
2540 /* Root of all "set spu "/"show spu " commands. */
2541
/* Base "show spu" command: list all "show spu" subcommands.  */
static void
show_spu_command (char *args, int from_tty)
{
  help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
}
2547
/* Base "set spu" command: list all "set spu" subcommands.  */
static void
set_spu_command (char *args, int from_tty)
{
  help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
}
2553
/* "show spu stop-on-load" callback: report the current setting,
   pre-rendered as the string VALUE.  */
static void
show_spu_stop_on_load (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
		    value);
}
2561
/* "show spu auto-flush-cache" callback: report the current setting,
   pre-rendered as the string VALUE.  */
static void
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
		    value);
}
2569
2570
2571 /* Set up gdbarch struct. */
2572
/* gdbarch initialization callback for bfd_arch_spu: return an
   existing SPU architecture matching INFO (in particular, matching
   the spufs context ID), or allocate and populate a new one.  */
static struct gdbarch *
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;
  struct gdbarch_tdep *tdep;
  int id = -1;

  /* Which spufs ID was requested as address space?  */
  if (info.tdep_info)
    id = *(int *)info.tdep_info;
  /* For objfile architectures of SPU solibs, decode the ID from the name.
     This assumes the filename convention employed by solib-spu.c.  */
  else if (info.abfd)
    {
      char *name = strrchr (info.abfd->filename, '@');
      if (name)
	sscanf (name, "@0x%*x <%d>", &id);
    }

  /* Find a candidate among extant architectures.  Only an architecture
     with the same spufs ID can be reused.  */
  for (arches = gdbarch_list_lookup_by_info (arches, &info);
       arches != NULL;
       arches = gdbarch_list_lookup_by_info (arches->next, &info))
    {
      tdep = gdbarch_tdep (arches->gdbarch);
      if (tdep && tdep->id == id)
	return arches->gdbarch;
    }

  /* None found, so create a new architecture.  */
  tdep = XCALLOC (1, struct gdbarch_tdep);
  tdep->id = id;
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Disassembler.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);

  /* Registers.  */
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
  set_gdbarch_register_name (gdbarch, spu_register_name);
  set_gdbarch_register_type (gdbarch, spu_register_type);
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);

  /* Data types.  */
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_ptr_bit (gdbarch, 32);
  set_gdbarch_addr_bit (gdbarch, 32);
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 64);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);

  /* Address handling.  */
  set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
  set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
  set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
  set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
  set_gdbarch_address_class_type_flags_to_name
    (gdbarch, spu_address_class_type_flags_to_name);
  set_gdbarch_address_class_name_to_type_flags
    (gdbarch, spu_address_class_name_to_type_flags);


  /* Inferior function calls.  */
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 2000);
  set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
  set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
  set_gdbarch_return_value (gdbarch, spu_return_value);

  /* Frame handling.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
  frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
  frame_base_set_default (gdbarch, &spu_frame_base);
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
  set_gdbarch_frame_args_skip (gdbarch, 0);
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);

  /* Cell/B.E. cross-architecture unwinder support.  */
  frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);

  /* Breakpoints.  */
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
  set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint);
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
  set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);

  /* Overlays.  */
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);

  return gdbarch;
}
2686
2687 /* Provide a prototype to silence -Wmissing-prototypes. */
2688 extern initialize_file_ftype _initialize_spu_tdep;
2689
/* Module initialization: register the SPU architecture, hook the
   observers this target needs, and install the "set spu", "show spu",
   and "info spu" command trees.  */
void
_initialize_spu_tdep (void)
{
  register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);

  /* Add ourselves to objfile event chain.  */
  observer_attach_new_objfile (spu_overlay_new_objfile);
  spu_overlay_data = register_objfile_data ();

  /* Install spu stop-on-load handler.  */
  observer_attach_new_objfile (spu_catch_start);

  /* Add ourselves to normal_stop event chain.  */
  observer_attach_normal_stop (spu_attach_normal_stop);

  /* Add root prefix command for all "set spu"/"show spu" commands.  */
  add_prefix_cmd ("spu", no_class, set_spu_command,
		  _("Various SPU specific commands."),
		  &setspucmdlist, "set spu ", 0, &setlist);
  add_prefix_cmd ("spu", no_class, show_spu_command,
		  _("Various SPU specific commands."),
		  &showspucmdlist, "show spu ", 0, &showlist);

  /* Toggle whether or not to add a temporary breakpoint at the "main"
     function of new SPE contexts.  */
  add_setshow_boolean_cmd ("stop-on-load", class_support,
                          &spu_stop_on_load_p, _("\
Set whether to stop for new SPE threads."),
                           _("\
Show whether to stop for new SPE threads."),
                           _("\
Use \"on\" to give control to the user when a new SPE thread\n\
enters its \"main\" function.\n\
Use \"off\" to disable stopping for new SPE threads."),
                          NULL,
                          show_spu_stop_on_load,
                          &setspucmdlist, &showspucmdlist);

  /* Toggle whether or not to automatically flush the software-managed
     cache whenever SPE execution stops.  */
  add_setshow_boolean_cmd ("auto-flush-cache", class_support,
                          &spu_auto_flush_cache_p, _("\
Set whether to automatically flush the software-managed cache."),
                           _("\
Show whether to automatically flush the software-managed cache."),
                           _("\
Use \"on\" to automatically flush the software-managed cache\n\
whenever SPE execution stops.\n\
Use \"off\" to never automatically flush the software-managed cache."),
                          NULL,
                          show_spu_auto_flush_cache,
                          &setspucmdlist, &showspucmdlist);

  /* Add root prefix command for all "info spu" commands.  */
  add_prefix_cmd ("spu", class_info, info_spu_command,
		  _("Various SPU specific commands."),
		  &infospucmdlist, "info spu ", 0, &infolist);

  /* Add various "info spu" commands.  */
  add_cmd ("event", class_info, info_spu_event_command,
	   _("Display SPU event facility status.\n"),
	   &infospucmdlist);
  add_cmd ("signal", class_info, info_spu_signal_command,
	   _("Display SPU signal notification facility status.\n"),
	   &infospucmdlist);
  add_cmd ("mailbox", class_info, info_spu_mailbox_command,
	   _("Display SPU mailbox facility status.\n"),
	   &infospucmdlist);
  add_cmd ("dma", class_info, info_spu_dma_command,
	   _("Display MFC DMA status.\n"),
	   &infospucmdlist);
  add_cmd ("proxydma", class_info, info_spu_proxydma_command,
	   _("Display MFC Proxy-DMA status.\n"),
	   &infospucmdlist);
}