* ppc-linux-tdep.c: Include "solib.h", "solist.h", "exceptions.h",
[binutils-gdb.git] / gdb / spu-tdep.c
1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdbtypes.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "gdb_string.h"
28 #include "gdb_assert.h"
29 #include "frame.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
33 #include "symtab.h"
34 #include "symfile.h"
35 #include "value.h"
36 #include "inferior.h"
37 #include "dis-asm.h"
38 #include "objfiles.h"
39 #include "language.h"
40 #include "regcache.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
43 #include "observer.h"
44
45 #include "spu-tdep.h"
46
47
48 /* The tdep structure. */
/* The tdep structure.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space; -1 if not known
     (e.g. for the objfile architecture of a standalone executable).  */
  int id;

  /* SPU-specific vector type, created lazily by spu_builtin_type_vec128.  */
  struct type *spu_builtin_type_vec128;
};
57
58
59 /* SPU-specific vector type. */
/* Return the SPU-specific 128-bit vector type: a union overlaying all
   the element layouts a 128-bit GPR can hold.  The type is built on
   first use and cached in the tdep structure.  */
static struct type *
spu_builtin_type_vec128 (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->spu_builtin_type_vec128)
    {
      const struct builtin_type *bt = builtin_type (gdbarch);
      struct type *t;

      t = arch_composite_type (gdbarch,
			       "__spu_builtin_type_vec128", TYPE_CODE_UNION);
      append_composite_type_field (t, "uint128", bt->builtin_int128);
      append_composite_type_field (t, "v2_int64",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "v4_int32",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "v8_int16",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "v16_int8",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "v2_double",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (t, "v4_float",
				   init_vector_type (bt->builtin_float, 4));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "spu_builtin_type_vec128";

      tdep->spu_builtin_type_vec128 = t;
    }

  return tdep->spu_builtin_type_vec128;
}
94
95
/* The list of available "info spu " subcommands.  */
static struct cmd_list_element *infospucmdlist = NULL;
98
99 /* Registers. */
100
/* Return the name of register REG_NR, or NULL if REG_NR is not a
   valid SPU register number.  The register set consists of the 128
   general-purpose vector registers followed by the special registers
   id, pc, sp, fpscr, srr0, lslr, decr and decr_status.  */
static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  /* Made const: the table holds string literals, which must never be
     written through, and a const table can live in read-only data.  */
  static const char *const register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  if (reg_nr < 0)
    return NULL;
  /* Cast the array size to int to avoid a signed/unsigned comparison;
     REG_NR is known non-negative here.  */
  if (reg_nr >= (int) (sizeof register_names / sizeof register_names[0]))
    return NULL;

  return register_names[reg_nr];
}
132
133 static struct type *
134 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
135 {
136 if (reg_nr < SPU_NUM_GPRS)
137 return spu_builtin_type_vec128 (gdbarch);
138
139 switch (reg_nr)
140 {
141 case SPU_ID_REGNUM:
142 return builtin_type (gdbarch)->builtin_uint32;
143
144 case SPU_PC_REGNUM:
145 return builtin_type (gdbarch)->builtin_func_ptr;
146
147 case SPU_SP_REGNUM:
148 return builtin_type (gdbarch)->builtin_data_ptr;
149
150 case SPU_FPSCR_REGNUM:
151 return builtin_type (gdbarch)->builtin_uint128;
152
153 case SPU_SRR0_REGNUM:
154 return builtin_type (gdbarch)->builtin_uint32;
155
156 case SPU_LSLR_REGNUM:
157 return builtin_type (gdbarch)->builtin_uint32;
158
159 case SPU_DECR_REGNUM:
160 return builtin_type (gdbarch)->builtin_uint32;
161
162 case SPU_DECR_STATUS_REGNUM:
163 return builtin_type (gdbarch)->builtin_uint32;
164
165 default:
166 internal_error (__FILE__, __LINE__, "invalid regnum");
167 }
168 }
169
170 /* Pseudo registers for preferred slots - stack pointer. */
171
/* Pseudo registers for preferred slots - stack pointer.  */

/* Read pseudo register REGNAME (e.g. "srr0") for the SPU context
   identified by the ID register, via the corresponding spufs file
   exposed as a TARGET_OBJECT_SPU object, and store the 32-bit result
   in BUF.  */
static void
spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
			      gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  /* The spufs file holds the value as an ASCII hex string; the memset
     guarantees NUL termination provided fewer than sizeof reg bytes
     are actually read.  */
  memset (reg, 0, sizeof reg);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
	       reg, 0, sizeof reg);

  store_unsigned_integer (buf, 4, byte_order, strtoulst (reg, NULL, 16));
}
190
/* Implement the gdbarch pseudo_register_read method.  The cooked SP
   is the preferred slot of the raw SP vector register; the remaining
   pseudo registers are fetched from spufs.  */
static void
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
			  int regnum, gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      /* SP is the first word (preferred slot) of the raw SP vector.  */
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (buf, reg, 4);
      break;

    case SPU_FPSCR_REGNUM:
      /* FPSCR is read in binary form (16 bytes) from spufs.  */
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_read_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_read_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_read_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_read_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}
232
/* Write the 32-bit value in BUF to pseudo register REGNAME for the SPU
   context identified by the ID register, by writing an ASCII hex string
   to the corresponding spufs file.  */
static void
spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
			       const gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  /* Format the value as "0x..." and write only the string's length.  */
  xsnprintf (reg, sizeof reg, "0x%s",
	     phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
  target_write (&current_target, TARGET_OBJECT_SPU, annex,
		reg, 0, strlen (reg));
}
250
/* Implement the gdbarch pseudo_register_write method.  Writing the
   cooked SP updates only the preferred slot of the raw SP vector;
   the remaining pseudo registers are written through spufs.  */
static void
spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
			   int regnum, const gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      /* Read-modify-write: preserve the other three words of the raw
	 SP vector register.  */
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (reg, buf, 4);
      regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
      break;

    case SPU_FPSCR_REGNUM:
      /* FPSCR is written in binary form (16 bytes) to spufs.  */
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_write_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}
293
294 /* Value conversion -- access scalar values at the preferred slot. */
295
/* Value conversion -- access scalar values at the preferred slot.  */

/* Implement the gdbarch value_from_register method.  Scalars smaller
   than a full 16-byte vector live in the GPR's preferred slot (the
   first 4-byte word); sub-word values are right-aligned within that
   word, hence the 4 - LEN offset.  */
static struct value *
spu_value_from_register (struct type *type, int regnum,
			 struct frame_info *frame)
{
  struct value *value = default_value_from_register (type, regnum, frame);
  int len = TYPE_LENGTH (type);

  if (regnum < SPU_NUM_GPRS && len < 16)
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      set_value_offset (value, preferred_slot);
    }

  return value;
}
311
312 /* Register groups. */
313
314 static int
315 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
316 struct reggroup *group)
317 {
318 /* Registers displayed via 'info regs'. */
319 if (group == general_reggroup)
320 return 1;
321
322 /* Registers displayed via 'info float'. */
323 if (group == float_reggroup)
324 return 0;
325
326 /* Registers that need to be saved/restored in order to
327 push or pop frames. */
328 if (group == save_reggroup || group == restore_reggroup)
329 return 1;
330
331 return default_register_reggroup_p (gdbarch, regnum, group);
332 }
333
334 /* Address conversion. */
335
/* Address conversion.  */

/* Return the spufs ID for GDBARCH, or -1 if none can be determined.  */
static int
spu_gdbarch_id (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int id = tdep->id;

  /* The objfile architecture of a standalone SPU executable does not
     provide an SPU ID.  Retrieve it from the objfile's relocated
     address range in this special case.  */
  if (id == -1
      && symfile_objfile && symfile_objfile->obfd
      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
      && symfile_objfile->sections != symfile_objfile->sections_end)
    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));

  return id;
}
353
/* Return the value of the local-store limit register (LSLR) for SPU
   context ID, read as an ASCII hex string from spufs.  If ID is
   unknown (-1), assume the full local store is addressable.  */
static ULONGEST
spu_lslr (int id)
{
  gdb_byte buf[32];
  char annex[32];

  if (id == -1)
    return SPU_LS_SIZE - 1;

  xsnprintf (annex, sizeof annex, "%d/lslr", id);
  /* Zero-fill so the buffer is NUL-terminated for strtoulst, provided
     fewer than sizeof buf bytes are read.  */
  memset (buf, 0, sizeof buf);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
	       buf, 0, sizeof buf);

  return strtoulst (buf, NULL, 16);
}
370
/* Implement the gdbarch address_to_pointer method: strip the SPU ID
   portion, storing only the local-store offset into BUF.  */
static void
spu_address_to_pointer (struct gdbarch *gdbarch,
			struct type *type, gdb_byte *buf, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
			  SPUADDR_ADDR (addr));
}
379
/* Implement the gdbarch pointer_to_address method: mask the pointer
   value with the LSLR and combine it with the SPU ID.  A null pointer
   stays null.  */
static CORE_ADDR
spu_pointer_to_address (struct gdbarch *gdbarch,
			struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST lslr = spu_lslr (id);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST addr
    = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);

  return addr? SPUADDR (id, addr & lslr) : 0;
}
392
/* Implement the gdbarch integer_to_address method: mask the integer
   with the LSLR and combine it with the SPU ID.  */
static CORE_ADDR
spu_integer_to_address (struct gdbarch *gdbarch,
			struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST lslr = spu_lslr (id);
  ULONGEST addr = unpack_long (type, buf);

  return SPUADDR (id, addr & lslr);
}
403
404
405 /* Decoding SPU instructions. */
406
/* Primary opcode values of the instructions the prologue/epilogue
   analyzers recognize.  The opcode field width (and thus the shift
   used when matching) depends on the instruction format; see the
   is_rr/is_rrr/is_ri7/is_ri10/is_ri16/is_ri18 helpers below.  */
enum
  {
    /* Quadword loads and stores.  */
    op_lqd = 0x34,
    op_lqx = 0x3c4,
    op_lqa = 0x61,
    op_lqr = 0x67,
    op_stqd = 0x24,
    op_stqx = 0x144,
    op_stqa = 0x41,
    op_stqr = 0x47,

    /* Immediate loads and adds (stack adjustment).  */
    op_il = 0x081,
    op_ila = 0x21,
    op_a = 0x0c0,
    op_ai = 0x1c,

    /* Select bits (used by _start to set up SP).  */
    op_selb = 0x4,

    /* Branches: relative, absolute, and register-indirect forms.  */
    op_br = 0x64,
    op_bra = 0x60,
    op_brsl = 0x66,
    op_brasl = 0x62,
    op_brnz = 0x42,
    op_brz = 0x40,
    op_brhnz = 0x46,
    op_brhz = 0x44,
    op_bi = 0x1a8,
    op_bisl = 0x1a9,
    op_biz = 0x128,
    op_binz = 0x129,
    op_bihz = 0x12a,
    op_bihnz = 0x12b,
  };
440
/* If INSN is an RR-format instruction with primary opcode OP, extract
   the RT, RA and RB register fields and return non-zero; otherwise
   return zero.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  return 1;
}
454
/* If INSN is an RRR-format instruction with primary opcode OP, extract
   the RT, RA, RB and RC register fields and return non-zero; otherwise
   return zero.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) != op)
    return 0;

  *rt = (insn >> 21) & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  *rc = insn & 127;
  return 1;
}
469
/* If INSN is an RI7-format instruction with primary opcode OP, extract
   the RT and RA register fields and the sign-extended 7-bit immediate
   I7, and return non-zero; otherwise return zero.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick to sign-extend the 7-bit field.  */
  *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
  return 1;
}
483
/* If INSN is an RI10-format instruction with primary opcode OP, extract
   the RT and RA register fields and the sign-extended 10-bit immediate
   I10, and return non-zero; otherwise return zero.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick to sign-extend the 10-bit field.  */
  *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
  return 1;
}
497
/* If INSN is an RI16-format instruction with primary opcode OP, extract
   the RT register field and the sign-extended 16-bit immediate I16, and
   return non-zero; otherwise return zero.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick to sign-extend the 16-bit field.  */
  *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
  return 1;
}
510
/* If INSN is an RI18-format instruction with primary opcode OP, extract
   the RT register field and the sign-extended 18-bit immediate I18, and
   return non-zero; otherwise return zero.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick to sign-extend the 18-bit field.  */
  *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
  return 1;
}
523
524 static int
525 is_branch (unsigned int insn, int *offset, int *reg)
526 {
527 int rt, i7, i16;
528
529 if (is_ri16 (insn, op_br, &rt, &i16)
530 || is_ri16 (insn, op_brsl, &rt, &i16)
531 || is_ri16 (insn, op_brnz, &rt, &i16)
532 || is_ri16 (insn, op_brz, &rt, &i16)
533 || is_ri16 (insn, op_brhnz, &rt, &i16)
534 || is_ri16 (insn, op_brhz, &rt, &i16))
535 {
536 *reg = SPU_PC_REGNUM;
537 *offset = i16 << 2;
538 return 1;
539 }
540
541 if (is_ri16 (insn, op_bra, &rt, &i16)
542 || is_ri16 (insn, op_brasl, &rt, &i16))
543 {
544 *reg = -1;
545 *offset = i16 << 2;
546 return 1;
547 }
548
549 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
550 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
551 || is_ri7 (insn, op_biz, &rt, reg, &i7)
552 || is_ri7 (insn, op_binz, &rt, reg, &i7)
553 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
554 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
555 {
556 *offset = 0;
557 return 1;
558 }
559
560 return 0;
561 }
562
563
564 /* Prolog parsing. */
565
/* Prolog parsing.  */

/* Result of analyzing a function prologue with spu_analyze_prologue.  */
struct spu_prologue_data
  {
    /* Stack frame size.  -1 if analysis was unsuccessful.  */
    int size;

    /* How to find the CFA.  The CFA is equal to SP at function entry.
       CFA_REG is -1 when the CFA could not be tracked.  */
    int cfa_reg;
    int cfa_offset;

    /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
    int reg_offset[SPU_NUM_GPRS];
  };
578
/* Analyze the prologue of the function beginning at START_PC, scanning
   no further than END_PC.  Fill DATA with the frame size, CFA tracking
   information, and register save slots found.  Return the address of
   the first instruction past the recognized prologue, or START_PC if
   none was found.  */
static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
		      CORE_ADDR start_pc, CORE_ADDR end_pc,
		      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
    reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

	- The first instruction to set up the stack pointer.
	- The first instruction to set up the frame pointer.
	- The first instruction to save the link register.

     We return the instruction after the latest of these three,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

      - Any instruction adding to the current frame pointer.
      - Any instruction loading an immediate constant into a register.
      - Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      if (target_read_memory (pc, buf, 4))
	break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
	 It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    data->cfa_offset -= immed;

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      data->size = -immed;
	    }
	  else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
		   && !found_fp)
	    {
	      found_fp = 1;
	      prolog_pc = pc + 4;

	      data->cfa_reg = SPU_FP_REGNUM;
	      data->cfa_offset -= immed;
	    }
	}

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
         we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    {
	      if (reg_immed[rb] != 0)
		data->cfa_offset -= reg_immed[rb];
	      else
		data->cfa_reg = -1;  /* We don't know the CFA any more.  */
	    }

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      if (reg_immed[rb] != 0)
		data->size = -reg_immed[rb];
	    }
	}

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
	{
	  reg_immed[rt] = immed;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      else if (is_ri18 (insn, op_ila, &rt, &immed))
	{
	  reg_immed[rt] = immed & 0x3ffff;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
	{
	  if (ra == data->cfa_reg)
	    data->reg_offset[rt] = data->cfa_offset - (immed << 4);

	  if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
	    {
	      found_lr = 1;
	      prolog_pc = pc + 4;
	    }
	}

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
	{
	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
	break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}
752
753 /* Return the first instruction after the prologue starting at PC. */
/* Return the first instruction after the prologue starting at PC.
   The analysis results themselves are discarded here.  */
static CORE_ADDR
spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  struct spu_prologue_data data;
  return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
}
760
761 /* Return the frame pointer in use at address PC. */
/* Return the frame pointer in use at address PC, as a register number
   in *REG plus an offset in *OFFSET, derived from prologue analysis.  */
static void
spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
			   int *reg, LONGEST *offset)
{
  struct spu_prologue_data data;
  spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);

  if (data.size != -1 && data.cfa_reg != -1)
    {
      /* The 'frame pointer' address is CFA minus frame size.  */
      *reg = data.cfa_reg;
      *offset = data.cfa_offset - data.size;
    }
  else
    {
      /* ??? We don't really know ...  */
      *reg = SPU_SP_REGNUM;
      *offset = 0;
    }
}
782
783 /* Return true if we are in the function's epilogue, i.e. after the
784 instruction that destroyed the function's stack frame.
785
786 1) scan forward from the point of execution:
787 a) If you find an instruction that modifies the stack pointer
788 or transfers control (except a return), execution is not in
789 an epilogue, return.
790 b) Stop scanning if you find a return instruction or reach the
791 end of the function or reach the hard limit for the size of
792 an epilogue.
793 2) scan backward from the point of execution:
794 a) If you find an instruction that modifies the stack pointer,
795 execution *is* in an epilogue, return.
796 b) Stop scanning if you reach an instruction that transfers
797 control or the beginning of the function or reach the hard
798 limit for the size of an epilogue. */
799
/* Return non-zero if PC is in the function's epilogue, per the
   two-pass scan algorithm described in the comment above: scan
   forward for a 'bi $0' return without intervening SP modification,
   then scan backward for the SP-restoring instruction.  */
static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, rc, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	{
	  /* An indirect branch via LR with zero offset is a return.  */
	  if (immed == 0 && ra == SPU_LR_REGNUM)
	    break;

	  /* Any other control transfer means we are not in an epilogue.  */
	  return 0;
	}

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  /* An instruction still modifying SP ahead of us means the
	     frame has not been destroyed yet.  */
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 0;
	}
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  /* The stack pointer was restored before PC: we are past the
	     frame teardown, i.e. in the epilogue.  */
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 1;
	}
    }

  return 0;
}
877
878
879 /* Normal stack frames. */
880
/* Per-frame unwind cache.  */
struct spu_unwind_cache
{
  /* Start address of the frame's function (code part of the frame ID).  */
  CORE_ADDR func;
  /* CFA of this frame; 0 if unwinding failed.  */
  CORE_ADDR frame_base;
  /* Bottom of the frame (CFA minus frame size).  */
  CORE_ADDR local_base;

  /* Where the previous frame's registers were saved.  */
  struct trad_frame_saved_reg *saved_regs;
};
889
/* Build (or return the cached) unwind cache for THIS_FRAME.  Prefer
   prologue analysis; fall back to following the stack back-chain when
   the prologue cannot be analyzed.  */
static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
			void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
			  &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
	if (i == SPU_LR_REGNUM
	    || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
	  if (data.reg_offset[i] != -1)
	    info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      int status;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
					 &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (status && backchain > 0 && backchain < SPU_LS_SIZE)
	{
	  /* Assume the link register is saved into its slot.  */
	  if (backchain + 16 < SPU_LS_SIZE)
	    info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id, backchain + 16);

          /* Frame bases.  */
	  info->frame_base = SPUADDR (id, backchain);
	  info->local_base = SPUADDR (id, reg);
	}
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
			SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf, 4, byte_order));

  return info;
}
1002
1003 static void
1004 spu_frame_this_id (struct frame_info *this_frame,
1005 void **this_prologue_cache, struct frame_id *this_id)
1006 {
1007 struct spu_unwind_cache *info =
1008 spu_frame_unwind_cache (this_frame, this_prologue_cache);
1009
1010 if (info->frame_base == 0)
1011 return;
1012
1013 *this_id = frame_id_build (info->frame_base, info->func);
1014 }
1015
1016 static struct value *
1017 spu_frame_prev_register (struct frame_info *this_frame,
1018 void **this_prologue_cache, int regnum)
1019 {
1020 struct spu_unwind_cache *info
1021 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
1022
1023 /* Special-case the stack pointer. */
1024 if (regnum == SPU_RAW_SP_REGNUM)
1025 regnum = SPU_SP_REGNUM;
1026
1027 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
1028 }
1029
/* SPU normal-frame unwinder.  */
static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,
  default_frame_sniffer
};
1037
/* Implement the frame_base address method: the local base is the
   bottom of the frame (CFA minus frame size).  */
static CORE_ADDR
spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_cache);
  return info->local_base;
}
1045
/* SPU frame base: the same address serves as frame, args, and locals
   base.  */
static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};
1052
/* Implement the gdbarch unwind_pc method: combine the unwound PC with
   the SPU ID to form a full address.  */
static CORE_ADDR
spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}
1061
/* Implement the gdbarch unwind_sp method: combine the unwound SP with
   the SPU ID to form a full address.  */
static CORE_ADDR
spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
  return SPUADDR (tdep->id, sp);
}
1069
/* Implement the gdbarch read_pc method: return the PC, with the
   interrupt-enable bits masked off, combined with the SPU ID.  */
static CORE_ADDR
spu_read_pc (struct regcache *regcache)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
  ULONGEST pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}
1079
/* Implement the gdbarch write_pc method: store the local-store part of
   PC while preserving the low two (interrupt enable state) bits of the
   current PC register.  */
static void
spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  /* Keep interrupt enabled state unchanged.  */
  ULONGEST old_pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
  regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
				  (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
}
1089
1090
1091 /* Cell/B.E. cross-architecture unwinder support. */
1092
/* Cell/B.E. cross-architecture unwinder support.  */

/* Cache for the SPU-to-PPU boundary frame: the frame's ID and a saved
   copy of the PPU-side register state.  */
struct spu2ppu_cache
{
  struct frame_id frame_id;
  struct regcache *regcache;
};
1098
/* Implement the prev_arch frame_unwind method: the previous frame runs
   on the architecture of the saved PPU register cache.  */
static struct gdbarch *
spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
{
  struct spu2ppu_cache *cache = *this_cache;
  return get_regcache_arch (cache->regcache);
}
1105
/* Implement the this_id frame_unwind method for the spu2ppu frame.  */
static void
spu2ppu_this_id (struct frame_info *this_frame,
		 void **this_cache, struct frame_id *this_id)
{
  struct spu2ppu_cache *cache = *this_cache;
  *this_id = cache->frame_id;
}
1113
/* Implement the prev_register frame_unwind method: registers of the
   previous (PPU) frame come from the saved register cache.  */
static struct value *
spu2ppu_prev_register (struct frame_info *this_frame,
		       void **this_cache, int regnum)
{
  struct spu2ppu_cache *cache = *this_cache;
  struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
  gdb_byte *buf;

  buf = alloca (register_size (gdbarch, regnum));
  regcache_cooked_read (cache->regcache, regnum, buf);
  return frame_unwind_got_bytes (this_frame, regnum, buf);
}
1126
/* Sniffer for the SPU-to-PPU cross-architecture unwinder.  Matches an
   SPU frame whose stack back chain is zero, i.e. the outermost SPU
   frame, and caches the PPU register state to unwind into.  */
static int
spu2ppu_sniffer (const struct frame_unwind *self,
		 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  /* Only applicable when the target architecture itself is not SPU
     (i.e. in a combined Cell/B.E. debugging session).  */
  if (gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  /* A zero back chain marks the outermost SPU frame.  */
  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
	= FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      /* Look through the frames returned by get_next_frame for the
	 first one that is not an SPU frame.  */
      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
	if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
	  break;

      if (fi)
	{
	  cache->regcache = frame_save_as_regcache (fi);
	  *this_prologue_cache = cache;
	  return 1;
	}
      else
	{
	  /* No such frame found; fall back to a copy of the current
	     thread's register state for the target architecture.  */
	  struct regcache *regcache;
	  regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
	  cache->regcache = regcache_dup (regcache);
	  *this_prologue_cache = cache;
	  return 1;
	}
    }

  return 0;
}
1176
/* Implement the "dealloc_cache" unwinder method: release the saved
   PPU register state owned by the cache.  */
static void
spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct spu2ppu_cache *cache = this_cache;
  regcache_xfree (cache->regcache);
}
1183
/* Unwinder vector for crossing from SPU frames back to PPU frames.
   Registered for frames of type ARCH_FRAME; the callbacks above
   supply ID, registers, architecture and cleanup.  */
static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};
1193
1194
1195 /* Function calling convention. */
1196
1197 static CORE_ADDR
1198 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1199 {
1200 return sp & ~15;
1201 }
1202
1203 static CORE_ADDR
1204 spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1205 struct value **args, int nargs, struct type *value_type,
1206 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1207 struct regcache *regcache)
1208 {
1209 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1210 sp = (sp - 4) & ~15;
1211 /* Store the address of that breakpoint */
1212 *bp_addr = sp;
1213 /* The call starts at the callee's entry point. */
1214 *real_pc = funaddr;
1215
1216 return sp;
1217 }
1218
1219 static int
1220 spu_scalar_value_p (struct type *type)
1221 {
1222 switch (TYPE_CODE (type))
1223 {
1224 case TYPE_CODE_INT:
1225 case TYPE_CODE_ENUM:
1226 case TYPE_CODE_RANGE:
1227 case TYPE_CODE_CHAR:
1228 case TYPE_CODE_BOOL:
1229 case TYPE_CODE_PTR:
1230 case TYPE_CODE_REF:
1231 return TYPE_LENGTH (type) <= 16;
1232
1233 default:
1234 return 0;
1235 }
1236 }
1237
1238 static void
1239 spu_value_to_regcache (struct regcache *regcache, int regnum,
1240 struct type *type, const gdb_byte *in)
1241 {
1242 int len = TYPE_LENGTH (type);
1243
1244 if (spu_scalar_value_p (type))
1245 {
1246 int preferred_slot = len < 4 ? 4 - len : 0;
1247 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1248 }
1249 else
1250 {
1251 while (len >= 16)
1252 {
1253 regcache_cooked_write (regcache, regnum++, in);
1254 in += 16;
1255 len -= 16;
1256 }
1257
1258 if (len > 0)
1259 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1260 }
1261 }
1262
1263 static void
1264 spu_regcache_to_value (struct regcache *regcache, int regnum,
1265 struct type *type, gdb_byte *out)
1266 {
1267 int len = TYPE_LENGTH (type);
1268
1269 if (spu_scalar_value_p (type))
1270 {
1271 int preferred_slot = len < 4 ? 4 - len : 0;
1272 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1273 }
1274 else
1275 {
1276 while (len >= 16)
1277 {
1278 regcache_cooked_read (regcache, regnum++, out);
1279 out += 16;
1280 len -= 16;
1281 }
1282
1283 if (len > 0)
1284 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1285 }
1286 }
1287
/* Implement the "push_dummy_call" gdbarch method: set up registers
   and stack for an inferior function call.  Arguments go into
   SPU_ARG1_REGNUM onward; overflow arguments spill to the stack.
   Returns the final stack pointer.  */
static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr,
		     int nargs, struct value **args, CORE_ADDR sp,
		     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;
  gdb_byte buf[16];

  /* Set the return address. */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value. */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers. */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int len = TYPE_LENGTH (type);
      int n_regs = align_up (len, 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
	 all subsequent arguments go to the stack. */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
	{
	  stack_arg = i;
	  break;
	}

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack. */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size. */
      for (i = stack_arg; i < nargs; i++)
	{
	  struct type *type = check_typedef (value_type (args[i]));
	  sp -= align_up (TYPE_LENGTH (type), 16);
	}

      /* Fill in stack arguments. */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
	{
	  struct value *arg = args[i];
	  struct type *type = check_typedef (value_type (arg));
	  int len = TYPE_LENGTH (type);
	  int preferred_slot;

	  /* Scalars are right-justified within the preferred word,
	     as in the register case above.  */
	  if (spu_scalar_value_p (type))
	    preferred_slot = len < 4 ? 4 - len : 0;
	  else
	    preferred_slot = 0;

	  target_write_memory (ap + preferred_slot, value_contents (arg), len);
	  ap += align_up (TYPE_LENGTH (type), 16);
	}
    }

  /* Allocate stack frame header. */
  sp -= 32;

  /* Store stack back chain. */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register. */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}
1386
/* Implement the "dummy_id" gdbarch method: build the frame ID of a
   dummy frame from the current SP and PC, qualified with the spufs
   ID.  The interrupt-enable bits of the PC are masked off.  */
static struct frame_id
spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
  CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
  return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
}
1395
1396 /* Function return value access. */
1397
1398 static enum return_value_convention
1399 spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
1400 struct type *type, struct regcache *regcache,
1401 gdb_byte *out, const gdb_byte *in)
1402 {
1403 enum return_value_convention rvc;
1404
1405 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1406 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1407 else
1408 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1409
1410 if (in)
1411 {
1412 switch (rvc)
1413 {
1414 case RETURN_VALUE_REGISTER_CONVENTION:
1415 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1416 break;
1417
1418 case RETURN_VALUE_STRUCT_CONVENTION:
1419 error ("Cannot set function return value.");
1420 break;
1421 }
1422 }
1423 else if (out)
1424 {
1425 switch (rvc)
1426 {
1427 case RETURN_VALUE_REGISTER_CONVENTION:
1428 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1429 break;
1430
1431 case RETURN_VALUE_STRUCT_CONVENTION:
1432 error ("Function return value unknown.");
1433 break;
1434 }
1435 }
1436
1437 return rvc;
1438 }
1439
1440
1441 /* Breakpoints. */
1442
1443 static const gdb_byte *
1444 spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
1445 {
1446 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1447
1448 *lenptr = sizeof breakpoint;
1449 return breakpoint;
1450 }
1451
1452
1453 /* Software single-stepping support. */
1454
/* Implement the "software_single_step" gdbarch method: place
   breakpoints on every possible successor of the current instruction
   (the sequential successor, and the branch target if the current
   instruction branches).  */
static int
spu_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];

  pc = get_frame_pc (frame);

  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4, byte_order);

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side. */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (SPUADDR_ADDR (pc) + 8) & (SPU_LS_SIZE - 1);
  else
    next_pc = (SPUADDR_ADDR (pc) + 4) & (SPU_LS_SIZE - 1);

  insert_single_step_breakpoint (gdbarch, SPUADDR (SPUADDR_SPU (pc), next_pc));

  if (is_branch (insn, &offset, &reg))
    {
      CORE_ADDR target = offset;

      /* REG == SPU_PC_REGNUM means a PC-relative branch; REG != -1
	 means an indirect branch through that register.  */
      if (reg == SPU_PC_REGNUM)
	target += SPUADDR_ADDR (pc);
      else if (reg != -1)
	{
	  get_frame_register_bytes (frame, reg, 0, 4, buf);
	  target += extract_unsigned_integer (buf, 4, byte_order) & -4;
	}

      /* Only place the second breakpoint if the branch target differs
	 from the sequential successor.  */
      target = target & (SPU_LS_SIZE - 1);
      if (target != next_pc)
	insert_single_step_breakpoint (gdbarch,
				       SPUADDR (SPUADDR_SPU (pc), target));
    }

  return 1;
}
1501
1502
1503 /* Longjmp support. */
1504
/* Implement the "get_longjmp_target" gdbarch method: extract the
   longjmp resume address from the jump buffer pointed to by the first
   argument register.  Returns non-zero on success and stores the
   spufs-qualified address in *PC.  */
static int
spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[4];
  CORE_ADDR jb_addr;

  /* Jump buffer is pointed to by the argument register $r3. */
  get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf);
  jb_addr = extract_unsigned_integer (buf, 4, byte_order);
  if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
    return 0;

  /* The resume PC is the first word of the jump buffer.  */
  *pc = extract_unsigned_integer (buf, 4, byte_order);
  *pc = SPUADDR (tdep->id, *pc);
  return 1;
}
1524
1525
1526 /* Disassembler. */
1527
/* Auxiliary data passed to the disassembler's print_address callback
   so that printed addresses can be re-qualified with the SPU ID.  */
struct spu_dis_asm_data
{
  /* Architecture used for printing addresses.  */
  struct gdbarch *gdbarch;
  /* spufs ID to fold back into printed addresses.  */
  int id;
};
1533
/* print_address callback for the SPU disassembler: re-attach the SPU
   ID (stashed in INFO->application_data) before printing ADDR.  */
static void
spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
{
  struct spu_dis_asm_data *data = info->application_data;
  print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
}
1540
/* Disassemble one SPU instruction at MEMADDR, wrapping the opcodes
   disassembler so that printed addresses keep their SPU ID.  */
static int
gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
{
  /* The opcodes disassembler does 18-bit address arithmetic. Make sure the
     SPU ID encoded in the high bits is added back when we call print_address. */
  struct disassemble_info spu_info = *info;
  struct spu_dis_asm_data data;
  data.gdbarch = info->application_data;
  data.id = SPUADDR_SPU (memaddr);

  /* Use a local copy of INFO with our own print_address hook.  */
  spu_info.application_data = &data;
  spu_info.print_address_func = spu_dis_asm_print_address;
  return print_insn_spu (memaddr, &spu_info);
}
1555
1556
1557 /* Target overlays for the SPU overlay manager.
1558
1559 See the documentation of simple_overlay_update for how the
1560 interface is supposed to work.
1561
1562 Data structures used by the overlay manager:
1563
1564 struct ovly_table
1565 {
1566 u32 vma;
1567 u32 size;
1568 u32 pos;
1569 u32 buf;
1570 } _ovly_table[]; -- one entry per overlay section
1571
1572 struct ovly_buf_table
1573 {
1574 u32 mapped;
1575 } _ovly_buf_table[]; -- one entry per overlay buffer
1576
1577 _ovly_table should never change.
1578
1579 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1580 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1581 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1582
1583 mapped is an index into _ovly_table. Both the mapped and buf indices start
1584 from one to reference the first entry in their respective tables. */
1585
1586 /* Using the per-objfile private data mechanism, we store for each
1587 objfile an array of "struct spu_overlay_table" structures, one
1588 for each obj_section of the objfile. This structure holds two
1589 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1590 is *not* an overlay section. If it is non-zero, it represents
1591 a target address. The overlay section is mapped iff the target
1592 integer at this location equals MAPPED_VAL. */
1593
/* Per-objfile data key under which the overlay table array is
   stored.  */
static const struct objfile_data *spu_overlay_data;

struct spu_overlay_table
  {
    /* Target address of the _ovly_buf_table "mapped" word for this
       section, or zero if the section is not an overlay.  */
    CORE_ADDR mapped_ptr;
    /* The section is mapped iff the target integer at MAPPED_PTR
       equals this value.  */
    CORE_ADDR mapped_val;
  };
1601
1602 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1603 the _ovly_table data structure from the target and initialize the
1604 spu_overlay_table data structure from it. */
static struct spu_overlay_table *
spu_get_overlay_table (struct objfile *objfile)
{
  enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
  CORE_ADDR ovly_table_base, ovly_buf_table_base;
  unsigned ovly_table_size, ovly_buf_table_size;
  struct spu_overlay_table *tbl;
  struct obj_section *osect;
  char *ovly_table;
  int i;

  /* Return the cached table if we already built one for OBJFILE.  */
  tbl = objfile_data (objfile, spu_overlay_data);
  if (tbl)
    return tbl;

  /* Both overlay-manager symbols must exist, else there are no
     overlays in this objfile.  */
  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
  if (!ovly_table_msym)
    return NULL;

  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
  if (!ovly_buf_table_msym)
    return NULL;

  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);

  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);

  /* Read the raw _ovly_table from the target.  */
  ovly_table = xmalloc (ovly_table_size);
  read_memory (ovly_table_base, ovly_table, ovly_table_size);

  /* One spu_overlay_table entry per obj_section, zero-initialized
     (MAPPED_PTR == 0 marks non-overlay sections).  */
  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
			objfile->sections_end - objfile->sections,
			struct spu_overlay_table);

  /* Each _ovly_table entry is 16 bytes: vma, size, pos, buf.  */
  for (i = 0; i < ovly_table_size / 16; i++)
    {
      CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0,
						4, byte_order);
      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
						 4, byte_order);
      CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8,
						4, byte_order);
      CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12,
						4, byte_order);

      /* BUF is a one-based index into _ovly_buf_table; skip invalid
	 or out-of-range entries.  */
      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
	continue;

      /* Match the overlay entry to an obj_section by VMA and file
	 position.  */
      ALL_OBJFILE_OSECTIONS (objfile, osect)
	if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
	    && pos == osect->the_bfd_section->filepos)
	  {
	    int ndx = osect - objfile->sections;
	    tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
	    tbl[ndx].mapped_val = i + 1;
	    break;
	  }
    }

  xfree (ovly_table);
  set_objfile_data (objfile, spu_overlay_data, tbl);
  return tbl;
}
1672
/* Read _ovly_buf_table entry from the target to determine whether
   OSECT is currently mapped, and update the mapped state. */
static void
spu_overlay_update_osect (struct obj_section *osect)
{
  enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct spu_overlay_table *ovly_table;
  CORE_ADDR id, val;

  ovly_table = spu_get_overlay_table (osect->objfile);
  if (!ovly_table)
    return;

  /* Index into the per-section table; MAPPED_PTR == 0 means OSECT is
     not an overlay section.  */
  ovly_table += osect - osect->objfile->sections;
  if (ovly_table->mapped_ptr == 0)
    return;

  /* The section is mapped iff the target word at MAPPED_PTR equals
     MAPPED_VAL.  */
  id = SPUADDR_SPU (obj_section_addr (osect));
  val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
				      4, byte_order);
  osect->ovly_mapped = (val == ovly_table->mapped_val);
}
1696
1697 /* If OSECT is NULL, then update all sections' mapped state.
1698 If OSECT is non-NULL, then update only OSECT's mapped state. */
1699 static void
1700 spu_overlay_update (struct obj_section *osect)
1701 {
1702 /* Just one section. */
1703 if (osect)
1704 spu_overlay_update_osect (osect);
1705
1706 /* All sections. */
1707 else
1708 {
1709 struct objfile *objfile;
1710
1711 ALL_OBJSECTIONS (objfile, osect)
1712 if (section_is_overlay (osect))
1713 spu_overlay_update_osect (osect);
1714 }
1715 }
1716
1717 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1718 If there is one, go through all sections and make sure for non-
1719 overlay sections LMA equals VMA, while for overlay sections LMA
1720 is larger than local store size. */
static void
spu_overlay_new_objfile (struct objfile *objfile)
{
  struct spu_overlay_table *ovly_table;
  struct obj_section *osect;

  /* If we've already touched this file, do nothing. */
  if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
    return;

  /* Consider only SPU objfiles. */
  if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* Check if this objfile has overlays. */
  ovly_table = spu_get_overlay_table (objfile);
  if (!ovly_table)
    return;

  /* Now go and fiddle with all the LMAs.  Non-overlay sections get
     LMA == VMA; overlay sections get an LMA beyond the local store
     size so GDB's overlay machinery treats them as unmapped copies. */
  ALL_OBJFILE_OSECTIONS (objfile, osect)
    {
      bfd *obfd = objfile->obfd;
      asection *bsect = osect->the_bfd_section;
      int ndx = osect - objfile->sections;

      if (ovly_table[ndx].mapped_ptr == 0)
	bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
      else
	bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
    }
}
1753
1754
1755 /* "info spu" commands. */
1756
1757 static void
1758 info_spu_event_command (char *args, int from_tty)
1759 {
1760 struct frame_info *frame = get_selected_frame (NULL);
1761 ULONGEST event_status = 0;
1762 ULONGEST event_mask = 0;
1763 struct cleanup *chain;
1764 gdb_byte buf[100];
1765 char annex[32];
1766 LONGEST len;
1767 int rc, id;
1768
1769 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1770 error (_("\"info spu\" is only supported on the SPU architecture."));
1771
1772 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1773
1774 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1775 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1776 buf, 0, (sizeof (buf) - 1));
1777 if (len <= 0)
1778 error (_("Could not read event_status."));
1779 buf[len] = '\0';
1780 event_status = strtoulst (buf, NULL, 16);
1781
1782 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1783 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1784 buf, 0, (sizeof (buf) - 1));
1785 if (len <= 0)
1786 error (_("Could not read event_mask."));
1787 buf[len] = '\0';
1788 event_mask = strtoulst (buf, NULL, 16);
1789
1790 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
1791
1792 if (ui_out_is_mi_like_p (uiout))
1793 {
1794 ui_out_field_fmt (uiout, "event_status",
1795 "0x%s", phex_nz (event_status, 4));
1796 ui_out_field_fmt (uiout, "event_mask",
1797 "0x%s", phex_nz (event_mask, 4));
1798 }
1799 else
1800 {
1801 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
1802 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
1803 }
1804
1805 do_cleanups (chain);
1806 }
1807
1808 static void
1809 info_spu_signal_command (char *args, int from_tty)
1810 {
1811 struct frame_info *frame = get_selected_frame (NULL);
1812 struct gdbarch *gdbarch = get_frame_arch (frame);
1813 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1814 ULONGEST signal1 = 0;
1815 ULONGEST signal1_type = 0;
1816 int signal1_pending = 0;
1817 ULONGEST signal2 = 0;
1818 ULONGEST signal2_type = 0;
1819 int signal2_pending = 0;
1820 struct cleanup *chain;
1821 char annex[32];
1822 gdb_byte buf[100];
1823 LONGEST len;
1824 int rc, id;
1825
1826 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1827 error (_("\"info spu\" is only supported on the SPU architecture."));
1828
1829 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1830
1831 xsnprintf (annex, sizeof annex, "%d/signal1", id);
1832 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1833 if (len < 0)
1834 error (_("Could not read signal1."));
1835 else if (len == 4)
1836 {
1837 signal1 = extract_unsigned_integer (buf, 4, byte_order);
1838 signal1_pending = 1;
1839 }
1840
1841 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
1842 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1843 buf, 0, (sizeof (buf) - 1));
1844 if (len <= 0)
1845 error (_("Could not read signal1_type."));
1846 buf[len] = '\0';
1847 signal1_type = strtoulst (buf, NULL, 16);
1848
1849 xsnprintf (annex, sizeof annex, "%d/signal2", id);
1850 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1851 if (len < 0)
1852 error (_("Could not read signal2."));
1853 else if (len == 4)
1854 {
1855 signal2 = extract_unsigned_integer (buf, 4, byte_order);
1856 signal2_pending = 1;
1857 }
1858
1859 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
1860 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1861 buf, 0, (sizeof (buf) - 1));
1862 if (len <= 0)
1863 error (_("Could not read signal2_type."));
1864 buf[len] = '\0';
1865 signal2_type = strtoulst (buf, NULL, 16);
1866
1867 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
1868
1869 if (ui_out_is_mi_like_p (uiout))
1870 {
1871 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
1872 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
1873 ui_out_field_int (uiout, "signal1_type", signal1_type);
1874 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
1875 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
1876 ui_out_field_int (uiout, "signal2_type", signal2_type);
1877 }
1878 else
1879 {
1880 if (signal1_pending)
1881 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
1882 else
1883 printf_filtered (_("Signal 1 not pending "));
1884
1885 if (signal1_type)
1886 printf_filtered (_("(Type Or)\n"));
1887 else
1888 printf_filtered (_("(Type Overwrite)\n"));
1889
1890 if (signal2_pending)
1891 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
1892 else
1893 printf_filtered (_("Signal 2 not pending "));
1894
1895 if (signal2_type)
1896 printf_filtered (_("(Type Or)\n"));
1897 else
1898 printf_filtered (_("(Type Overwrite)\n"));
1899 }
1900
1901 do_cleanups (chain);
1902 }
1903
/* Print NR 4-byte mailbox entries from BUF as a one-column ui_out
   table; FIELD is the field name and MSG the column heading.  */
static void
info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
		       const char *field, const char *msg)
{
  struct cleanup *chain;
  int i;

  /* Nothing to print for an empty mailbox.  */
  if (nr <= 0)
    return;

  chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");

  ui_out_table_header (uiout, 32, ui_left, field, msg);
  ui_out_table_body (uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *val_chain;
      ULONGEST val;
      val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
      val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
      do_cleanups (val_chain);

      if (!ui_out_is_mi_like_p (uiout))
	printf_filtered ("\n");
    }

  do_cleanups (chain);
}
1934
1935 static void
1936 info_spu_mailbox_command (char *args, int from_tty)
1937 {
1938 struct frame_info *frame = get_selected_frame (NULL);
1939 struct gdbarch *gdbarch = get_frame_arch (frame);
1940 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1941 struct cleanup *chain;
1942 char annex[32];
1943 gdb_byte buf[1024];
1944 LONGEST len;
1945 int i, id;
1946
1947 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1948 error (_("\"info spu\" is only supported on the SPU architecture."));
1949
1950 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1951
1952 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
1953
1954 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
1955 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1956 buf, 0, sizeof buf);
1957 if (len < 0)
1958 error (_("Could not read mbox_info."));
1959
1960 info_spu_mailbox_list (buf, len / 4, byte_order,
1961 "mbox", "SPU Outbound Mailbox");
1962
1963 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
1964 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1965 buf, 0, sizeof buf);
1966 if (len < 0)
1967 error (_("Could not read ibox_info."));
1968
1969 info_spu_mailbox_list (buf, len / 4, byte_order,
1970 "ibox", "SPU Outbound Interrupt Mailbox");
1971
1972 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
1973 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1974 buf, 0, sizeof buf);
1975 if (len < 0)
1976 error (_("Could not read wbox_info."));
1977
1978 info_spu_mailbox_list (buf, len / 4, byte_order,
1979 "wbox", "SPU Inbound Mailbox");
1980
1981 do_cleanups (chain);
1982 }
1983
1984 static ULONGEST
1985 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
1986 {
1987 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
1988 return (word >> (63 - last)) & mask;
1989 }
1990
/* Decode and print NR entries of the MFC command queue from BUF as a
   ui_out table.  Entries are displayed in dependency order: an entry
   is shown only once all entries it depends on have been shown.  */
static void
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
{
  /* Mnemonic for each MFC command opcode; NULL for undefined
     opcodes, which are printed numerically instead.  */
  static char *spu_mfc_opcode[256] =
    {
    /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
	     "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
    /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
	     "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
    /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
    /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
	     "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
    /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

  int *seq = alloca (nr * sizeof (int));
  int done = 0;
  struct cleanup *chain;
  int i, j;


  /* Determine sequence in which to display (valid) entries.  DONE is
     a bitmask of entries already scheduled; bit (nr-1-j) stands for
     entry J.  */
  for (i = 0; i < nr; i++)
    {
      /* Search for the first valid entry all of whose
	 dependencies are met. */
      for (j = 0; j < nr; j++)
	{
	  ULONGEST mfc_cq_dw3;
	  ULONGEST dependencies;

	  if (done & (1 << (nr - 1 - j)))
	    continue;

	  /* Skip entries whose valid bit (bit 16 of dword 3) is
	     clear.  */
	  mfc_cq_dw3
	    = extract_unsigned_integer (buf + 32*j + 24,8, byte_order);
	  if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
	    continue;

	  dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
	  if ((dependencies & done) != dependencies)
	    continue;

	  seq[i] = j;
	  done |= 1 << (nr - 1 - j);
	  break;
	}

      if (j == nr)
	break;
    }

  nr = i;


  chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");

  ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
  ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
  ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
  ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
  ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
  ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
  ui_out_table_header (uiout, 7, ui_left, "size", "Size");
  ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
  ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
  ui_out_table_header (uiout, 1, ui_left, "error_p", "E");

  ui_out_table_body (uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *cmd_chain;
      ULONGEST mfc_cq_dw0;
      ULONGEST mfc_cq_dw1;
      ULONGEST mfc_cq_dw2;
      int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
      int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
      ULONGEST mfc_ea;
      int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;

      /* Decode contents of MFC Command Queue Context Save/Restore Registers.
	 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */

      mfc_cq_dw0
	= extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
      mfc_cq_dw1
	= extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
      mfc_cq_dw2
	= extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);

      list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
      list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
      mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
      mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
      list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
      rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
      tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);

      mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
	       | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);

      mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
      mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
      noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
      qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
      ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
      cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);

      cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");

      /* Print the mnemonic if known, the raw opcode otherwise.  */
      if (spu_mfc_opcode[mfc_cmd_opcode])
	ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
      else
	ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);

      ui_out_field_int (uiout, "tag", mfc_cmd_tag);
      ui_out_field_int (uiout, "tid", tclass_id);
      ui_out_field_int (uiout, "rid", rclass_id);

      if (ea_valid_p)
	ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
      else
	ui_out_field_skip (uiout, "ea");

      ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
      if (qw_valid_p)
	ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
      else
	ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);

      if (list_valid_p)
	{
	  ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
	  ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
	}
      else
	{
	  ui_out_field_skip (uiout, "lstaddr");
	  ui_out_field_skip (uiout, "lstsize");
	}

      if (cmd_error_p)
	ui_out_field_string (uiout, "error_p", "*");
      else
	ui_out_field_skip (uiout, "error_p");

      do_cleanups (cmd_chain);

      if (!ui_out_is_mi_like_p (uiout))
	printf_filtered ("\n");
    }

  do_cleanups (chain);
}
2170
2171 static void
2172 info_spu_dma_command (char *args, int from_tty)
2173 {
2174 struct frame_info *frame = get_selected_frame (NULL);
2175 struct gdbarch *gdbarch = get_frame_arch (frame);
2176 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2177 ULONGEST dma_info_type;
2178 ULONGEST dma_info_mask;
2179 ULONGEST dma_info_status;
2180 ULONGEST dma_info_stall_and_notify;
2181 ULONGEST dma_info_atomic_command_status;
2182 struct cleanup *chain;
2183 char annex[32];
2184 gdb_byte buf[1024];
2185 LONGEST len;
2186 int i, id;
2187
2188 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2189 error (_("\"info spu\" is only supported on the SPU architecture."));
2190
2191 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2192
2193 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2194 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2195 buf, 0, 40 + 16 * 32);
2196 if (len <= 0)
2197 error (_("Could not read dma_info."));
2198
2199 dma_info_type
2200 = extract_unsigned_integer (buf, 8, byte_order);
2201 dma_info_mask
2202 = extract_unsigned_integer (buf + 8, 8, byte_order);
2203 dma_info_status
2204 = extract_unsigned_integer (buf + 16, 8, byte_order);
2205 dma_info_stall_and_notify
2206 = extract_unsigned_integer (buf + 24, 8, byte_order);
2207 dma_info_atomic_command_status
2208 = extract_unsigned_integer (buf + 32, 8, byte_order);
2209
2210 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
2211
2212 if (ui_out_is_mi_like_p (uiout))
2213 {
2214 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
2215 phex_nz (dma_info_type, 4));
2216 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
2217 phex_nz (dma_info_mask, 4));
2218 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
2219 phex_nz (dma_info_status, 4));
2220 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
2221 phex_nz (dma_info_stall_and_notify, 4));
2222 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
2223 phex_nz (dma_info_atomic_command_status, 4));
2224 }
2225 else
2226 {
2227 const char *query_msg = _("no query pending");
2228
2229 if (dma_info_type & 4)
2230 switch (dma_info_type & 3)
2231 {
2232 case 1: query_msg = _("'any' query pending"); break;
2233 case 2: query_msg = _("'all' query pending"); break;
2234 default: query_msg = _("undefined query type"); break;
2235 }
2236
2237 printf_filtered (_("Tag-Group Status 0x%s\n"),
2238 phex (dma_info_status, 4));
2239 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2240 phex (dma_info_mask, 4), query_msg);
2241 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2242 phex (dma_info_stall_and_notify, 4));
2243 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2244 phex (dma_info_atomic_command_status, 4));
2245 printf_filtered ("\n");
2246 }
2247
2248 info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2249 do_cleanups (chain);
2250 }
2251
2252 static void
2253 info_spu_proxydma_command (char *args, int from_tty)
2254 {
2255 struct frame_info *frame = get_selected_frame (NULL);
2256 struct gdbarch *gdbarch = get_frame_arch (frame);
2257 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2258 ULONGEST dma_info_type;
2259 ULONGEST dma_info_mask;
2260 ULONGEST dma_info_status;
2261 struct cleanup *chain;
2262 char annex[32];
2263 gdb_byte buf[1024];
2264 LONGEST len;
2265 int i, id;
2266
2267 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2268 error (_("\"info spu\" is only supported on the SPU architecture."));
2269
2270 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2271
2272 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2273 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2274 buf, 0, 24 + 8 * 32);
2275 if (len <= 0)
2276 error (_("Could not read proxydma_info."));
2277
2278 dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2279 dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2280 dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2281
2282 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2283
2284 if (ui_out_is_mi_like_p (uiout))
2285 {
2286 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2287 phex_nz (dma_info_type, 4));
2288 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2289 phex_nz (dma_info_mask, 4));
2290 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2291 phex_nz (dma_info_status, 4));
2292 }
2293 else
2294 {
2295 const char *query_msg;
2296
2297 switch (dma_info_type & 3)
2298 {
2299 case 0: query_msg = _("no query pending"); break;
2300 case 1: query_msg = _("'any' query pending"); break;
2301 case 2: query_msg = _("'all' query pending"); break;
2302 default: query_msg = _("undefined query type"); break;
2303 }
2304
2305 printf_filtered (_("Tag-Group Status 0x%s\n"),
2306 phex (dma_info_status, 4));
2307 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2308 phex (dma_info_mask, 4), query_msg);
2309 printf_filtered ("\n");
2310 }
2311
2312 info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2313 do_cleanups (chain);
2314 }
2315
2316 static void
2317 info_spu_command (char *args, int from_tty)
2318 {
2319 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2320 help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
2321 }
2322
2323
/* Set up gdbarch struct.  Returns the (possibly shared) gdbarch for
   the spufs context identified by INFO, creating a new one when no
   existing architecture matches the requested context ID.  */

static struct gdbarch *
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;
  struct gdbarch_tdep *tdep;
  int id = -1;	/* -1 means "no specific spufs context".  */

  /* Which spufs ID was requested as address space?  */
  if (info.tdep_info)
    id = *(int *)info.tdep_info;
  /* For objfile architectures of SPU solibs, decode the ID from the name.
     This assumes the filename convention employed by solib-spu.c.  */
  else if (info.abfd)
    {
      char *name = strrchr (info.abfd->filename, '@');
      if (name)
	sscanf (name, "@0x%*x <%d>", &id);
    }

  /* Find a candidate among extant architectures: reuse any SPU gdbarch
     already created for the same context ID.  */
  for (arches = gdbarch_list_lookup_by_info (arches, &info);
       arches != NULL;
       arches = gdbarch_list_lookup_by_info (arches->next, &info))
    {
      tdep = gdbarch_tdep (arches->gdbarch);
      if (tdep && tdep->id == id)
	return arches->gdbarch;
    }

  /* None found, so create a new architecture.  The tdep structure is
     owned by the new gdbarch from here on.  */
  tdep = XCALLOC (1, struct gdbarch_tdep);
  tdep->id = id;
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Disassembler.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);

  /* Registers.  */
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
  set_gdbarch_register_name (gdbarch, spu_register_name);
  set_gdbarch_register_type (gdbarch, spu_register_type);
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);

  /* Data types.  The SPU is a 32-bit architecture with unsigned
     "char" and IEEE single/double floats ("long double" is double).  */
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_ptr_bit (gdbarch, 32);
  set_gdbarch_addr_bit (gdbarch, 32);
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 64);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);

  /* Address conversion.  */
  set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
  set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
  set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);

  /* Inferior function calls.  */
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 2000);
  set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
  set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
  set_gdbarch_return_value (gdbarch, spu_return_value);

  /* Frame handling.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
  frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
  frame_base_set_default (gdbarch, &spu_frame_base);
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
  set_gdbarch_frame_args_skip (gdbarch, 0);
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);

  /* Cell/B.E. cross-architecture unwinder support.  Prepended so it is
     considered before the regular SPU unwinder installed above.  */
  frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);

  /* Breakpoints.  */
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
  set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);

  /* Overlays.  */
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);

  return gdbarch;
}
2432
2433 /* Provide a prototype to silence -Wmissing-prototypes. */
2434 extern initialize_file_ftype _initialize_spu_tdep;
2435
/* Module initialization: register the SPU architecture with GDB, hook
   overlay tracking into objfile loading, and install the "info spu"
   command family.  */
void
_initialize_spu_tdep (void)
{
  register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);

  /* Add ourselves to objfile event chain.  */
  observer_attach_new_objfile (spu_overlay_new_objfile);
  spu_overlay_data = register_objfile_data ();

  /* Add root prefix command for all "info spu" commands.  */
  add_prefix_cmd ("spu", class_info, info_spu_command,
		  _("Various SPU specific commands."),
		  &infospucmdlist, "info spu ", 0, &infolist);

  /* Add various "info spu" commands.  */
  add_cmd ("event", class_info, info_spu_event_command,
	   _("Display SPU event facility status.\n"),
	   &infospucmdlist);
  add_cmd ("signal", class_info, info_spu_signal_command,
	   _("Display SPU signal notification facility status.\n"),
	   &infospucmdlist);
  add_cmd ("mailbox", class_info, info_spu_mailbox_command,
	   _("Display SPU mailbox facility status.\n"),
	   &infospucmdlist);
  add_cmd ("dma", class_info, info_spu_dma_command,
	   _("Display MFC DMA status.\n"),
	   &infospucmdlist);
  add_cmd ("proxydma", class_info, info_spu_proxydma_command,
	   _("Display MFC Proxy-DMA status.\n"),
	   &infospucmdlist);
}