Change addressing in interrupt code to meet physical addressing requirements
[gem5.git] / system / alpha / palcode / platform_m5.s
    1 // build_fixed_image: not sure what this means
2 // real_mm to be replaced during rewrite
    3 // remove_save_state remove_restore_state can be removed to save space ??
4
5
6 #define egore 0
7 #define acore 0
8 #define beh_model 0
9 #define ev5_p2 1
10 #define ev5_p1 0
11 #define ldvpte_bug_fix 1
12 #define spe_fix 0
13 #define osf_chm_fix 0
14 #define build_fixed_image 0
15 #define enable_p4_fixups 0
16 #define osf_svmin 1
17 #define enable_physical_console 0
18 #define fill_err_hack 0
19 #define icflush_on_tbix 0
20 #define max_cpuid 1
21 #define perfmon_debug 0
22 #define rax_mode 0
23
24 #define hw_rei_spe hw_rei
25
26 #include "ev5_defs.h"
27 #include "ev5_impure.h"
28 #include "ev5_alpha_defs.h"
29 #include "ev5_paldef.h"
30 #include "ev5_osfalpha_defs.h"
31 #include "fromHudsonMacros.h"
32 #include "fromHudsonOsf.h"
33 #include "dc21164FromGasSources.h"
34 #include "cserve.h"
35 #include "tlaserreg.h"
36 //#include "simos.h"
37
38
39 #define ldlp ldl_p
40 #define ldqp ldq_p
41
42 #define stlp stl_p
43 #define stqp stq_p
44 #define stqpc stqp
45
46 #ifdef SIMOS
47 #define ldqpl ldq_p
48 #define sdqpl sdq_p
49 #else
50 <--bomb>
51 #endif
52
53 #define pt_entInt pt_entint
54 #define pt_entArith pt_entarith
55 #define mchk_size ((mchk_cpu_base + 7 + 8) &0xfff8)
56 #define mchk_flag CNS_Q_FLAG
57 #define mchk_sys_base 56
58 #define mchk_cpu_base (CNS_Q_LD_LOCK + 8)
59 #define mchk_offsets CNS_Q_EXC_ADDR
60 #define mchk_mchk_code 8
61 #define mchk_ic_perr_stat CNS_Q_ICPERR_STAT
62 #define mchk_dc_perr_stat CNS_Q_DCPERR_STAT
63 #define mchk_sc_addr CNS_Q_SC_ADDR
64 #define mchk_sc_stat CNS_Q_SC_STAT
65 #define mchk_ei_addr CNS_Q_EI_ADDR
66 #define mchk_bc_tag_addr CNS_Q_BC_TAG_ADDR
67 #define mchk_fill_syn CNS_Q_FILL_SYN
68 #define mchk_ei_stat CNS_Q_EI_STAT
69 #define mchk_exc_addr CNS_Q_EXC_ADDR
70 #define mchk_ld_lock CNS_Q_LD_LOCK
71 #define osfpcb_q_Ksp pcb_q_ksp
72 #define pal_impure_common_size ((0x200 + 7) & 0xfff8)
73
74 #define ALIGN_BLOCK \
75 .align 5
76
77 #define ALIGN_BRANCH \
78 .align 3
79
80 #define EXPORT(_x) \
81 .align 5; \
82 .globl _x; \
83 _x:
84
85 // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
86 // XXX the following is 'made up'
87 // XXX bugnion
88
89 // XXX bugnion not sure how to align 'quad'
90 #define ALIGN_QUAD \
91 .align 3
92
93 #define ALIGN_128 \
94 .align 7
95
96
97 #define GET_IMPURE(_r) mfpr _r,pt_impure
98 #define GET_ADDR(_r1,_off,_r2) lda _r1,_off(_r2)
99
100
101 #define BIT(_x) (1<<(_x))
102
103
104 // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
105 // XXX back to original code
106
107 // .sbttl "System specific code - beh model version"
108
109 //
110 // Entry points
111 // SYS$CFLUSH - Cache flush
112 // SYS$CSERVE - Console service
113 // SYS$WRIPIR - interprocessor interrupts
114 // SYS$HALT_INTERRUPT - Halt interrupt
115 // SYS$PASSIVE_RELEASE - Interrupt, passive release
116 // SYS$INTERRUPT - Interrupt
117 // SYS$RESET - Reset
118 // SYS$ENTER_CONSOLE
119
120 //
121 // Macro to read TLINTRSUMx
122 //
123 // Based on the CPU_NUMBER, read either the TLINTRSUM0 or TLINTRSUM1 register
124 //
125 // Assumed register usage:
126 // rsum TLINTRSUMx contents
127 // raddr node space address
128 // scratch scratch register
129
130
131 // .macro Read_TLINTRSUMx rsum, raddr, scratch, ?label1, ?label2
132 //
133 // nop
134 // mfpr 'scratch', pt_whami // Get our whami (VID)
135 //
136 // extbl 'scratch', #1, 'scratch' // shift down to bit 0
137 // lda 'raddr', ^xff88(r31) // Get base node space address bits
138 //
139 // sll 'raddr', #24, 'raddr' // Shift up to proper position
140 // srl 'scratch', #1, 'rsum' // Shift off the cpu number
141 //
142 // sll 'rsum', #22, 'rsum' // Get our node offset
143 // addq 'raddr', 'rsum', 'raddr' // Get our base node space address
144 //
145 // blbs 'scratch', label1
146 // lda 'raddr', <tlep$tlintrsum0_offset>('raddr')
147 //
148 // br r31, label2
149 //label1: lda 'raddr', <tlep$tlintrsum1_offset>('raddr')
150 //
151 //label2: ldlp 'rsum', 0('raddr') // read the right tlintrsum reg
152 //.endm
153
154 #define Read_TLINTRSUMx(_rsum,_raddr,_scratch) \
155 nop; \
156 mfpr _scratch,pt_whami; \
157 extbl _scratch,1,_scratch; \
158 lda _raddr,0xff88(zero); \
159 sll _raddr,24,_raddr; \
160 srl _scratch,1,_rsum; \
161 sll _rsum,22,_rsum; \
162 addq _raddr,_rsum,_raddr; \
163 blbs _scratch,1f; \
164 lda _raddr,0x1180(_raddr); \
165 br r31,2f; \
166 1: \
167 lda _raddr,0x11c0(_raddr); \
168 2: ldlp _rsum,0(_raddr)
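
// A minimal C sketch (illustrative only, never assembled) of the address
// arithmetic the macro above performs.  The 0xff88000000 node-space base and
// the 0x1180/0x11c0 TLINTRSUM0/TLINTRSUM1 offsets are taken straight from the
// code; the pt_whami layout (VID in byte 1, CPU-within-node in bit 0) is
// inferred from the extbl/blbs sequence:
//
//     uint64_t whami = (read_pt_whami() >> 8) & 0xff;   // extbl byte 1
//     uint64_t node  = whami >> 1;                      // two CPUs per node
//     uint64_t base  = 0xff88000000ULL + (node << 22);  // node-space base
//     uint32_t sum   = *(volatile uint32_t *)
//                          (base + ((whami & 1) ? 0x11c0 : 0x1180));
//
// Write_TLINTRSUMx below does the mirror-image store, followed by a dummy
// read (and a consuming bis) to make sure the write has reached the register.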
169
170
171
172 //
173 // Macro to write TLINTRSUMx
174 //
175 // Based on the CPU_NUMBER, write either the TLINTRSUM0 or TLINTRSUM1 register
176 //
177 // Assumed register usage:
178 // rsum TLINTRSUMx write data
179 // raddr node space address
180 // scratch scratch register
181
182 // .macro Write_TLINTRSUMx rsum, raddr, whami, ?label1, ?label2
183 //
184 // nop
185 // mfpr 'whami', pt_whami // Get our whami (VID)
186 //
187 // extbl 'whami', #1, 'whami' // shift down to bit 0
188 // lda 'raddr', ^xff88(r31) // Get base node space address bits
189 //
190 // sll 'raddr', #24, 'raddr' // Shift up to proper position
191 // blbs 'whami', label1
192 //
193 // lda 'raddr', <tlep$tlintrsum0_offset>('raddr')
194 // br r31, label2
195 //
196 // label1: lda 'raddr', <tlep$tlintrsum1_offset>('raddr')
197 // label2: srl 'whami', #1, 'whami' // Shift off the cpu number
198 //
199 // sll 'whami', #22, 'whami' // Get our node offset
200 // addq 'raddr', 'whami', 'raddr' // Get our base node space address
201 //
202 // mb
203 // stqp 'rsum', 0('raddr') // write the right tlintrsum reg
204 // mb
205 // ldqp 'rsum', 0('raddr') // dummy read to tlintrsum
206 // bis 'rsum', 'rsum', 'rsum' // needed to complete the ldqp above -jpo
207 // .endm
208
209
210 #define Write_TLINTRSUMx(_rsum,_raddr,_whami) \
211 nop; \
212 mfpr _whami,pt_whami; \
213 extbl _whami,1,_whami; \
214 lda _raddr,0xff88(zero); \
215 sll _raddr,24,_raddr; \
216 blbs _whami,1f; \
217 lda _raddr,0x1180(_raddr);\
218 br zero,2f; \
219 1: lda _raddr,0x11c0(_raddr);\
220 2: srl _whami,1,_whami; \
221 addq _raddr,_whami,_raddr; \
222 mb; \
223 stqp _rsum,0(_raddr); \
224 ldqp _rsum,0(_raddr); \
225 bis _rsum,_rsum,_rsum
226
227
228 //
229 // Macro to determine highest priority TIOP Node ID from interrupt pending mask
230 //
231 // Assumed register usage:
232 // rmask - TLINTRSUMx contents, shifted to isolate IOx bits
233 // rid - TLSB Node ID of highest TIOP
234
235 //.macro Intr_Find_TIOP rmask, rid, ?l1, ?l2, ?l3, ?l4, ?l5, ?l6
236 // srl 'rmask', #4, 'rid' // check IOP8
237 // blbc 'rid', l1 // not IOP8
238 //
239 // lda 'rid', 8(r31) // IOP8
240 // br r31, l6
241 //
242 // l1: srl 'rmask', #3, 'rid' // check IOP7
243 // blbc 'rid', l2 // not IOP7
244 //
245 // lda 'rid', 7(r31) // IOP7
246 // br r31, l6
247 //
248 // l2: srl 'rmask', #2, 'rid' // check IOP6
249 // blbc 'rid', l3 // not IOP6
250 //
251 // lda 'rid', 6(r31) // IOP6
252 // br r31, l6
253 //
254 // l3: srl 'rmask', #1, 'rid' // check IOP5
255 // blbc 'rid', l4 // not IOP5
256 //
257 // lda 'rid', 5(r31) // IOP5
258 // br r31, l6
259 //
260 // l4: srl 'rmask', #0, 'rid' // check IOP4
261 // blbc 'rid', l5 // not IOP4
262 //
263 // lda r14, 4(r31) // IOP4
264 // br r31, l6
265 //
266 // l5: lda r14, 0(r31) // passive release
267 // l6:
268 // .endm
269
270
271 #define Intr_Find_TIOP(_rmask,_rid) \
   272         srl     _rmask,4,_rid;  \
273 blbc _rid,1f; \
274 lda _rid,8(zero); \
275 br zero,6f; \
276 1: srl _rmask,3,_rid; \
277 blbc _rid, 2f; \
278 lda _rid, 7(r31); \
279 br r31, 6f; \
280 2: srl _rmask, 2, _rid; \
281 blbc _rid, 3f; \
282 lda _rid, 6(r31); \
283 br r31, 6f; \
284 3: srl _rmask, 1, _rid; \
285 blbc _rid, 4f; \
286 lda _rid, 5(r31); \
287 br r31, 6f; \
288 4: srl _rmask, 0, _rid; \
289 blbc _rid, 5f; \
290 lda r14, 4(r31); \
291 br r31, 6f; \
292 5: lda r14, 0(r31); \
293 6:
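
// What the macro above computes, as a C sketch (illustrative only).  The
// IOP4..IOP8 bit positions and the priority order come from the commented-out
// DEC macro above; a result of 0 means nothing was pending (passive release):
//
//     int tiop_id(uint64_t iomask) {
//         if (iomask & 0x10) return 8;   // IOP8, highest priority
//         if (iomask & 0x08) return 7;
//         if (iomask & 0x04) return 6;
//         if (iomask & 0x02) return 5;
//         if (iomask & 0x01) return 4;
//         return 0;                      // passive release
//     }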
294
295
296
297
298 //
299 // Macro to calculate base node space address for given node id
300 //
301 // Assumed register usage:
302 // rid - TLSB node id
303 // raddr - base node space address
304
305 //.macro Get_TLSB_Node_Address rid, raddr
306 // sll 'rid', #22, 'rid' // Get offset of IOP node
307 // lda 'raddr', ^xff88(r31) // Get base node space address bits
308 //
309 // sll 'raddr', #24, 'raddr' // Shift up to proper position
310 // addq 'raddr', 'rid', 'raddr' // Get TIOP node space address
311 // .iif ne turbo_pcia_intr_fix, srl 'rid', #22, 'rid' // Restore IOP node id
312 //.endm
313
314
315 #define turbo_pcia_intr_fix 0
316
317
318 #if turbo_pcia_intr_fix != 0
319 #define Get_TLSB_Node_Address(_rid,_raddr) \
320 sll _rid,22,_rid; \
321 lda _raddr,0xff88(zero); \
322 sll _raddr,24,_raddr; \
323 addq _raddr,_rid,_raddr; \
324 srl _rid,22,_rid
325 #else
326 #define Get_TLSB_Node_Address(_rid,_raddr) \
327 sll _rid,22,_rid; \
328 lda _raddr,0xff88(zero); \
329 sll _raddr,24,_raddr; \
330 addq _raddr,_rid,_raddr
331 #endif
332
333
334
335 \f
336
337 // .macro mchk$TLEPstore rlog, rs, rs1, nodebase, tlepreg, clr, tlsb, crd
338 // .iif eq tlsb, lda 'rs1',<tlep$'tlepreg'_offset>(r31)
339 // .iif ne tlsb, lda 'rs1',<tlsb$'tlepreg'_offset>(r31)
340 // or 'rs1', 'nodebase', 'rs1'
341 // ldlp 'rs', 0('rs1')
342 // .iif eq crd, stlp 'rs', mchk$'tlepreg'('rlog') // store in frame
343 // .iif ne crd, stlp 'rs', mchk$crd_'tlepreg'('rlog') // store in frame
344 // .iif ne clr, stlp 'rs',0('rs1') // optional write to clear
345 // .endm
346
347
348 // .macro OSFmchk$TLEPstore tlepreg, clr=0, tlsb=0
349 // mchk$TLEPstore r14, r8, r4, r13, <tlepreg>, <clr>, <tlsb>, crd=0
350 // .endm
351
352 #define CONCAT(_a,_b) _a ## _b
353
354 #define OSFmchk_TLEPstore_1(_rlog,_rs,_rs1,_nodebase,_tlepreg) \
355 lda _rs1,CONCAT(tlep_,_tlepreg)(zero); \
356 or _rs1,_nodebase,_rs1; \
357 ldlp _rs1,0(_rs1); \
358 stlp _rs,CONCAT(mchk_,_tlepreg)(_rlog)
359
360
361 #define OSFmchk_TLEPstore(_tlepreg) OSFmchk_TLEPstore_1(r14,r8,r4,r13,_tlepreg)
362
363
364 // .macro OSFcrd$TLEPstore tlepreg, clr=0, tlsb=0
365 // mchk$TLEPstore r14, r10, r1, r0, <tlepreg>, <clr>, <tlsb>, crd=1
366 // .endm
367
368 #define OSFcrd_TLEPstore_1(_rlog,_rs,_rs1,_nodebase,_tlepreg) \
369 lda _rs1,CONCAT(tlep_,_tlepreg)(zero); \
370 or _rs1,_nodebase,_rs1; \
371 ldlp _rs1,0(_rs1); \
372 stlp _rs,CONCAT(mchk_crd_,_tlepreg)(_rlog)
373
374 #define OSFcrd_TLEPstore_tlsb_1(_rlog,_rs,_rs1,_nodebase,_tlepreg) \
375 lda _rs1,CONCAT(tlsb_,_tlepreg)(zero); \
376 or _rs1,_nodebase,_rs1; \
377 ldlp _rs1,0(_rs1); \
378 stlp _rs,CONCAT(mchk_crd_,_tlepreg)(_rlog)
379
380 #define OSFcrd_TLEPstore_tlsb_clr_1(_rlog,_rs,_rs1,_nodebase,_tlepreg) \
381 lda _rs1,CONCAT(tlsb_,_tlepreg)(zero); \
382 or _rs1,_nodebase,_rs1; \
383 ldlp _rs1,0(_rs1); \
384 stlp _rs,CONCAT(mchk_crd_,_tlepreg)(_rlog); \
385 stlp _rs,0(_rs1)
386
387
388 #define OSFcrd_TLEPstore(_tlepreg) OSFcrd_TLEPstore_1(r14,r8,r4,r13,_tlepreg)
389 #define OSFcrd_TLEPstore_tlsb(_tlepreg) OSFcrd_TLEPstore_tlsb_1(r14,r8,r4,r13,_tlepreg)
390 #define OSFcrd_TLEPstore_tlsb_clr(_tlepreg) OSFcrd_TLEPstore_tlsb_clr_1(r14,r8,r4,r13,_tlepreg)
391
392
393 \f
394
395 // .macro save_pcia_intr irq
396 // and r13, #^xf, r25 // isolate low 4 bits
397 // addq r14, #4, r14 // format the TIOP Node id field
398 // sll r14, #4, r14 // shift the TIOP Node id
399 // or r14, r25, r10 // merge Node id/hose/HPC
400 // mfpr r14, pt14 // get saved value
401 // extbl r14, #'irq', r25 // confirm none outstanding
402 // bne r25, sys$machine_check_while_in_pal
403 // insbl r10, #'irq', r10 // align new info
404 // or r14, r10, r14 // merge info
405 // mtpr r14, pt14 // save it
406 // bic r13, #^xf, r13 // clear low 4 bits of vector
407 // .endm
408
409 #define save_pcia_intr(_irq) \
410 and r13, 0xf, r25; \
411 addq r14, 4, r14; \
412 sll r14, 4, r14; \
413 or r14, r25, r10; \
414 mfpr r14, pt14; \
415 extbl r14, _irq, r25; \
416 bne r25, sys_machine_check_while_in_pal; \
417 insbl r10, _irq, r10; \
418 or r14, r10, r14; \
419 mtpr r14, pt14; \
420 bic r13, 0xf, r13
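
// Rough C equivalent of save_pcia_intr (illustrative only); pt14 keeps one
// byte of pending-PCIA state per IRQ level, and the field layout below is
// inferred from the shifts in the macro:
//
//     info = ((tiop_id + 4) << 4) | (vector & 0xf);     // node id | hose/HPC
//     if ((pt14 >> (8 * irq)) & 0xff)                   // one already pending?
//         sys_machine_check_while_in_pal();
//     pt14 |= info << (8 * irq);                        // remember it
//     vector &= ~0xfULL;                                // strip hose/HPC bits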
421
422
423
424 ALIGN_BLOCK
425 \f
426 // .sbttl "wripir - PALcode for wripir instruction"
427 //orig SYS$WRIPIR: // R16 has the processor number.
428
429 EXPORT(sys_wripir)
430
431 //++
432 // Convert the processor number to a CPU mask
433 //--
434
   435         and     r16,0xf, r14            // keep only the CPU number (16 CPUs supported)
436 bis r31,0x1,r16 // get a one
437 sll r16,r14,r14 // shift the bit to the right place
438
439 //++
440 // Build the Broadcast Space base address
441 //--
442 lda r13,0xff8e(r31) // Load the upper address bits
443 sll r13,24,r13 // shift them to the top
444
445 //++
446 // Send out the IP Intr
447 //--
448 stqp r14, 0x40(r13) // Write to TLIPINTR reg WAS TLSB_TLIPINTR_OFFSET
449 wmb // Push out the store
450
451 hw_rei
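
// In C terms (a sketch, not assembled), the routine above is simply:
//
//     void wripir(uint64_t cpu) {
//         uint64_t mask = 1ULL << (cpu & 0xf);          // one bit per CPU
//         // TLIPINTR lives at offset 0x40 in TLSB broadcast space
//         *(volatile uint64_t *)(0xff8e000000ULL + 0x40) = mask;
//     }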
452
453 \f
454 ALIGN_BLOCK
455 // .sbttl "CFLUSH- PALcode for CFLUSH instruction"
456 //+
457 // SYS$CFLUSH
458 // Entry:
459 //
460 // R16 - contains the PFN of the page to be flushed
461 //
462 // Function:
463 // Flush all Dstream caches of 1 entire page
464 //
465 //-
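//
// Loop-bound sanity check for the code below: the counter is 8192/(32*8) = 32
// iterations, each iteration issues 8 loads spaced 32 bytes apart and then
// advances the address by 256 bytes, so the loop touches 32 * 256 = 8192
// bytes -- exactly one 8KB page worth of cache blocks.
//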
466
467 EXPORT(sys_cflush)
468
469 // #convert pfn to addr, and clean off <63:20>
470 // #sll r16, <page_offset_size_bits>+<63-20>>, r12
471 sll r16, page_offset_size_bits+(63-20),r12
472
473 // #ldah r13,<<1@22>+32768>@-16(r31)// + xxx<31:16>
474 // # stolen from srcmax code. XXX bugnion
475 lda r13, 0x10(r31) // assume 16Mbytes of cache
476 sll r13, 20, r13 // convert to bytes
477
478
479 srl r12, 63-20, r12 // shift back to normal position
480 xor r12, r13, r12 // xor addr<18>
481
482 or r31, 8192/(32*8), r13 // get count of loads
483 nop
484
485 cflush_loop:
486 subq r13, 1, r13 // decr counter
487 mfpr r25, ev5__intid // Fetch level of interruptor
488
489 ldqp r31, 32*0(r12) // do a load
490 ldqp r31, 32*1(r12) // do next load
491
492 ldqp r31, 32*2(r12) // do next load
493 ldqp r31, 32*3(r12) // do next load
494
495 ldqp r31, 32*4(r12) // do next load
496 ldqp r31, 32*5(r12) // do next load
497
498 ldqp r31, 32*6(r12) // do next load
499 ldqp r31, 32*7(r12) // do next load
500
501 mfpr r14, ev5__ipl // Fetch current level
502 lda r12, (32*8)(r12) // skip to next cache block addr
503
504 cmple r25, r14, r25 // R25 = 1 if intid .less than or eql ipl
505 beq r25, 1f // if any int's pending, re-queue CFLUSH -- need to check for hlt interrupt???
506
507 bne r13, cflush_loop // loop till done
508 hw_rei // back to user
509
510 ALIGN_BRANCH
511 1: // Here if interrupted
512 mfpr r12, exc_addr
513 subq r12, 4, r12 // Backup PC to point to CFLUSH
514
515 mtpr r12, exc_addr
516 nop
517
518 mfpr r31, pt0 // Pad exc_addr write
519 hw_rei
520
521 \f
522 ALIGN_BLOCK
523 // .sbttl "CSERVE- PALcode for CSERVE instruction"
524 //+
525 // SYS$CSERVE
526 //
527 // Function:
528 // Various functions for private use of console software
529 //
530 // option selector in r0
531 // arguments in r16....
532 //
533 //
534 // r0 = 0 unknown
535 //
536 // r0 = 1 ldqp
537 // r0 = 2 stqp
538 // args, are as for normal STQP/LDQP in VMS PAL
539 //
540 // r0 = 3 dump_tb's
   541 //      r16 = destination PA to dump tb's to.
542 //
543 // r0<0> = 1, success
544 // r0<0> = 0, failure, or option not supported
545 // r0<63:1> = (generally 0, but may be function dependent)
546 // r0 - load data on ldqp
547 //
548 //-
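// For the SIMOS build below, the dispatch reduces to roughly this C sketch
// (the CSERVE_K_* selector values come from cserve.h; the sketch is
// illustrative, not a definitive statement of the interface):
//
//     if (r18 == CSERVE_K_RD_IMPURE)      r0 = pt_impure;        // impure base
//     else if (r18 == CSERVE_K_JTOPAL)    jump_palmode(a0 & ~3); // continue at a0
//     else                                r0 = 0;                // not supported
//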
549 EXPORT(sys_cserve)
550
551 #ifdef SIMOS
552 /* taken from scrmax */
553 cmpeq r18, CSERVE_K_RD_IMPURE, r0
554 bne r0, Sys_Cserve_Rd_Impure
555
556 cmpeq r18, CSERVE_K_JTOPAL, r0
557 bne r0, Sys_Cserve_Jtopal
558 call_pal 0
559
560 or r31, r31, r0
561 hw_rei // and back we go
562
563 Sys_Cserve_Rd_Impure:
564 mfpr r0, pt_impure // Get base of impure scratch area.
565 hw_rei
566
567 ALIGN_BRANCH
568
569 Sys_Cserve_Jtopal:
570 bic a0, 3, t8 // Clear out low 2 bits of address
571 bis t8, 1, t8 // Or in PAL mode bit
572 mtpr t8,exc_addr
573 hw_rei
574
575
576 #else /* SIMOS */
577
578 cmpeq r16, cserve_ldlp, r12 // check for ldqp
579 bne r12, 1f // br if
580
581 cmpeq r16, cserve_stlp, r12 // check for stqp
582 bne r12, 2f // br if
583
584 cmpeq r16, cserve_callback, r12 // check for callback entry
585 bne r12, csrv_callback // br if
586
587 cmpeq r16, cserve_identify, r12 // check for callback entry
588 bne r12, csrv_identify // br if
589
590 or r31, r31, r0 // set failure
591 nop // pad palshadow write
592
593 hw_rei // and back we go
594 #endif /* SIMOS */
595
596 // ldqp
597 ALIGN_QUAD
598 1:
599 ldqp r0,0(r17) // get the data
600 nop // pad palshadow write
601
602 hw_rei // and back we go
603
604
605 // stqp
606 ALIGN_QUAD
607 2:
608 stqp r18, 0(r17) // store the data
609 #ifdef SIMOS
610 lda r0,17(r31) // bogus
611 #else
612 lda r0, CSERVE_SUCCESS(r31) // set success
613 #endif
614 hw_rei // and back we go
615
616
617 ALIGN_QUAD
618 csrv_callback:
619 ldq r16, 0(r17) // restore r16
620 ldq r17, 8(r17) // restore r17
621 lda r0, hlt_c_callback(r31)
622 br r31, sys_enter_console
623
624
625 csrv_identify:
626 mfpr r0, pal_base
627 ldqp r0, 8(r0)
628 hw_rei
629
630
631 // dump tb's
632 ALIGN_QUAD
633 0:
634 // DTB PTEs - 64 entries
635 addq r31, 64, r0 // initialize loop counter
636 nop
637
638 1: mfpr r12, ev5__dtb_pte_temp // read out next pte to temp
639 mfpr r12, ev5__dtb_pte // read out next pte to reg file
640
641 subq r0, 1, r0 // decrement loop counter
642 nop // Pad - no Mbox instr in cycle after mfpr
643
644 stqp r12, 0(r16) // store out PTE
645 addq r16, 8 ,r16 // increment pointer
646
647 bne r0, 1b
648
649 ALIGN_BRANCH
650 // ITB PTEs - 48 entries
651 addq r31, 48, r0 // initialize loop counter
652 nop
653
654 2: mfpr r12, ev5__itb_pte_temp // read out next pte to temp
655 mfpr r12, ev5__itb_pte // read out next pte to reg file
656
657 subq r0, 1, r0 // decrement loop counter
658 nop //
659
660 stqp r12, 0(r16) // store out PTE
661 addq r16, 8 ,r16 // increment pointer
662
663 bne r0, 2b
664 or r31, 1, r0 // set success
665
666 hw_rei // and back we go
667
668 \f
669 // .sbttl "SYS$INTERRUPT - Interrupt processing code"
670
671 //+
672 // SYS$INTERRUPT
673 //
674 // Current state:
675 // Stack is pushed
676 // ps, sp and gp are updated
677 // r12, r14 - available
678 // r13 - INTID (new EV5 IPL)
679 // r25 - ISR
680 // r16, r17, r18 - available
681 //
682 //-
683
684
685 EXPORT(sys_interrupt)
686 cmpeq r13, 31, r12
687 bne r12, sys_int_mchk_or_crd // Check for level 31 interrupt (machine check or crd)
688
689 cmpeq r13, 30, r12
690 bne r12, sys_int_powerfail // Check for level 30 interrupt (powerfail)
691
692 cmpeq r13, 29, r12
693 bne r12, sys_int_perf_cnt // Check for level 29 interrupt (performance counters)
694
695 cmpeq r13, 23, r12
696 bne r12, sys_int_23 // Check for level 23 interrupt
697
698 cmpeq r13, 22, r12
699 bne r12, sys_int_22 // Check for level 22 interrupt (might be
700 // interprocessor or timer interrupt)
701
702 cmpeq r13, 21, r12
703 bne r12, sys_int_21 // Check for level 21 interrupt
704
705 cmpeq r13, 20, r12
706 bne r12, sys_int_20 // Check for level 20 interrupt (might be corrected
707 // system error interrupt)
708
709 mfpr r14, exc_addr // ooops, something is wrong
710 br r31, pal_pal_bug_check_from_int
711
712
713
714 \f
715 //+
716 //sys$int_2*
717 // Routines to handle device interrupts at IPL 23-20.
718 // System specific method to ack/clear the interrupt, detect passive release,
719 // detect interprocessor (22), interval clock (22), corrected
720 // system error (20)
721 //
722 // Current state:
723 // Stack is pushed
724 // ps, sp and gp are updated
725 // r12, r14 - available
726 // r13 - INTID (new EV5 IPL)
727 // r25 - ISR
728 //
729 // On exit:
730 // Interrupt has been ack'd/cleared
731 // a0/r16 - signals IO device interrupt
732 // a1/r17 - contains interrupt vector
733 // exit to ent_int address
734 //
735 //-
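// The common shape of the sys_int_2x handlers that follow, as a C sketch
// (function names are descriptive only):
//
//     sum  = read_TLINTRSUMx();            // per-CPU interrupt summary
//     mask = sum >> shift_for_this_IPL;    // isolate the IOx pending bits
//     tiop = Intr_Find_TIOP(mask);         // highest-priority TIOP, 0 if none
//     if (tiop) vector = *TLILIDx(tiop);   // read (and ack) that node's LID reg
//     if (!tiop || !vector) post_passive_release();
//     else                  post_device_interrupt(vector);
//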
736 ALIGN_BRANCH
737 sys_int_23:
738 Read_TLINTRSUMx(r13,r10,r14) // read the right TLINTRSUMx
739 srl r13, 22, r13 // shift down to examine IPL17
740
741 Intr_Find_TIOP(r13,r14)
742 beq r14, 1f
743
744 Get_TLSB_Node_Address(r14,r10)
745 lda r10, 0xac0(r10) // Get base TLILID address
746
747 ldlp r13, 0(r10) // Read the TLILID register
748 bne r13, pal_post_dev_interrupt
749
750 1: lda r16, osfint_c_passrel(r31) // passive release
751 br r31, pal_post_interrupt //
752
753
754 ALIGN_BRANCH
755 sys_int_22:
   756         or      r31,1,r16               // a0 <- 1 (clock interrupt; the RTC is cleared below)
757 lda r8,0xf01(r31)
758 sll r8,16,r8
759 lda r8,0xa000(r8)
760 sll r8,16,r8
761 lda r8,0x080(r8)
762 or r31,0x10,r9
763 stq_p r9, 0(r8) // clear the rtc interrupt
764
765 br r31, pal_post_interrupt //
766
767
768 ALIGN_BRANCH
769 sys_int_20:
770 Read_TLINTRSUMx(r13,r10,r14) // read the right TLINTRSUMx
771 srl r13, 12, r13 // shift down to examine IPL15
772
773 Intr_Find_TIOP(r13,r14)
774 beq r14, 1f
775
776 Get_TLSB_Node_Address(r14,r10)
777 lda r10, 0xa40(r10) // Get base TLILID address
778
779 ldlp r13, 0(r10) // Read the TLILID register
780 #if turbo_pcia_intr_fix == 0
781 //orig .if eq turbo_pcia_intr_fix
782 bne r13, pal_post_dev_interrupt
783 //orig .iff
784 beq r13, 1f
785
786 and r13, 0x3, r10 // check for PCIA bits
787 beq r10, pal_post_dev_interrupt // done if nothing set
788 save_pcia_intr(1)
789 br r31, pal_post_dev_interrupt //
790 // orig .endc
791 #endif /* turbo_pcia_intr_fix == 0 */
792
793 1: lda r16, osfint_c_passrel(r31) // passive release
794 br r31, pal_post_interrupt //
795
796
797 ALIGN_BRANCH
798 sys_int_21:
   799         or      r31,3,r16               // a0 <- 3, an I/O (device) interrupt
800
801 lda r8,0xf01(r31)
802 sll r8,32,r8
803 ldah r9,0xa0(r31)
804 sll r9,8,r9
805 bis r8,r9,r8
806 lda r8,0x0080(r8)
807 ldqp r9, 0(r8) // read the MISC register
808
809 and r9,0x1,r10 // grab LSB and shift left 2
810 sll r10,2,r10
   811         and     r9,0x2,r11              // grab LSB+1 and shift left 5
812 sll r11,5,r11
813
814 mskbl r8,0,r8 // calculate DIRn address
815 lda r9,0x280(r31)
816 bis r8,r9,r8
817 or r8,r10,r8
818 or r8,r11,r8
819 ldqp r9, 0(r8) // read DIRn
820
821 or r31,63,r17 // load 63 into the counter
822 or r31,1,r11
823 sll r11,63,r11 // load a 1 into the msb
824
825 find_msb:
826 and r9,r11,r10
827 bne r10, found_msb
828 srl r11,1,r11
829 subl r17,1,r17
830 br r31, find_msb
831
832 found_msb:
833 lda r9,0x10(r31)
834 mulq r17,r9,r17 // compute 0x900 + (0x10 * Highest DIRn-bit)
835 lda r9,0x900(r31)
836 addq r17,r9,r17
837
838 br r31, pal_post_interrupt
839
840
841 ALIGN_BRANCH
842 pal_post_dev_interrupt:
843 or r13, r31, r17 // move vector to a1
844 or r31, osfint_c_dev, r16 // a0 signals IO device interrupt
845
846 pal_post_interrupt:
847 mfpr r12, pt_entint
848
849 mtpr r12, exc_addr
850
851 nop
852 nop
853
854 hw_rei_spe
855
856
857 \f
858 //+
859 // sys_passive_release
860 // Just pretend the interrupt never occurred.
861 //-
862
863 EXPORT(sys_passive_release)
864 mtpr r11, ev5__dtb_cm // Restore Mbox current mode for ps
865 nop
866
867 mfpr r31, pt0 // Pad write to dtb_cm
868 hw_rei
869
870 //+
871 //sys_int_powerfail
872 // A powerfail interrupt has been detected. The stack has been pushed.
873 // IPL and PS are updated as well.
874 //
   875 // I'm not sure what to do here; I'm treating it as an IO device interrupt
876 //
877 //-
878
879 ALIGN_BLOCK
880 sys_int_powerfail:
881 lda r12, 0xffc4(r31) // get GBUS_MISCR address bits
882 sll r12, 24, r12 // shift to proper position
883 ldqp r12, 0(r12) // read GBUS_MISCR
884 srl r12, 5, r12 // isolate bit <5>
885 blbc r12, 1f // if clear, no missed mchk
886
887 // Missed a CFAIL mchk
888 lda r13, 0xffc7(r31) // get GBUS$SERNUM address bits
889 sll r13, 24, r13 // shift to proper position
890 lda r14, 0x40(r31) // get bit <6> mask
891 ldqp r12, 0(r13) // read GBUS$SERNUM
892 or r12, r14, r14 // set bit <6>
893 stqp r14, 0(r13) // clear GBUS$SERNUM<6>
894 mb
895 mb
896
897 1: br r31, sys_int_mchk // do a machine check
898
899 lda r17, scb_v_pwrfail(r31) // a1 to interrupt vector
900 mfpr r25, pt_entint
901
902 lda r16, osfint_c_dev(r31) // a0 to device code
903 mtpr r25, exc_addr
904
905 nop // pad exc_addr write
906 nop
907
908 hw_rei_spe
909
910 //+
911 // sys$halt_interrupt
912 // A halt interrupt has been detected. Pass control to the console.
913 //
914 //
915 //-
916 EXPORT(sys_halt_interrupt)
917
918 ldah r13, 0x1800(r31) // load Halt/^PHalt bits
919 Write_TLINTRSUMx(r13,r10,r14) // clear the ^PHalt bits
920
921 mtpr r11, dtb_cm // Restore Mbox current mode
922 nop
923 nop
924 mtpr r0, pt0
925 #ifndef SIMOS
926 pvc_jsr updpcb, bsr=1
927 bsr r0, pal_update_pcb // update the pcb
928 #endif
929 lda r0, hlt_c_hw_halt(r31) // set halt code to hw halt
930 br r31, sys_enter_console // enter the console
931
932
933 \f
934 //+
935 // sys$int_mchk_or_crd
936 //
937 // Current state:
938 // Stack is pushed
939 // ps, sp and gp are updated
940 // r12
941 // r13 - INTID (new EV5 IPL)
942 // r14 - exc_addr
943 // r25 - ISR
944 // r16, r17, r18 - available
945 //
946 //-
947 ALIGN_BLOCK
948
949 sys_int_mchk_or_crd:
950 srl r25, isr_v_mck, r12
951 blbs r12, sys_int_mchk
952 //+
953 // Not a Machine check interrupt, so must be an Internal CRD interrupt
954 //-
955
956 mb //Clear out Cbox prior to reading IPRs
957 srl r25, isr_v_crd, r13 //Check for CRD
958 blbc r13, pal_pal_bug_check_from_int //If CRD not set, shouldn't be here!!!
959
960 lda r9, 1(r31)
961 sll r9, hwint_clr_v_crdc, r9 // get ack bit for crd
962 mtpr r9, ev5__hwint_clr // ack the crd interrupt
963
964 or r31, r31, r12 // clear flag
965 lda r9, mchk_c_ecc_c(r31) // Correctable error MCHK code
966
967 sys_merge_sys_corr:
968 ldah r14, 0xfff0(r31)
969 mtpr r0, pt0 // save r0 for scratch
970 zap r14, 0xE0, r14 // Get Cbox IPR base
971 mtpr r1, pt1 // save r0 for scratch
972
973 ldqp r0, ei_addr(r14) // EI_ADDR IPR
974 ldqp r10, fill_syn(r14) // FILL_SYN IPR
975 bis r0, r10, r31 // Touch lds to make sure they complete before doing scrub
976
977 blbs r12, 1f // no scrubbing for IRQ0 case
978 // XXX bugnion pvc_jsr crd_scrub_mem, bsr=1
979 bsr r13, sys_crd_scrub_mem // and go scrub
980
981 // ld/st pair in scrub routine will have finished due
982 // to ibox stall of stx_c. Don't need another mb.
983 ldqp r8, ei_stat(r14) // EI_STAT, unlock EI_ADDR, BC_TAG_ADDR, FILL_SYN
984 or r8, r31, r12 // Must only be executed once in this flow, and must
985 br r31, 2f // be after the scrub routine.
986
987 1: ldqp r8, ei_stat(r14) // EI_STAT, unlock EI_ADDR, BC_TAG_ADDR, FILL_SYN
988 // For IRQ0 CRD case only - meaningless data.
989
990 2: mfpr r13, pt_mces // Get MCES
991 srl r12, ei_stat_v_ei_es, r14 // Isolate EI_STAT:EI_ES
992 blbc r14, 6f // branch if 630
993 srl r13, mces_v_dsc, r14 // check if 620 reporting disabled
994 blbc r14, 5f // branch if enabled
995 or r13, r31, r14 // don't set SCE if disabled
996 br r31, 8f // continue
997 5: bis r13, BIT(mces_v_sce), r14 // Set MCES<SCE> bit
998 br r31, 8f
999
1000 6: srl r13, mces_v_dpc, r14 // check if 630 reporting disabled
1001 blbc r14, 7f // branch if enabled
1002 or r13, r31, r14 // don't set PCE if disabled
1003 br r31, 8f // continue
1004 7: bis r13, BIT(mces_v_pce), r14 // Set MCES<PCE> bit
1005
1006 // Setup SCB if dpc is not set
1007 8: mtpr r14, pt_mces // Store updated MCES
1008 srl r13, mces_v_sce, r1 // Get SCE
1009 srl r13, mces_v_pce, r14 // Get PCE
1010 or r1, r14, r1 // SCE OR PCE, since they share
1011 // the CRD logout frame
1012 // Get base of the logout area.
1013 GET_IMPURE(r14) // addr of per-cpu impure area
1014 GET_ADDR(r14,(pal_logout_area+mchk_crd_base),r14)
1015
1016 blbc r1, sys_crd_write_logout_frame // If pce/sce not set, build the frame
1017
1018 // Set the 2nd error flag in the logout area:
1019
1020 lda r1, 3(r31) // Set retry and 2nd error flags
1021 sll r1, 30, r1 // Move to bits 31:30 of logout frame flag longword
1022 stlp r1, mchk_crd_flag+4(r14) // store flag longword
1023 br sys_crd_ack
1024
1025 sys_crd_write_logout_frame:
1026 // should only be here if neither the pce or sce bits are set
1027
1028 //+
1029 // Write the mchk code to the logout area
1030 //-
1031 stqp r9, mchk_crd_mchk_code(r14)
1032
1033
1034 //+
1035 // Write the first 2 quadwords of the logout area:
1036 //-
1037 lda r1, 1(r31) // Set retry flag
1038 sll r1, 63, r9 // Move retry flag to bit 63
1039 lda r1, mchk_crd_size(r9) // Combine retry flag and frame size
1040 stqp r1, mchk_crd_flag(r14) // store flag/frame size
1041
1042 #ifndef SIMOS
1043 /* needed? bugnion */
1044 lda r1, mchk_crd_sys_base(r31) // sys offset
1045 sll r1, 32, r1
1046 lda r1, mchk_crd_cpu_base(r1) // cpu offset
1047 stqp r1, mchk_crd_offsets(r14) // store sys offset/cpu offset into logout frame
1048
1049 #endif
1050 //+
1051 // Write error IPRs already fetched to the logout area
1052 //-
1053 stqp r0, mchk_crd_ei_addr(r14)
1054 stqp r10, mchk_crd_fill_syn(r14)
1055 stqp r8, mchk_crd_ei_stat(r14)
1056 stqp r25, mchk_crd_isr(r14)
1057 //+
1058 // Log system specific info here
1059 //-
1060 crd_storeTLEP_:
1061 lda r1, 0xffc4(r31) // Get GBUS$MISCR address
1062 sll r1, 24, r1
1063 ldqp r1, 0(r1) // Read GBUS$MISCR
1064 sll r1, 16, r1 // shift up to proper field
1065 mfpr r10, pt_whami // get our node id
1066 extbl r10, 1, r10 // shift to bit 0
1067 or r1, r10, r1 // merge MISCR and WHAMI
1068 stlp r1, mchk_crd_whami(r14) // write to crd logout area
1069 srl r10, 1, r10 // shift off cpu number
1070
1071 Get_TLSB_Node_Address(r10,r0) // compute our nodespace address
1072
1073 OSFcrd_TLEPstore_tlsb(tldev)
1074 OSFcrd_TLEPstore_tlsb_clr(tlber)
1075 OSFcrd_TLEPstore_tlsb_clr(tlesr0)
1076 OSFcrd_TLEPstore_tlsb_clr(tlesr1)
1077 OSFcrd_TLEPstore_tlsb_clr(tlesr2)
1078 OSFcrd_TLEPstore_tlsb_clr(tlesr3)
1079
1080 sys_crd_ack:
1081 mfpr r0, pt0 // restore r0
1082 mfpr r1, pt1 // restore r1
1083
1084 srl r12, ei_stat_v_ei_es, r12
1085 blbc r12, 5f
1086 srl r13, mces_v_dsc, r10 // logging enabled?
1087 br r31, 6f
1088 5: srl r13, mces_v_dpc, r10 // logging enabled?
1089 6: blbc r10, sys_crd_post_interrupt // logging enabled -- report it
1090
1091 // logging not enabled --
1092 // Get base of the logout area.
1093 GET_IMPURE(r13) // addr of per-cpu impure area
1094 GET_ADDR(r13,(pal_logout_area+mchk_crd_base),r13)
1095 ldlp r10, mchk_crd_rsvd(r13) // bump counter
1096 addl r10, 1, r10
1097 stlp r10, mchk_crd_rsvd(r13)
1098 mb
1099 br r31, sys_crd_dismiss_interrupt // just return
1100
1101 //+
1102 // The stack is pushed. Load up a0,a1,a2 and vector via entInt
1103 //
1104 //-
1105
1106 ALIGN_BRANCH
1107 sys_crd_post_interrupt:
1108 lda r16, osfint_c_mchk(r31) // flag as mchk/crd in a0
1109 lda r17, scb_v_proc_corr_err(r31) // a1 <- interrupt vector
1110
1111 blbc r12, 1f
1112 lda r17, scb_v_sys_corr_err(r31) // a1 <- interrupt vector
1113
1114 1: subq r31, 1, r18 // get a -1
1115 mfpr r25, pt_entInt
1116
1117 srl r18, 42, r18 // shift off low bits of kseg addr
1118 mtpr r25, exc_addr // load interrupt vector
1119
1120 sll r18, 42, r18 // shift back into position
1121 or r14, r18, r18 // EV4 algorithm - pass pointer to mchk frame as kseg address
1122
1123 hw_rei_spe // done
1124
1125
1126 //+
1127 // The stack is pushed. Need to back out of it all.
1128 //-
1129
1130 sys_crd_dismiss_interrupt:
1131 br r31, Call_Pal_Rti
1132
1133 \f
1134 // .sbttl sys_crd_scrub_mem
1135
1136 //+
1137 //
1138 // sys_crd_scrub_mem
1139 // called
1140 // jsr r13, sys$crd_scrub_mem
1141 // r0 = addr of cache block
1142 //
1143 //-
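//
// Illustrative C sketch of the scrub performed below (assumes r0 holds the
// failing physical address and that these physical loads/stores reach memory,
// so the write-back regenerates good ECC):
//
//     q = *(volatile uint64_t *)pa;   // ldqpl: read the failing location
//     *(volatile uint64_t *)pa = q;   // stqpc: write it back if still there
//     pa ^= 0x20;                     // other 32-byte half of the block
//     q = *(volatile uint64_t *)pa;
//     *(volatile uint64_t *)pa = q;
//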
1144
1145
1146
1147 ALIGN_BLOCK // align for branch target
1148 sys_crd_scrub_mem:
1149 // now find error in memory, and attempt to scrub that cache block
1150 // This routine just scrubs the failing octaword
1151 // Only need to "touch" one quadword per octaword to accomplish the scrub
1152 srl r0, 39, r8 // get high bit of bad pa
1153 blbs r8, 1f // don't attempt fixup on IO space addrs
1154 nop // needed to align the ldqpl to octaword boundary
1155 nop // "
1156
1157 ldqpl r8, 0(r0) // attempt to read the bad memory
1158 // location
1159 // (Note bits 63:40,3:0 of ei_addr
1160 // are set to 1, but as long as
1161 // we are doing a phys ref, should
1162 // be ok)
1163 nop // Needed to keep the Ibox from swapping the ldqpl into E1
1164
1165 stqpc r8, 0(r0) // Store it back if it is still there.
1166 // If store fails, location already
1167 // scrubbed by someone else
1168
1169 nop // needed to align the ldqpl to octaword boundary
1170
1171 lda r8, 0x20(r31) // flip bit 5 to touch next hexaword
1172 xor r8, r0, r0
1173 nop // needed to align the ldqpl to octaword boundary
1174 nop // "
1175
1176 ldqpl r8, 0(r0) // attempt to read the bad memory
1177 // location
1178 // (Note bits 63:40,3:0 of ei_addr
1179 // are set to 1, but as long as
1180 // we are doing a phys ref, should
1181 // be ok)
1182 nop // Needed to keep the Ibox from swapping the ldqpl into E1
1183
1184 stqpc r8, 0(r0) // Store it back if it is still there.
1185 // If store fails, location already
1186 // scrubbed by someone else
1187
1188 lda r8, 0x20(r31) // restore r0 to original address
1189 xor r8, r0, r0
1190
1191 //at this point, ei_stat could be locked due to a new corr error on the ld,
1192 //so read ei_stat to unlock AFTER this routine.
1193
1194 // XXX bugnion pvc$jsr crd_scrub_mem, bsr=1, dest=1
1195 1: ret r31, (r13) // and back we go
1196
1197 \f
1198 // .sbttl "SYS$INT_MCHK - MCHK Interrupt code"
1199 //+
1200 // Machine check interrupt from the system. Setup and join the
1201 // regular machine check flow.
1202 // On exit:
1203 // pt0 - saved r0
1204 // pt1 - saved r1
1205 // pt4 - saved r4
1206 // pt5 - saved r5
1207 // pt6 - saved r6
1208 // pt10 - saved exc_addr
1209 // pt_misc<47:32> - mchk code
1210 // pt_misc<31:16> - scb vector
1211 // r14 - base of Cbox IPRs in IO space
1212 // MCES<mchk> is set
1213 //-
1214 ALIGN_BLOCK
1215 sys_int_mchk:
1216 lda r14, mchk_c_sys_hrd_error(r31)
1217 mfpr r12, exc_addr
1218
1219 addq r14, 1, r14 // Flag as interrupt
1220 nop
1221
1222 sll r14, 32, r14 // Move mchk code to position
1223 mtpr r12, pt10 // Stash exc_addr
1224
1225 mfpr r12, pt_misc // Get MCES and scratch
1226 mtpr r0, pt0 // Stash for scratch
1227
1228 zap r12, 0x3c, r12 // Clear scratch
1229 blbs r12, sys_double_machine_check // MCHK halt if double machine check
1230
1231 or r12, r14, r12 // Combine mchk code
1232 lda r14, scb_v_sysmchk(r31) // Get SCB vector
1233
1234 sll r14, 16, r14 // Move SCBv to position
1235 or r12, r14, r14 // Combine SCBv
1236
1237 bis r14, BIT(mces_v_mchk), r14 // Set MCES<MCHK> bit
1238 mtpr r14, pt_misc // Save mchk code!scbv!whami!mces
1239
1240 ldah r14, 0xfff0(r31)
1241 mtpr r1, pt1 // Stash for scratch
1242
1243 zap r14, 0xE0, r14 // Get Cbox IPR base
1244 mtpr r4, pt4
1245
1246 mtpr r5, pt5
1247
1248 #if beh_model
1249 // .if ne beh_model
1250 ldah r25, 0xC000(r31) // Get base of demon space
1251 lda r25, 0x340(r25) // Add interrupt demon offset
1252
1253 ldqp r13, 0(r25) // Read the control register
1254 nop
1255
1256 and r13, 0x10, r8 // For debug, check that the interrupt is expected
1257 beq r8, interrupt_not_expected
1258
1259 bic r13, 0x10, r13
1260 stqp r13, 0(r25) // Ack and clear the interrupt
1261 // XXX bugnion pvc$violate 379 // stqp can't trap except replay. mt ipr only problem if mf same ipr in same shadow
1262 .endc
1263 #endif
1264
1265 mtpr r6, pt6
1266 br r31, sys_mchk_collect_iprs // Join common machine check flow
1267
1268 \f
1269 // .sbttl "SYS$INT_PERF_CNT - Performance counter interrupt code"
1270 //+
1271 //sys$int_perf_cnt
1272 //
1273 // A performance counter interrupt has been detected. The stack has been pushed.
1274 // IPL and PS are updated as well.
1275 //
1276 // on exit to interrupt entry point ENTINT::
1277 // a0 = osfint$c_perf
1278 // a1 = scb$v_perfmon (650)
1279 // a2 = 0 if performance counter 0 fired
1280 // a2 = 1 if performance counter 1 fired
1281 // a2 = 2 if performance counter 2 fired
1282 // (if more than one counter overflowed, an interrupt will be
1283 // generated for each counter that overflows)
1284 //
1285 //
1286 //-
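// The ack at the end of this handler boils down to the following (a sketch;
// it relies on the PC0C/PC1C/PC2C ack bits being adjacent in HWINT_CLR, which
// is what the shift by a2 assumes):
//
//     hwint_clr = 1ULL << (hwint_clr_v_pc0c + a2);    // a2 = 0, 1, or 2
//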
1287 ALIGN_BLOCK
1288 sys_int_perf_cnt: // Performance counter interrupt
1289 lda r17, scb_v_perfmon(r31) // a1 to interrupt vector
1290 mfpr r25, pt_entint
1291
1292 lda r16, osfint_c_perf(r31) // a0 to perf counter code
1293 mtpr r25, exc_addr
1294
1295 //isolate which perf ctr fired, load code in a2, and ack
1296 mfpr r25, isr
1297 or r31, r31, r18 // assume interrupt was pc0
1298
1299 srl r25, isr_v_pc1, r25 // isolate
1300 cmovlbs r25, 1, r18 // if pc1 set, load 1 into r14
1301
1302 srl r25, 1, r25 // get pc2
1303 cmovlbs r25, 2, r18 // if pc2 set, load 2 into r14
1304
1305 lda r25, 1(r31) // get a one
1306 sll r25, r18, r25
1307
1308 sll r25, hwint_clr_v_pc0c, r25 // ack only the perf counter that generated the interrupt
1309 mtpr r25, hwint_clr
1310
1311 hw_rei_spe
1312
1313
1314 \f
1315 ALIGN_BLOCK
1316 // .sbttl "System specific RESET code"
1317 //+
1318 // RESET code
1319 // On entry:
1320 // r1 = pal_base +8
1321 //
1322 // Entry state on trap:
1323 // r0 = whami
1324 // r2 = base of scratch area
1325 // r3 = halt code
1326 // and the following 3 if init_cbox is enabled:
1327 // r5 = sc_ctl
1328 // r6 = bc_ctl
1329 // r7 = bc_cnfg
1330 //
1331 // Entry state on switch:
1332 // r17 - new PC
1333 // r18 - new PCBB
1334 // r19 - new VPTB
1335 //
1336 //-
1337
1338 #if rax_mode==0
1339 .globl sys_reset
1340 sys_reset:
1341 // mtpr r31, ic_flush_ctl // do not flush the icache - done by hardware before SROM load
1342 mtpr r31, itb_ia // clear the ITB
1343 mtpr r31, dtb_ia // clear the DTB
1344
1345 lda r1, -8(r1) // point to start of code
1346 mtpr r1, pal_base // initialize PAL_BASE
1347
1348 // Interrupts
1349 mtpr r31, astrr // stop ASTs
1350 mtpr r31, aster // stop ASTs
1351 mtpr r31, sirr // clear software interrupts
1352
1353 mtpr r0, pt1 // r0 is whami (unless we entered via swp)
1354
1355 //orig ldah r1, <<1@<icsr$v_sde-16>> ! <1@<icsr$v_fpe-16>> ! <2@<icsr$v_spe-16>>>(r31)
1356 ldah r1,(BIT(icsr_v_sde-16)|BIT(icsr_v_fpe-16)|BIT(icsr_v_spe-16+1))(zero)
1357
1358 #if disable_crd == 0
1359 // .if eq disable_crd
1360 bis r31, 1, r0
1361 sll r0, icsr_v_crde, r0 // A 1 in iscr<corr_read_enable>
1362 or r0, r1, r1 // Set the bit
1363 #endif
1364
1365 mtpr r1, icsr // ICSR - Shadows enabled, Floating point enable,
1366 // super page enabled, correct read per assembly option
1367
1368 // Mbox/Dcache init
1369 //orig lda r1, <1@<mcsr$v_sp1>>(r31)
1370 lda r1,BIT(mcsr_v_sp1)(zero)
1371
1372 mtpr r1, mcsr // MCSR - Super page enabled
1373 lda r1, BIT(dc_mode_v_dc_ena)(r31)
1374 ALIGN_BRANCH
1375 // mtpr r1, dc_mode // turn Dcache on
1376 nop
1377
1378 mfpr r31, pt0 // No Mbox instr in 1,2,3,4
1379 mfpr r31, pt0
1380 mfpr r31, pt0
1381 mfpr r31, pt0
1382 mtpr r31, dc_flush // flush Dcache
1383
1384 // build PS (IPL=7,CM=K,VMM=0,SW=0)
1385 lda r11, 0x7(r31) // Set shadow copy of PS - kern mode, IPL=7
1386 lda r1, 0x1F(r31)
1387 mtpr r1, ipl // set internal <ipl>=1F
1388 mtpr r31, ev5__ps // set new ps<cm>=0, Ibox copy
1389 mtpr r31, dtb_cm // set new ps<cm>=0, Mbox copy
1390
1391 // Create the PALtemp pt_intmask -
1392 // MAP:
1393 // OSF IPL EV5 internal IPL(hex) note
1394 // 0 0
1395 // 1 1
1396 // 2 2
1397 // 3 14 device
1398 // 4 15 device
1399 // 5 16 device
1400 // 6 1E device,performance counter, powerfail
1401 // 7 1F
1402 //
1403
1404 ldah r1, 0x1f1E(r31) // Create upper lw of int_mask
1405 lda r1, 0x1615(r1)
1406
1407 sll r1, 32, r1
1408 ldah r1, 0x1402(r1) // Create lower lw of int_mask
1409
1410 lda r1, 0x0100(r1)
1411 mtpr r1, pt_intmask // Stash in PALtemp
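        // The four instructions above pack that table into a single quadword,
        // one byte per OSF IPL (byte 0 = OSF IPL 0):
        //
        //     pt_intmask = 0x1F1E161514020100
        //
        // so the EV5 IPL for a given OSF IPL can later be fetched with a
        // single extbl of pt_intmask.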
1412
1413 // Unlock a bunch of chip internal IPRs
  1414         mtpr    r31, exc_sum            // clear out exception summary and exc_mask
1415 mfpr r31, va // unlock va, mmstat
1416 //rig lda r8, <<1@icperr_stat$v_dpe> ! <1@icperr_stat$v_tpe> ! <1@icperr_stat$v_tmr>>(r31)
1417 lda r8,(BIT(icperr_stat_v_dpe)|BIT(icperr_stat_v_tpe)|BIT(icperr_stat_v_tmr))(zero)
1418
1419 mtpr r8, icperr_stat // Clear Icache parity error & timeout status
1420 //orig lda r8, <<1@dcperr_stat$v_lock> ! <1@dcperr_stat$v_seo>>(r31)
1421 lda r8,(BIT(dcperr_stat_v_lock)|BIT(dcperr_stat_v_seo))(r31)
1422
1423 mtpr r8, dcperr_stat // Clear Dcache parity error status
1424
1425 rc r0 // clear intr_flag
1426 mtpr r31, pt_trap
1427
1428 mfpr r0, pt_misc
1429 srl r0, pt_misc_v_switch, r1
1430 blbs r1, sys_reset_switch // see if we got here from swppal
1431
1432 // Rest of the "real" reset flow
1433 // ASN
1434 mtpr r31, dtb_asn
1435 mtpr r31, itb_asn
1436
1437 lda r1, 0x67(r31)
1438 sll r1, hwint_clr_v_pc0c, r1
1439 mtpr r1, hwint_clr // Clear hardware interrupt requests
1440
1441 lda r1, BIT(mces_v_dpc)(r31) // 1 in disable processor correctable error
1442 mfpr r0, pt1 // get whami
1443 insbl r0, 1, r0 // isolate whami in correct pt_misc position
1444 or r0, r1, r1 // combine whami and mces
1445 mtpr r1, pt_misc // store whami and mces, swap bit clear
1446
1447 zapnot r3, 1, r0 // isolate halt code
1448 mtpr r0, pt0 // save entry type
1449
1450 // Cycle counter
1451 or r31, 1, r9 // get a one
1452 sll r9, 32, r9 // shift to <32>
1453 mtpr r31, cc // clear Cycle Counter
1454 mtpr r9, cc_ctl // clear and enable the Cycle Counter
1455 mtpr r31, pt_scc // clear System Cycle Counter
1456
1457
1458 // Misc PALtemps
1459 mtpr r31, maf_mode // no mbox instructions for 3 cycles
1460 or r31, 1, r1 // get bogus scbb value
1461 mtpr r1, pt_scbb // load scbb
1462 mtpr r31, pt_prbr // clear out prbr
1463 #ifdef SIMOS
1464 // or zero,kludge_initial_pcbb,r1
1465 GET_ADDR(r1, (kludge_initial_pcbb-pal_base), r1)
1466 #else
1467 mfpr r1, pal_base
1468 //orig sget_addr r1, (kludge_initial_pcbb-pal$base), r1, verify=0// get address for temp pcbb
1469 GET_ADDR(r1, (kludge_initial_pcbb-pal_base), r1)
1470 #endif
1471 mtpr r1, pt_pcbb // load pcbb
1472 lda r1, 2(r31) // get a two
1473 sll r1, 32, r1 // gen up upper bits
1474 mtpr r1, mvptbr
1475 mtpr r1, ivptbr
1476 mtpr r31, pt_ptbr
1477 // Performance counters
1478 mtpr r31, pmctr
1479
1480 #if init_cbox != 0
1481 // .if ne init_cbox
1482 // Only init the Scache and the Bcache if there have been no previous
1483 // cacheable dstream loads or stores.
1484 //
1485 // Inputs:
1486 // r5 - sc_ctl
1487 // r6 - bc_ctl
1488 // r7 - bc_cnfg
1489
1490 ldah r0, 0xfff0(r31)
1491 zap r0, 0xE0, r0 // Get Cbox IPR base
1492 ldqp r19, ev5__sc_ctl(r0) // read current sc_ctl
1493 temp = <<<1@bc_ctl$v_ei_dis_err> + <1@bc_ctl$v_ei_ecc_or_parity> + <1@bc_ctl$v_corr_fill_dat>>@-1>
1494 lda r20, temp(r31) // create default bc_ctl (bc disabled, errors disabled, ecc mode)
1495 sll r20, 1, r20
1496 temp = 0x017441 // default bc_config
1497 get_addr r21, temp, r31 // create default bc_config
1498 lda r23, <1@sc_ctl_v_sc_flush>(r31) //set flag to invalidate scache in set_sc_bc_ctl
1499
1500 // XXX bugnion pvc$jsr scbcctl, bsr=1
1501 bsr r10, set_sc_bc_ctl
1502 update_bc_ctl_shadow r6, r23 // update bc_ctl shadow using r6 as input// r23 gets adjusted impure pointer
1503 store_reg1 bc_config, r7, r23, ipr=1 // update bc_config shadow in impure area
1504 // .endc
1505 #endif
1506 // Clear pmctr_ctl in impure area
1507
1508 #ifndef SIMOS
1509 // can't assemble ???
1510 update_pmctr_ctl r31, r1 // clear pmctr_ctl // r1 trashed
1511 #endif
1512
1513 ldah r14, 0xfff0(r31)
1514 zap r14, 0xE0, r14 // Get Cbox IPR base
1515 #ifndef SIMOS
1516 ldqp r31, sc_stat(r14) // Clear sc_stat and sc_addr
1517 ldqp r31, ei_stat(r14)
1518 ldqp r31, ei_stat(r14) // Clear ei_stat, ei_addr, bc_tag_addr, fill_syn
1519 #endif
1520 GET_IMPURE(r13)
1521 stqpc r31, 0(r13) // Clear lock_flag
1522
1523 mfpr r0, pt0 // get entry type
  1524         br      r31, sys_enter_console  // enter the console
1525
1526 #endif /* rax_mode == 0 */
1527
1528
1529
1530 \f
1531 //.if ne rax_mode
1532 #if rax_mode != 0
1533
1534 // For RAX:
1535 // r0 - icsr at first, then used for cbox ipr base offset
1536 // r2 - mcsr
1537 // r3 - dc_mode
1538 // r4 - maf_mode
1539 // r5 - sc_ctl
1540 // r6 - bc_ctl
1541 // r7 - bc_cnfg
1542 .globl sys_reset
1543 sys_reset:
1544 mtpr r31, ev5__dtb_cm // set mbox mode to kernel
1545 mtpr r31, ev5__ps // set Ibox mode to kernel - E1
1546
1547 mtpr r0, ev5__icsr // Load ICSR - E1
1548
1549 mtpr r2, ev5__mcsr
1550 mfpr r8, pal_base
1551
1552 ldah r0, 0xfff0(r31)
1553 zap r0, 0xE0, r0 // Get Cbox IPR base
1554
1555 mtpr r31, ev5__itb_asn // clear asn - E1
1556 ldqp r19, ev5__sc_ctl(r0) // read current sc_ctl
1557
1558 temp = <<<1@bc_ctl$v_ei_dis_err> + <1@bc_ctl$v_ei_ecc_or_parity> + <1@bc_ctl$v_corr_fill_dat>>@-1>
1559 lda r20, temp(r31) // create default bc_ctl (bc disabled, errors disabled, ecc mode)
1560 sll r20, 1, r20
1561
1562 temp = 0x017441 // default bc_config
1563 get_addr r21, temp, r31 // create default bc_config
1564 lda r23, <1@sc_ctl_v_sc_flush>(r31) //set flag to invalidate scache in set_sc_bc_ctl
1565
1566 // XXX bugnion pvc$jsr scbcctl, bsr=1
1567 bsr r10, set_sc_bc_ctl
1568 update_bc_ctl_shadow r6, r2 // initialize bc_ctl shadow// adjusted impure pointer in r2
1569 store_reg1 pmctr_ctl, r31, r2, ipr=1 // clear pmctr_ctl
1570 store_reg1 bc_config, r7, r2, ipr=1 // initialize bc_config shadow
1571
1572 mtpr r3, ev5__dc_mode // write dc_mode
1573 mtpr r31, ev5__dc_flush // flush dcache
1574
1575 mtpr r31, ev5__exc_sum // clear exc_sum - E1
1576 mtpr r31, ev5__exc_mask // clear exc_mask - E1
1577
1578 ldah r2, 4(r31) // For EXC_ADDR
1579 mtpr r2, ev5__exc_addr // EXC_ADDR to 40000 (hex)
1580
1581 mtpr r31, ev5__sirr // Clear SW interrupts (for ISP)
1582 mtpr r4, ev5__maf_mode // write maf_mode
1583
1584 mtpr r31, ev5__alt_mode // set alt_mode to kernel
1585 mtpr r31, ev5__itb_ia // clear ITB - E1
1586
1587 lda r1, 0x1F(r31) // For IPL
1588 mtpr r1, ev5__ipl // IPL to 1F
1589
1590 mtpr r31, ev5__hwint_clr // clear hardware interrupts
1591 mtpr r31, ev5__aster // disable AST interrupts
1592
1593 mtpr r31, ev5__astrr // clear AST requests
1594 mtpr r31, ev5__dtb_ia // clear dtb
1595
1596 nop
1597 mtpr r31, pt_trap
1598
1599 srl r2, page_offset_size_bits, r9 // Start to make PTE for address 40000
1600 sll r9, 32, r9
1601
1602 lda r9, 0x7F01(r9) // Make PTE, V set, all RE set, all but UWE set
1603 nop
1604
1605 mtpr r9, dtb_pte // ACORE hack, load TB with 1-1 translation for address 40000
1606 mtpr r2, itb_tag // ACORE hack, load TB with 1-1 translation for address 40000
1607
1608 mtpr r2, dtb_tag
1609 mtpr r9, itb_pte
1610
1611 and r31, r31, r0 // clear deposited registers, note: r2 already overwritten
1612 and r31, r31, r3
1613
1614 and r31, r31, r4
1615 and r31, r31, r5
1616
1617 and r31, r31, r6
1618 and r31, r31, r7
1619
1620 hw_rei //May need to be a rei_stall since
1621 //we write to TB's above
1622 //However, it currently works ok. (JH)
1623
1624
1625 // .endc
1626 #endif /*rax_mode != 0 */
1627
1628 \f
1629 // swppal entry
1630 // r0 - pt_misc
1631 // r17 - new PC
1632 // r18 - new PCBB
1633 // r19 - new VPTB
1634 sys_reset_switch:
1635 or r31, 1, r9
1636 sll r9, pt_misc_v_switch, r9
1637 bic r0, r9, r0 // clear switch bit
1638 mtpr r0, pt_misc
1639
1640 rpcc r1 // get cyccounter
1641
1642 ldqp r22, osfpcb_q_fen(r18) // get new fen/pme
1643 ldlp r23, osfpcb_l_cc(r18) // get cycle counter
1644 ldlp r24, osfpcb_l_asn(r18) // get new asn
1645
1646
1647 ldqp r25, osfpcb_q_Mmptr(r18)// get new mmptr
1648 sll r25, page_offset_size_bits, r25 // convert pfn to pa
1649 mtpr r25, pt_ptbr // load the new mmptr
1650 mtpr r18, pt_pcbb // set new pcbb
1651
1652 bic r17, 3, r17 // clean use pc
1653 mtpr r17, exc_addr // set new pc
1654 mtpr r19, mvptbr
1655 mtpr r19, ivptbr
1656
1657 ldqp r30, osfpcb_q_Usp(r18) // get new usp
1658 mtpr r30, pt_usp // save usp
1659
1660 sll r24, dtb_asn_v_asn, r8
1661 mtpr r8, dtb_asn
1662 sll r24, itb_asn_v_asn, r24
1663 mtpr r24, itb_asn
1664
1665 mfpr r25, icsr // get current icsr
1666 lda r24, 1(r31)
1667 sll r24, icsr_v_fpe, r24 // 1 in icsr<fpe> position
1668 bic r25, r24, r25 // clean out old fpe
1669 and r22, 1, r22 // isolate new fen bit
1670 sll r22, icsr_v_fpe, r22
1671 or r22, r25, r25 // or in new fpe
1672 mtpr r25, icsr // update ibox ipr
1673
1674 subl r23, r1, r1 // gen new cc offset
1675 insll r1, 4, r1 // << 32
1676 mtpr r1, cc // set new offset
1677
1678 or r31, r31, r0 // set success
1679 ldqp r30, osfpcb_q_Ksp(r18) // get new ksp
1680 mfpr r31, pt0 // stall
1681 hw_rei_stall
1682 \f
1683 // .sbttl "SYS_MACHINE_CHECK - Machine check PAL"
1684 ALIGN_BLOCK
1685 //+
1686 //sys$machine_check
1687 // A machine_check trap has occurred. The Icache has been flushed.
1688 //
1689 //-
1690
1691 EXPORT(sys_machine_check)
1692 // Need to fill up the refill buffer (32 instructions) and
1693 // then flush the Icache again.
1694 // Also, due to possible 2nd Cbox register file write for
1695 // uncorrectable errors, no register file read or write for 7 cycles.
1696
1697 nop
1698 mtpr r0, pt0 // Stash for scratch -- OK if Cbox overwrites r0 later
1699
1700 nop
1701 nop
1702
1703 nop
1704 nop
1705
1706 nop
1707 nop
1708
1709 nop
1710 nop
1711 // 10 instructions// 5 cycles
1712
1713 nop
1714 nop
1715
1716 nop
1717 nop
1718
1719 // Register file can now be written
1720 lda r0, scb_v_procmchk(r31) // SCB vector
1721 mfpr r13, pt_mces // Get MCES
1722 sll r0, 16, r0 // Move SCBv to correct position
1723 // bis r13, #<1@mces$v_mchk>, r14 // Set MCES<MCHK> bit
1724 bis r13, BIT(mces_v_mchk), r14 // Set MCES<MCHK> bit
1725
1726
1727 zap r14, 0x3C, r14 // Clear mchk_code word and SCBv word
1728 mtpr r14, pt_mces
1729 // 20 instructions
1730
1731 nop
1732 or r14, r0, r14 // Insert new SCB vector
1733 lda r0, mchk_c_proc_hrd_error(r31) // MCHK code
1734 mfpr r12, exc_addr
1735
1736 sll r0, 32, r0 // Move MCHK code to correct position
1737 mtpr r4, pt4
1738 or r14, r0, r14 // Insert new MCHK code
1739 mtpr r14, pt_misc // Store updated MCES, MCHK code, and SCBv
1740
1741 ldah r14, 0xfff0(r31)
1742 mtpr r1, pt1 // Stash for scratch - 30 instructions
1743
1744 zap r14, 0xE0, r14 // Get Cbox IPR base
1745 mtpr r12, pt10 // Stash exc_addr
1746
1747
1748
1749 mtpr r31, ic_flush_ctl // Second Icache flush, now it is really flushed.
1750 blbs r13, sys_double_machine_check // MCHK halt if double machine check
1751
1752 mtpr r6, pt6
1753 mtpr r5, pt5
1754
1755 // Look for the powerfail cases here....
1756 mfpr r4, isr
1757 srl r4, isr_v_pfl, r4
1758 blbc r4, sys_mchk_collect_iprs // skip if no powerfail interrupt pending
1759 lda r4, 0xffc4(r31) // get GBUS$MISCR address bits
1760 sll r4, 24, r4 // shift to proper position
1761 ldqp r4, 0(r4) // read GBUS$MISCR
1762 srl r4, 5, r4 // isolate bit <5>
1763 blbc r4, sys_mchk_collect_iprs // skip if already cleared
1764 // No missed CFAIL mchk
1765 lda r5, 0xffc7(r31) // get GBUS$SERNUM address bits
1766 sll r5, 24, r5 // shift to proper position
1767 lda r6, 0x40(r31) // get bit <6> mask
1768 ldqp r4, 0(r5) // read GBUS$SERNUM
1769 or r4, r6, r6 // set bit <6>
1770 stqp r6, 0(r5) // clear GBUS$SERNUM<6>
1771 mb
1772 mb
1773
1774 \f
1775 //+
1776 // Start to collect the IPRs. Common entry point for mchk flows.
1777 //
1778 // Current state:
1779 // pt0 - saved r0
1780 // pt1 - saved r1
1781 // pt4 - saved r4
1782 // pt5 - saved r5
1783 // pt6 - saved r6
1784 // pt10 - saved exc_addr
1785 // pt_misc<47:32> - mchk code
1786 // pt_misc<31:16> - scb vector
1787 // r14 - base of Cbox IPRs in IO space
1788 // r0, r1, r4, r5, r6, r12, r13, r25 - available
1789 // r8, r9, r10 - available as all loads are physical
1790 // MCES<mchk> is set
1791 //
1792 //-
1793
1794 EXPORT(sys_mchk_collect_iprs)
1795 mb // MB before reading Scache IPRs
1796 mfpr r1, icperr_stat
1797
1798 mfpr r8, dcperr_stat
1799 mtpr r31, dc_flush // Flush the Dcache
1800
1801 mfpr r31, pt0 // Pad Mbox instructions from dc_flush
1802 mfpr r31, pt0
1803 nop
1804 nop
1805
1806 ldqp r9, sc_addr(r14) // SC_ADDR IPR
1807 bis r9, r31, r31 // Touch ld to make sure it completes before
1808 // read of SC_STAT
1809 ldqp r10, sc_stat(r14) // SC_STAT, also unlocks SC_ADDR
1810
1811 ldqp r12, ei_addr(r14) // EI_ADDR IPR
1812 ldqp r13, bc_tag_addr(r14) // BC_TAG_ADDR IPR
1813 ldqp r0, fill_syn(r14) // FILL_SYN IPR
1814 bis r12, r13, r31 // Touch lds to make sure they complete before reading EI_STAT
1815 bis r0, r0, r31 // Touch lds to make sure they complete before reading EI_STAT
1816 ldqp r25, ei_stat(r14) // EI_STAT, unlock EI_ADDR, BC_TAG_ADDR, FILL_SYN
  1817         ldqp    r31, ei_stat(r14)       // Read again to ensure it is unlocked
1818
1819
1820
1821 \f
1822 //+
1823 // Look for nonretryable cases
1824 // In this segment:
1825 // r5<0> = 1 means retryable
1826 // r4, r6, and r14 are available for scratch
1827 //
1828 //-
1829
1830
1831 bis r31, r31, r5 // Clear local retryable flag
1832 srl r25, ei_stat_v_bc_tperr, r25 // Move EI_STAT status bits to low bits
1833
1834 lda r4, 1(r31)
1835 sll r4, icperr_stat_v_tmr, r4
1836 and r1, r4, r4 // Timeout reset
1837 bne r4, sys_cpu_mchk_not_retryable
1838
1839 and r8, BIT(dcperr_stat_v_lock), r4 // DCache parity error locked
1840 bne r4, sys_cpu_mchk_not_retryable
1841
1842 lda r4, 1(r31)
1843 sll r4, sc_stat_v_sc_scnd_err, r4
1844 and r10, r4, r4 // 2nd Scache error occurred
1845 bne r4, sys_cpu_mchk_not_retryable
1846
1847
1848 bis r31, 0xa3, r4 // EI_STAT Bcache Tag Parity Error, Bcache Tag Control
1849 // Parity Error, Interface Parity Error, 2nd Error
1850
1851 and r25, r4, r4
1852 bne r4, sys_cpu_mchk_not_retryable
1853
1854 // bis r31, #<1@<ei_stat$v_unc_ecc_err-ei_stat$v_bc_tperr>>, r4
1855 bis r31, BIT((ei_stat_v_unc_ecc_err-ei_stat_v_bc_tperr)), r4
1856 and r25, r4, r4 // Isolate the Uncorrectable Error Bit
1857 // bis r31, #<1@<ei_stat$v_fil_ird-ei_stat$v_bc_tperr>>, r6
1858 bis r31, BIT((ei_stat_v_fil_ird-ei_stat_v_bc_tperr)), r6 // Isolate the Iread bit
1859 cmovne r6, 0, r4 // r4 = 0 if IRD or if No Uncorrectable Error
1860 bne r4, sys_cpu_mchk_not_retryable
1861
1862 lda r4, 7(r31)
1863 and r10, r4, r4 // Isolate the Scache Tag Parity Error bits
1864 bne r4, sys_cpu_mchk_not_retryable // All Scache Tag PEs are not retryable
1865
1866
1867 lda r4, 0x7f8(r31)
1868 and r10, r4, r4 // Isolate the Scache Data Parity Error bits
1869 srl r10, sc_stat_v_cbox_cmd, r6
1870 and r6, 0x1f, r6 // Isolate Scache Command field
1871 subq r6, 1, r6 // Scache Iread command = 1
1872 cmoveq r6, 0, r4 // r4 = 0 if IRD or if No Parity Error
1873 bne r4, sys_cpu_mchk_not_retryable
1874
1875 // Look for the system unretryable cases here....
1876
1877 mfpr r4, isr // mchk_interrupt pin asserted
1878 srl r4, isr_v_mck, r4
1879 blbs r4, sys_cpu_mchk_not_retryable
1880
1881
1882 \f
1883 //+
1884 // Look for retryable cases
1885 // In this segment:
1886 // r5<0> = 1 means retryable
1887 // r6 - holds the mchk code
1888 // r4 and r14 are available for scratch
1889 //
1890 //-
1891
1892
1893 // Within the chip, the retryable cases are Istream errors
1894 lda r4, 3(r31)
1895 sll r4, icperr_stat_v_dpe, r4
1896 and r1, r4, r4
1897 cmovne r4, 1, r5 // Retryable if just Icache parity error
1898
1899
1900 lda r4, 0x7f8(r31)
1901 and r10, r4, r4 // Isolate the Scache Data Parity Error bits
1902 srl r10, sc_stat_v_cbox_cmd, r14
1903 and r14, 0x1f, r14 // Isolate Scache Command field
1904 subq r14, 1, r14 // Scache Iread command = 1
1905 cmovne r4, 1, r4 // r4 = 1 if Scache data parity error bit set
1906 cmovne r14, 0, r4 // r4 = 1 if Scache PE and Iread
1907 bis r4, r5, r5 // Accumulate
1908
1909
1910 bis r31, BIT((ei_stat_v_unc_ecc_err-ei_stat_v_bc_tperr)), r4
1911 and r25, r4, r4 // Isolate the Uncorrectable Error Bit
1912 and r25, BIT((ei_stat_v_fil_ird-ei_stat_v_bc_tperr)), r14 // Isolate the Iread bit
1913 cmovne r4, 1, r4 // r4 = 1 if uncorr error
1914 cmoveq r14, 0, r4 // r4 = 1 if uncorr and Iread
1915 bis r4, r5, r5 // Accumulate
1916
1917 mfpr r6, pt_misc
1918 extwl r6, 4, r6 // Fetch mchk code
1919 bic r6, 1, r6 // Clear flag from interrupt flow
1920 cmovne r5, mchk_c_retryable_ird, r6 // Set mchk code
1921
1922
1923
1924 // In the system, the retryable cases are ...
1925 // (code here handles beh model read NXM)
1926
1927 #if beh_model != 0
1928 // .if ne beh_model
1929 ldah r4, 0xC000(r31) // Get base of demon space
1930 lda r4, 0x550(r4) // Add NXM demon flag offset
1931
1932 ldqp r4, 0(r4) // Read the demon register
1933 lda r14, mchk_c_read_nxm(r31)
1934 cmovlbs r4, r14, r6 // Set mchk code if read NXM
1935 cmovlbs r4, 1, r4
1936 bis r4, r5, r5 // Accumulate retry bit
1937 #endif
1938
1939 \f
1940 //+
1941 // Write the logout frame
1942 //
1943 // Current state:
1944 // r0 - fill_syn
1945 // r1 - icperr_stat
1946 // r4 - available
1947 // r5<0> - retry flag
1948 // r6 - mchk code
1949 // r8 - dcperr_stat
1950 // r9 - sc_addr
1951 // r10 - sc_stat
1952 // r12 - ei_addr
1953 // r13 - bc_tag_addr
1954 // r14 - available
1955 // r25 - ei_stat (shifted)
1956 // pt0 - saved r0
1957 // pt1 - saved r1
1958 // pt4 - saved r4
1959 // pt5 - saved r5
1960 // pt6 - saved r6
1961 // pt10 - saved exc_addr
1962 //
1963 //-
1964
1965 sys_mchk_write_logout_frame:
1966 // Get base of the logout area.
1967 GET_IMPURE(r14) // addr of per-cpu impure area
1968 GET_ADDR(r14,pal_logout_area+mchk_mchk_base,r14)
1969
1970 // Write the first 2 quadwords of the logout area:
1971
1972 sll r5, 63, r5 // Move retry flag to bit 63
1973 lda r4, mchk_size(r5) // Combine retry flag and frame size
1974 stqp r4, mchk_flag(r14) // store flag/frame size
1975 lda r4, mchk_sys_base(r31) // sys offset
1976 sll r4, 32, r4
1977 lda r4, mchk_cpu_base(r4) // cpu offset
1978 stqp r4, mchk_offsets(r14) // store sys offset/cpu offset into logout frame
1979
1980 //+
1981 // Write the mchk code to the logout area
1982 // Write error IPRs already fetched to the logout area
1983 // Restore some GPRs from PALtemps
1984 //-
1985
1986 mfpr r5, pt5
1987 stqp r6, mchk_mchk_code(r14)
1988 mfpr r4, pt4
1989 stqp r1, mchk_ic_perr_stat(r14)
1990 mfpr r6, pt6
1991 stqp r8, mchk_dc_perr_stat(r14)
1992 mfpr r1, pt1
1993 stqp r9, mchk_sc_addr(r14)
1994 stqp r10, mchk_sc_stat(r14)
1995 stqp r12, mchk_ei_addr(r14)
1996 stqp r13, mchk_bc_tag_addr(r14)
1997 stqp r0, mchk_fill_syn(r14)
1998 mfpr r0, pt0
1999 sll r25, ei_stat_v_bc_tperr, r25 // Move EI_STAT status bits back to expected position
2000 // retrieve lower 28 bits again from ei_stat and restore before storing to logout frame
2001 ldah r13, 0xfff0(r31)
2002 zapnot r13, 0x1f, r13
2003 ldqp r13, ei_stat(r13)
2004 sll r13, 64-ei_stat_v_bc_tperr, r13
2005 srl r13, 64-ei_stat_v_bc_tperr, r13
2006 or r25, r13, r25
2007 stqp r25, mchk_ei_stat(r14)
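//+
// Illustrative sketch (C-like, not assembled): the EI_STAT value stored to
// the frame just above is reassembled from the shifted copy captured
// earlier (r25) and a fresh read of the low bits, roughly
//
//   uint64_t low_mask       = (1ull << ei_stat_v_bc_tperr) - 1;
//   uint64_t frame_ei_stat  = (saved_high << ei_stat_v_bc_tperr)
//                           | (read_ei_stat() & low_mask);
//
// read_ei_stat() stands for the ldqp from the Cbox IPR space done with the
// 0xfff0... base address above.
//-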
2008
2009
2010
2011 \f
2012 //+
2013 // complete the CPU-specific part of the logout frame
2014 //-
2015
2016 #ifndef SIMOS
2017 // can't assemble. Where is the macro?
2018 mchk_logout mm_stat
2019 mchk_logout va // Unlocks VA and MM_STAT
2020 mchk_logout isr
2021 mchk_logout icsr
2022 mchk_logout pal_base
2023 mchk_logout exc_mask
2024 mchk_logout exc_sum
2025 #endif
2026
2027 ldah r13, 0xfff0(r31)
2028 zap r13, 0xE0, r13 // Get Cbox IPR base
2029 ldqp r13, ld_lock(r13) // Get ld_lock IPR
2030 stqp r13, mchk_ld_lock(r14) // and stash it in the frame
2031
2032 //+
2033 // complete the PAL-specific part of the logout frame
2034 //-
2035 #ifdef vms
2036 t = 0
2037 .repeat 24
2038 pt_mchk_logout \t
2039 t = t + 1
2040 .endr
2041 #endif
2042 #ifndef SIMOS
2043 // can't assemble?
2044 pt_mchk_logout 0
2045 pt_mchk_logout 1
2046 pt_mchk_logout 2
2047 pt_mchk_logout 3
2048 pt_mchk_logout 4
2049 pt_mchk_logout 5
2050 pt_mchk_logout 6
2051 pt_mchk_logout 7
2052 pt_mchk_logout 8
2053 pt_mchk_logout 9
2054 pt_mchk_logout 10
2055 pt_mchk_logout 11
2056 pt_mchk_logout 12
2057 pt_mchk_logout 13
2058 pt_mchk_logout 14
2059 pt_mchk_logout 15
2060 pt_mchk_logout 16
2061 pt_mchk_logout 17
2062 pt_mchk_logout 18
2063 pt_mchk_logout 19
2064 pt_mchk_logout 20
2065 pt_mchk_logout 21
2066 pt_mchk_logout 22
2067 pt_mchk_logout 23
2068 #endif
2069
2070
2071 //+
2072 // Log system specific info here
2073 //-
2074
2075 #if alpha_fw != 0
2076 // .if ne alpha_fw
2077 storeTLEP_:
2078 lda r13, 0xffc4(r31) // Get GBUS$MISCR address
2079 sll r13, 24, r13
2080 ldqp r13, 0(r13) // Read GBUS$MISCR
2081 sll r13, 16, r13 // shift up to proper field
2082 mfpr r8, pt_whami // get our node id
2083 extbl r8, 1, r8 // shift to bit 0
2084 or r13, r8, r13 // merge MISCR and WHAMI
2085 stlp r13, mchk$gbus(r14) // write to logout area
2086 srl r8, 1, r8 // shift off cpu number
2087
2088 Get_TLSB_Node_Address r8,r13 // compute our nodespace address
2089
2090 OSFmchk_TLEPstore tldev, tlsb=1
2091 OSFmchk_TLEPstore tlber, tlsb=1, clr=1
2092 OSFmchk_TLEPstore tlcnr, tlsb=1
2093 OSFmchk_TLEPstore tlvid, tlsb=1
2094 OSFmchk_TLEPstore tlesr0, tlsb=1, clr=1
2095 OSFmchk_TLEPstore tlesr1, tlsb=1, clr=1
2096 OSFmchk_TLEPstore tlesr2, tlsb=1, clr=1
2097 OSFmchk_TLEPstore tlesr3, tlsb=1, clr=1
2098 OSFmchk_TLEPstore tlmodconfig
2099 OSFmchk_TLEPstore tlepaerr, clr=1
2100 OSFmchk_TLEPstore tlepderr, clr=1
2101 OSFmchk_TLEPstore tlepmerr, clr=1
2102 OSFmchk_TLEPstore tlintrmask0
2103 OSFmchk_TLEPstore tlintrmask1
2104 OSFmchk_TLEPstore tlintrsum0
2105 OSFmchk_TLEPstore tlintrsum1
2106 OSFmchk_TLEPstore tlep_vmg
2107 // .endc
2108 #endif /*alpha_fw != 0 */
2109 // Unlock IPRs
2110 lda r8, (BIT(dcperr_stat_v_lock)|BIT(dcperr_stat_v_seo))(r31)
2111 mtpr r8, dcperr_stat // Clear Dcache parity error status
2112
2113 lda r8, (BIT(icperr_stat_v_dpe)|BIT(icperr_stat_v_tpe)|BIT(icperr_stat_v_tmr))(r31)
2114 mtpr r8, icperr_stat // Clear Icache parity error & timeout status
2115
2116 1: ldqp r8, mchk_ic_perr_stat(r14) // get ICPERR_STAT value
2117         GET_ADDR(r0,0x1800,r31)         // get ICPERR_STAT mask value
2118 and r0, r8, r0 // compare
2119 beq r0, 2f // check next case if nothing set
2120 lda r0, mchk_c_retryable_ird(r31) // set new MCHK code
2121 br r31, do_670 // setup new vector
2122
2123 2: ldqp r8, mchk_dc_perr_stat(r14) // get DCPERR_STAT value
2124         GET_ADDR(r0,0x3f,r31)           // get DCPERR_STAT mask value
2125 and r0, r8, r0 // compare
2126 beq r0, 3f // check next case if nothing set
2127 lda r0, mchk_c_dcperr(r31) // set new MCHK code
2128 br r31, do_670 // setup new vector
2129
2130 3: ldqp r8, mchk_sc_stat(r14) // get SC_STAT value
2131         GET_ADDR(r0,0x107ff,r31)        // get SC_STAT mask value
2132 and r0, r8, r0 // compare
2133 beq r0, 4f // check next case if nothing set
2134 lda r0, mchk_c_scperr(r31) // set new MCHK code
2135 br r31, do_670 // setup new vector
2136
2137 4: ldqp r8, mchk_ei_stat(r14) // get EI_STAT value
2138         GET_ADDR(r0,0x30000000,r31)     // get EI_STAT mask value
2139 and r0, r8, r0 // compare
2140 beq r0, 5f // check next case if nothing set
2141 lda r0, mchk_c_bcperr(r31) // set new MCHK code
2142 br r31, do_670 // setup new vector
2143
2144 5: ldlp r8, mchk_tlber(r14) // get TLBER value
2145 GET_ADDR(r0,0xfe01,r31) // get high TLBER mask value
2146 sll r0, 16, r0 // shift into proper position
2147 GET_ADDR(r1,0x03ff,r31) // get low TLBER mask value
2148 or r0, r1, r0 // merge mask values
2149 and r0, r8, r0 // compare
2150 beq r0, 6f // check next case if nothing set
2151 GET_ADDR(r0, 0xfff0, r31) // set new MCHK code
2152 br r31, do_660 // setup new vector
2153
2154 6: ldlp r8, mchk_tlepaerr(r14) // get TLEPAERR value
2155 GET_ADDR(r0,0xff7f,r31) // get TLEPAERR mask value
2156 and r0, r8, r0 // compare
2157 beq r0, 7f // check next case if nothing set
2158 GET_ADDR(r0, 0xfffa, r31) // set new MCHK code
2159 br r31, do_660 // setup new vector
2160
2161 7: ldlp r8, mchk_tlepderr(r14) // get TLEPDERR value
2162 GET_ADDR(r0,0x7,r31) // get TLEPDERR mask value
2163 and r0, r8, r0 // compare
2164 beq r0, 8f // check next case if nothing set
2165 GET_ADDR(r0, 0xfffb, r31) // set new MCHK code
2166 br r31, do_660 // setup new vector
2167
2168 8: ldlp r8, mchk_tlepmerr(r14) // get TLEPMERR value
2169 GET_ADDR(r0,0x3f,r31) // get TLEPMERR mask value
2170 and r0, r8, r0 // compare
2171 beq r0, 9f // check next case if nothing set
2172 GET_ADDR(r0, 0xfffc, r31) // set new MCHK code
2173 br r31, do_660 // setup new vector
2174
2175 9: ldqp r8, mchk_ei_stat(r14) // get EI_STAT value
2176 GET_ADDR(r0,0xb,r31) // get EI_STAT mask value
2177 sll r0, 32, r0 // shift to upper lw
2178 and r0, r8, r0 // compare
2179 beq r0, 1f // check next case if nothing set
2180 GET_ADDR(r0,0xfffd,r31) // set new MCHK code
2181 br r31, do_660 // setup new vector
2182
2183 1: ldlp r8, mchk_tlepaerr(r14) // get TLEPAERR value
2184 GET_ADDR(r0,0x80,r31) // get TLEPAERR mask value
2185 and r0, r8, r0 // compare
2186 beq r0, cont_logout_frame // check next case if nothing set
2187 GET_ADDR(r0, 0xfffe, r31) // set new MCHK code
2188 br r31, do_660 // setup new vector
2189
2190 do_670: lda r8, scb_v_procmchk(r31) // SCB vector
2191 br r31, do_6x0_cont
2192 do_660: lda r8, scb_v_sysmchk(r31) // SCB vector
2193 do_6x0_cont:
2194 sll r8, 16, r8 // shift to proper position
2195 mfpr r1, pt_misc // fetch current pt_misc
2196 GET_ADDR(r4,0xffff, r31) // mask for vector field
2197 sll r4, 16, r4 // shift to proper position
2198 bic r1, r4, r1 // clear out old vector field
2199 or r1, r8, r1 // merge in new vector
2200 mtpr r1, pt_misc // save new vector field
2201 stlp r0, mchk_mchk_code(r14) // save new mchk code
2202
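//+
// Illustrative sketch (C-like, not assembled): do_660/do_670 above redirect
// the machine check by rewriting the vector field kept in PT_MISC<31:16>,
// in effect
//
//   pt_misc = (pt_misc & ~(0xffffull << 16)) | ((uint64_t)scb_vector << 16);
//   frame->mchk_code = new_mchk_code;    /* r0 as set by the matching case */
//
// scb_vector is scb_v_procmchk for the 670 flow and scb_v_sysmchk for 660.
//-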
2203 cont_logout_frame:
2204 // Restore some GPRs from PALtemps
2205 mfpr r0, pt0
2206 mfpr r1, pt1
2207 mfpr r4, pt4
2208
2209 mfpr r12, pt10 // fetch original PC
2210 blbs r12, sys_machine_check_while_in_pal // MCHK halt if machine check in pal
2211
2212 //XXXbugnion pvc_jsr armc, bsr=1
2213 bsr r12, sys_arith_and_mchk // go check for and deal with arith trap
2214
2215 mtpr r31, exc_sum // Clear Exception Summary
2216
2217         mfpr    r25, pt10               // write exc_addr after arith_and_mchk to pick up the new pc
2218 stqp r25, mchk_exc_addr(r14)
2219
2220 //+
2221 // Set up the km trap
2222 //-
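//+
// Illustrative sketch (C-like, not assembled): below, the interrupted
// ps/pc/gp/a0-a2 are saved into an osfsf frame on the kernel stack and
// entInt is entered with, in effect,
//
//   a0 = osfint_c_mchk;                 /* "machine check" interrupt type */
//   a1 = (pt_misc >> 16) & 0xffff;      /* SCB vector chosen earlier      */
//   a2 = kseg_of(logout_frame_pa);      /* logout frame as a kseg address */
//
// kseg_of() stands for OR-ing with the kseg base built from -1 with the
// low 42 bits cleared (the "EV4 algorithm" noted below).
//-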
2223
2224
2225 sys_post_mchk_trap:
2226 mfpr r25, pt_misc // Check for flag from mchk interrupt
2227 extwl r25, 4, r25
2228         blbs    r25, sys_mchk_stack_done // Stack frame already pushed if from interrupt flow
2229
2230 bis r14, r31, r12 // stash pointer to logout area
2231 mfpr r14, pt10 // get exc_addr
2232
2233 sll r11, 63-3, r25 // get mode to msb
2234 bge r25, 3f
2235
2236         mtpr    r31, dtb_cm             // Set Mbox current mode to kernel
2237         mtpr    r31, ev5__ps            // Set Ibox current mode to kernel
2238
2239 mtpr r30, pt_usp // save user stack
2240         mfpr    r30, pt_ksp             // get kernel stack
2241
2242 3:
2243 lda sp, 0-osfsf_c_size(sp) // allocate stack space
2244 nop
2245
2246 stq r18, osfsf_a2(sp) // a2
2247 stq r11, osfsf_ps(sp) // save ps
2248
2249 stq r14, osfsf_pc(sp) // save pc
2250 mfpr r25, pt_entint // get the VA of the interrupt routine
2251
2252 stq r16, osfsf_a0(sp) // a0
2253 lda r16, osfint_c_mchk(r31) // flag as mchk in a0
2254
2255 stq r17, osfsf_a1(sp) // a1
2256 mfpr r17, pt_misc // get vector
2257
2258 stq r29, osfsf_gp(sp) // old gp
2259 mtpr r25, exc_addr //
2260
2261 or r31, 7, r11 // get new ps (km, high ipl)
2262 subq r31, 1, r18 // get a -1
2263
2264 extwl r17, 2, r17 // a1 <- interrupt vector
2265 bis r31, ipl_machine_check, r25
2266
2267 mtpr r25, ipl // Set internal ipl
2268 srl r18, 42, r18 // shift off low bits of kseg addr
2269
2270 sll r18, 42, r18 // shift back into position
2271 mfpr r29, pt_kgp // get the kern r29
2272
2273 or r12, r18, r18 // EV4 algorithm - pass pointer to mchk frame as kseg address
2274 hw_rei_spe // out to interrupt dispatch routine
2275
2276 \f
2277 //+
2278 // The stack is pushed. Load up a0,a1,a2 and vector via entInt
2279 //
2280 //-
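//+
// Illustrative sketch (C-like, not assembled): the kseg address handed to
// the OS below is formed exactly as in the older EV4 PALcode, i.e.
//
//   uint64_t kseg_base = ((uint64_t)-1 >> 42) << 42;   /* 0xFFFFFC00_00000000 */
//   uint64_t a2        = kseg_base | logout_frame_pa;
//
// so the kernel can reach the physically addressed logout frame through its
// direct-mapped kseg window.
//-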
2281 ALIGN_BRANCH
2282 sys_mchk_stack_done:
2283 lda r16, osfint_c_mchk(r31) // flag as mchk/crd in a0
2284 lda r17, scb_v_sysmchk(r31) // a1 <- interrupt vector
2285
2286 subq r31, 1, r18 // get a -1
2287 mfpr r25, pt_entInt
2288
2289 srl r18, 42, r18 // shift off low bits of kseg addr
2290 mtpr r25, exc_addr // load interrupt vector
2291
2292 sll r18, 42, r18 // shift back into position
2293 or r14, r18, r18 // EV4 algorithm - pass pointer to mchk frame as kseg address
2294
2295 hw_rei_spe // done
2296
2297
2298 ALIGN_BRANCH
2299 sys_cpu_mchk_not_retryable:
2300 mfpr r6, pt_misc
2301 extwl r6, 4, r6 // Fetch mchk code
2302 br r31, sys_mchk_write_logout_frame //
2303
2304
2305 \f
2306 //+
2307 //sys$double_machine_check - a machine check was started, but MCES<MCHK> was
2308 // already set. We will now double machine check halt.
2309 //
2310 // pt0 - old R0
2311 //
2312 //-
2313
2314 EXPORT(sys_double_machine_check)
2315 #ifndef SIMOS
2316 pvc$jsr updpcb, bsr=1
2317 bsr r0, pal_update_pcb // update the pcb
2318 #endif
2319 lda r0, hlt_c_dbl_mchk(r31)
2320 br r31, sys_enter_console
2321
2322 //+
2323 //sys$machine_check_while_in_pal - a machine check was started, exc_addr points to
2324 // a PAL PC. We will now machine check halt.
2325 //
2326 // pt0 - old R0
2327 //
2328 //-
2329 sys_machine_check_while_in_pal:
2330 stqp r12, mchk_exc_addr(r14) // exc_addr has not yet been written
2331
2332 #ifndef SIMOS
2333 pvc$jsr updpcb, bsr=1
2334 bsr r0, pal_update_pcb // update the pcb
2335 #endif
2336 lda r0, hlt_c_mchk_from_pal(r31)
2337 br r31, sys_enter_console
2338
2339
2340 //ARITH and MCHK
2341 // Check for arithmetic errors and build trap frame,
2342 // but don't post the trap.
2343 // on entry:
2344 // pt10 - exc_addr
2345 // r12 - return address
2346 // r14 - logout frame pointer
2347 // r13 - available
2348 // r8,r9,r10 - available except across stq's
2349 // pt0,1,6 - available
2350 //
2351 // on exit:
2352 // pt10 - new exc_addr
2353 // r17 = exc_mask
2354 // r16 = exc_sum
2355 // r14 - logout frame pointer
2356 //
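//+
// Illustrative sketch (C-like, not assembled): the check below only builds
// an arithmetic trap frame when EXC_SUM records a pending arithmetic error:
//
//   if ((exc_sum >> exc_sum_v_swc) == 0)
//       return;                      /* nothing pending, back to caller   */
//   /* otherwise push an osfsf frame with r16 = exc_sum, r17 = exc_mask,
//      and point pt10 (the eventual exc_addr) at the entArith handler     */
//-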
2357 ALIGN_BRANCH
2358 sys_arith_and_mchk:
2359 mfpr r13, ev5__exc_sum
2360 srl r13, exc_sum_v_swc, r13
2361 bne r13, handle_arith_and_mchk
2362
2363 // XXX bugnion pvc$jsr armc, bsr=1, dest=1
2364 ret r31, (r12) // return if no outstanding arithmetic error
2365
2366 handle_arith_and_mchk:
2367 mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
2368 // no virt ref for next 2 cycles
2369 mtpr r14, pt0
2370
2371 mtpr r1, pt1 // get a scratch reg
2372 and r11, osfps_m_mode, r1 // get mode bit
2373
2374 bis r11, r31, r25 // save ps
2375 beq r1, 1f // if zero we are in kern now
2376
2377 bis r31, r31, r25 // set the new ps
2378 mtpr r30, pt_usp // save user stack
2379
2380 mfpr r30, pt_ksp // get kern stack
2381 1:
2382 mfpr r14, exc_addr // get pc into r14 in case stack writes fault
2383
2384 lda sp, 0-osfsf_c_size(sp) // allocate stack space
2385 mtpr r31, ev5__ps // Set Ibox current mode to kernel
2386
2387 mfpr r1, pt_entArith
2388 stq r14, osfsf_pc(sp) // save pc
2389
2390 stq r17, osfsf_a1(sp)
2391 mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
2392
2393 stq r29, osfsf_gp(sp)
2394 stq r16, osfsf_a0(sp) // save regs
2395
2396 bis r13, r31, r16 // move exc_sum to r16
2397 stq r18, osfsf_a2(sp)
2398
2399 stq r11, osfsf_ps(sp) // save ps
2400 mfpr r29, pt_kgp // get the kern gp
2401
2402 mfpr r14, pt0 // restore logout frame pointer from pt0
2403 bis r25, r31, r11 // set new ps
2404
2405 mtpr r1, pt10 // Set new PC
2406 mfpr r1, pt1
2407
2408 // XXX bugnion pvc$jsr armc, bsr=1, dest=1
2409         ret     r31, (r12)              // return to caller with the arithmetic trap frame built
2410
2411
2412 \f
2413 // .sbttl "SYS$ENTER_CONSOLE - Common PALcode for ENTERING console"
2414
2415 ALIGN_BLOCK
2416
2417 // SYS$enter_console
2418 //
2419 // Entry:
2420 // Entered when PAL wants to enter the console.
2421 // usually as the result of a HALT instruction or button,
2422 // or catastrophic error.
2423 //
2424 // Regs on entry...
2425 //
2426 // R0 = halt code
2427 // pt0 <- r0
2428 //
2429 // Function:
2430 //
2431 // Save all readable machine state, and "call" the console
2432 //
2433 // Returns:
2434 //
2435 //
2436 // Notes:
2437 //
2438 // In these routines, once the save state routine has been executed,
2439 // the remainder of the registers become scratchable, as the only
2440 // "valid" copy of them is the "saved" copy.
2441 //
2442 // Any registers or PTs that are modified before calling the save
2443 // routine will have their data lost. The code below will save all
2444 // state, but will lose pt0, pt4 and pt5.
2445 //
2446 //-
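//+
// Illustrative sketch (C-like, not assembled): under SIMOS the console
// entry point used below is a fixed kseg address rather than a vector
// fetched from memory, in effect
//
//   uint64_t console_entry = 0xFFFFFC0000000000ull /* -1 << 42 */ + 0x10000;
//   exc_addr = console_entry;        /* hw_rei_stall then transfers there */
//
// i.e. physical address 0x10000 viewed through the kseg window; the
// non-SIMOS path instead jumps through pal_enter_console_ptr.
//-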
2447
2448 EXPORT(sys_enter_console)
2449 mtpr r1, pt4
2450 mtpr r3, pt5
2451 #ifdef SIMOS
2452 subq r31, 1, r1
2453 sll r1, 42, r1
2454 ldah r1, 1(r1)
2455 #else /* SIMOS */
2456 lda r3, pal_enter_console_ptr(r31) //find stored vector
2457 ldqp r1, 0(r3)
2458 #endif /* SIMOS */
2459
2460 #ifdef SIMOS
2461 /* taken from scrmax, seems like the obvious thing to do */
2462 mtpr r1, exc_addr
2463 mfpr r1, pt4
2464 mfpr r3, pt5
2465 STALL
2466 STALL
2467 hw_rei_stall
2468 #else
2469 pvc$violate 1007
2470 jmp r31, (r1) // off to common routine
2471 #endif
2472
2473 \f
2474 // .sbttl "SYS$EXIT_CONSOLE - Common PALcode for EXITING console"
2475 //+
2476 // sys$exit_console
2477 //
2478 // Entry:
2479 // Entered when console wants to reenter PAL.
2480 // usually as the result of a CONTINUE.
2481 //
2482 //
2483 // Regs on entry...
2484 //
2485 //
2486 // Function:
2487 //
2488 // Restore all readable machine state, and return to user code.
2489 //
2490 //
2491 //
2492 //-
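//+
// Illustrative sketch (C-like, not assembled): the final reloads below pull
// r1 and r3 back out of the per-CPU impure save area, whose GPR slots are
// laid out as an array of quadwords:
//
//   uint64_t *gpr_save = (uint64_t *)(impure_base + cns_gpr);
//   r3 = gpr_save[3];
//   r1 = gpr_save[1];
//
// restore_state handles every other register; r1/r3 are reloaded by hand
// because they are used as scratch here.
//-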
2493 ALIGN_BLOCK
2494 sys_exit_console:
2495 // Disable physical mode:
2496 #if enable_physical_console != 0
2497 // .if ne enable_physical_console
2498 mfpr r25, pt_ptbr
2499 bic r25, 1, r25 // clear physical console flag
2500 mtpr r25, pt_ptbr
2501 #endif
2502
2503 GET_IMPURE(r1)
2504
2505 // clear lock and intr_flags prior to leaving console
2506 rc r31 // clear intr_flag
2507 // lock flag cleared by restore_state
2508 #ifndef SIMOS
2509 pvc$jsr rststa, bsr=1
2510 bsr r3, pal_restore_state // go restore all state
2511 // note, R1 and R3 are NOT restored
2512 // by restore_state.
2513 #endif
2514 // TB's have been flushed
2515
2516 ldqp r3, (cns_gpr+(8*3))(r1) // restore r3
2517 ldqp r1, (cns_gpr+8)(r1) // restore r1
2518 hw_rei_stall // back to user
2519
2520 #if turbo_pcia_intr_fix != 0
2521 // .if ne turbo_pcia_intr_fix
2522 check_pcia_intr:
2523 mfpr r14, pt14 // fetch saved PCIA interrupt info
2524 beq r14, check_done // don't bother checking if no info
2525 mfpr r13, ipl // check the current IPL
2526 bic r13, 3, r25 // isolate ipl<5:2>
2527 cmpeq r25, 0x14, r25 // is it an I/O interrupt?
2528 beq r25, check_done // no, return
2529 and r13, 3, r25 // get I/O interrupt index
2530 extbl r14, r25, r13 // extract info for this interrupt
2531 beq r13, check_done // if no info, return
2532
2533 // This is an RTI from a PCIA interrupt
2534 lda r12, 1(r31) // get initial bit mask
2535 sll r12, r25, r25 // shift to select interrupt index
2536 zap r14, r25, r14 // clear out info from this interrupt
2537 mtpr r14, pt14 // and save it
2538
2539 and r13, 3, r25 // isolate HPC field
2540 subq r25, 1, r25 // subtract 1 to get HPC number
2541 srl r13, 2, r13 // generate base register address
2542 sll r13, 6, r13 // get slot/hose address bits
2543 lda r13, 0x38(r13) // insert other high bits
2544 sll r13, 28, r13 // shift high bits into position
2545
2546 // Read the IPROGx register
2547 sll r25, 21, r14 // HPC address bit position
2548 or r13, r14, r14 // add in upper bits
2549 lda r14, 0x400(r14) // add in lower bits
2550 ldqp r14, 0(r14) // read IPROG
2551 srl r14, 4, r12 // check the In Progress bit
2552 blbc r12, 1f // skip if none in progress
2553 and r14, 0xf, r14 // isolate interrupt source
2554 lda r12, 1(r31) // make initial mask
2555 sll r12, r14, r14 // shift to make new intr source mask
2556 br r31, 2f
2557 // Write the SMPLIRQx register
2558 1: or r31, r31, r14 // default interrupt source mask
2559 2: GET_ADDR(r12, 0xffff, r31) // default SMPLIRQx data
2560         bic     r12, r14, r12           // clear any interrupts in progress
2561 //orig lda r14, <0xbffc@-2>(r31) // get register address bits
2562         lda     r14, (0xbffc>>2)(r31)   // get register address bits
2563
2564 sll r14, 10, r14 // shift into position
2565 or r14, r13, r14 // add in upper bits
2566 sll r25, 8, r25 // shift HPC number into position
2567 or r14, r25, r14 // add in lower bits
2568 stqp r12, 0(r14) // write SMPLIRQx register
2569 mb
2570 ldqp r12, 0(r14) // read it back
2571 bis r12, r12, r12 // touch register to insure completion
2572
2573 check_done: // do these now and return
2574 lda r25, osfsf_c_size(sp) // get updated sp
2575 bis r25, r31, r14 // touch r14,r25 to stall mf exc_addr
2576 br r31, pcia_check_return
2577 #endif
2578
2579 \f
2580 // .sbttl KLUDGE_INITIAL_PCBB - PCB for Boot use only
2581
2582 ALIGN_128
2583
2584 kludge_initial_pcbb: // PCB is 128 bytes long
2585 // .repeat 16
2586 // .quad 0
2587 // .endr
2588
2589 nop
2590 nop
2591 nop
2592 nop
2593
2594 nop
2595 nop
2596 nop
2597 nop
2598
2599 nop
2600 nop
2601 nop
2602 nop
2603
2604 nop
2605 nop
2606 nop
2607 nop
2608 \f
2609 // .sbttl "SET_SC_BC_CTL subroutine"
2610 //
2611 // Subroutine to set the SC_CTL, BC_CONFIG, and BC_CTL registers and flush the Scache
2612 // There must be no outstanding memory references -- istream or dstream -- when
2613 // these registers are written. EV5 prefetcher is difficult to turn off. So,
2614 // this routine needs to be exactly 32 instructions long; the final jmp must
2615 // be in the last octaword of a page (prefetcher doesn't go across page)
2616 //
2617 //
2618 // Register expectations:
2619 // r0 base address of CBOX iprs
2620 // r5 value to set sc_ctl to (flush bit is added in)
2621 // r6 value to set bc_ctl to
2622 // r7 value to set bc_config to
2623 // r10 return address
2624 // r19 old sc_ctl value
2625 // r20 old value of bc_ctl
2626 // r21 old value of bc_config
2627 // r23 flush scache flag
2628 // Register usage:
2629 // r17 sc_ctl with flush bit cleared
2630 // r22 loop address
2631 //
2632 //
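//+
// Illustrative sketch (C-like, not assembled): ignoring the memory barriers
// and the 32-instruction/page-boundary padding, the loop below performs the
// register update twice -- pass 0 replays the old values, pass 1 installs
// the new ones with the optional flush pulse:
//
//   for (int pass = 0; pass < 2; pass++) {
//       uint64_t sc = pass ? (new_sc_ctl | flush_req) : old_sc_ctl;
//       write_sc_ctl(sc);
//       write_bc_ctl(pass ? new_bc_ctl : old_bc_ctl);
//       write_bc_config(pass ? new_bc_config : old_bc_config);
//       write_sc_ctl(sc & ~SC_CTL_FLUSH);    /* drop the flush bit again */
//   }
//
// The write_* helpers stand for the stqp/ldqp accesses to the Cbox IPR
// space based at r0; flush_req is the caller's flag in r23.
//-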
2633 #ifndef SIMOS
2634 align_page <32*4> // puts start of routine at next page boundary minus 32 longwords.
2635 #endif
2636
2637 set_sc_bc_ctl:
2638
2639 #ifndef SIMOS
2640         br      r22, sc_ctl_loop        // this branch must be in the same 4 instruction block as its dest
2641 sc_ctl_loop:
2642 // XXX bugnion pvc$jsr scloop, dest=1
2643 mb
2644 mb
2645
2646 bis r5, r23, r5 //r5 <- same sc_ctl with flush bit set (if flag set in r23)
2647
2648 stqp r19, ev5__sc_ctl(r0) // write sc_ctl
2649 stqp r20, ev5__bc_ctl(r0) // write bc_ctl
2650 bis r31, r6, r20 // update r20 with new bc_ctl for 2nd time through loop
2651 stqp r21, bc_config(r0) // write bc_config register
2652 bis r31, r7, r21 // update r21 with new bc_config for 2nd time through loop
2653
2654 bic r19, BIT(sc_ctl_v_sc_flush), r17 //r17 <- same sc_ctl without flush bit set
2655 //NOTE: only works because flush bit is in lower 16 bits
2656
2657 wmb // don't merge with other writes
2658 stqp r17, ev5__sc_ctl(r0) // write sc_ctl without flush bit
2659 ldqp r17, ev5__sc_ctl(r0) // read sc_ctl
2660 bis r17, r17, r17 // stall until the data comes back
2661 bis r31, r5, r19 // update r19 with new sc_ctl for 2nd time through loop
2662
2663 // fill with requisite number of nops (unops ok) to make exactly 32 instructions in loop
2664 t = 0
2665 .repeat 15
2666 unop
2667 t = t + 1
2668 .endr
2669 $opdef mnemonic= myjmp, -
2670 format= <custom=iregister, iregister, branch_offset>, -
2671 encoding= <26:31=0x1A, 21:25=%OP1,16:20=%OP2,14:15=0x00,0:13=%op3>
2672
2673 // XXXbugnion pvc$jsr scloop
2674 myjmp r22,r22,sc_ctl_loop // first time, jump to sc_ctl_loop (hint will cause prefetcher to go to loop instead
2675 // of straight) // r22 gets sc_ctl_done
2676 // 2nd time, code continues at sc_ctl_done (I hope)
2677 sc_ctl_done:
2678 // XXX bugnion pvc$jsr scloop, dest=1
2679 // XXX bugnion pvc$jsr scbcctl
2680 #endif /*SIMOS*/
2681 ret r31, (r10) // return to where we came from
2682
2683
2684 .end
2685
2686
2687
2688