#include <vector>
#include "arch/alpha/alpha_memory.hh"
-#include "arch/alpha/ev5.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "sim/builder.hh"
using namespace std;
+using namespace EV5;
///////////////////////////////////////////////////////////////////////
//
bool uncacheBit40 = false;
#endif
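+// MODE2MASK turns a privilege mode number (kernel..user) into a one-hot
+// bit for testing against the xre/xwe permission fields of a PTE.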
+#define MODE2MASK(X) (1 << (X))
+
AlphaTLB::AlphaTLB(const string &name, int s)
: SimObject(name), size(s), nlu(0)
{
#ifdef ALPHA_TLASER
- if (req->paddr & PA_UNCACHED_BIT_39) {
+ if (req->paddr & PAddrUncachedBit39) {
#else
- if (req->paddr & PA_UNCACHED_BIT_43) {
+ if (req->paddr & PAddrUncachedBit43) {
#endif
// IPR memory space not implemented
- if (PA_IPR_SPACE(req->paddr)) {
+ if (PAddrIprSpace(req->paddr)) {
if (!req->xc->misspeculating()) {
switch (req->paddr) {
case ULL(0xFFFFF00188):
#ifndef ALPHA_TLASER
// Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
- req->paddr &= PA_UNCACHED_MASK;
+ req->paddr &= PAddrUncachedMask;
#endif
}
}
// insert a new TLB entry
void
-AlphaTLB::insert(Addr vaddr, AlphaISA::PTE &pte)
+AlphaTLB::insert(Addr addr, AlphaISA::PTE &pte)
{
+ AlphaISA::VAddr vaddr = addr;
if (table[nlu].valid) {
Addr oldvpn = table[nlu].tag;
PageTable::iterator i = lookupTable.find(oldvpn);
lookupTable.erase(i);
}
- Addr vpn = VA_VPN(vaddr);
- DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vpn, pte.ppn);
+ DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);
table[nlu] = pte;
- table[nlu].tag = vpn;
+ table[nlu].tag = vaddr.vpn();
table[nlu].valid = true;
- lookupTable.insert(make_pair(vpn, nlu));
+ lookupTable.insert(make_pair(vaddr.vpn(), nlu));
nextnlu();
}
}
void
-AlphaTLB::flushAddr(Addr vaddr, uint8_t asn)
+AlphaTLB::flushAddr(Addr addr, uint8_t asn)
{
- Addr vpn = VA_VPN(vaddr);
+ AlphaISA::VAddr vaddr = addr;
- PageTable::iterator i = lookupTable.find(vpn);
+ PageTable::iterator i = lookupTable.find(vaddr.vpn());
if (i == lookupTable.end())
return;
- while (i->first == vpn) {
+ while (i->first == vaddr.vpn()) {
int index = i->second;
AlphaISA::PTE *pte = &table[index];
assert(pte->valid);
- if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
- DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vpn, pte->ppn);
+ if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
+ DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
+ pte->ppn);
// invalidate this entry
pte->valid = false;
if (!xc->misspeculating()) {
ipr[AlphaISA::IPR_ITB_TAG] = pc;
ipr[AlphaISA::IPR_IFAULT_VA_FORM] =
- ipr[AlphaISA::IPR_IVPTBR] | (VA_VPN(pc) << 3);
+ ipr[AlphaISA::IPR_IVPTBR] | (AlphaISA::VAddr(pc).vpn() << 3);
}
}
{
InternalProcReg *ipr = req->xc->regs.ipr;
- if (PC_PAL(req->vaddr)) {
+ if (AlphaISA::PcPAL(req->vaddr)) {
// strip off PAL PC marker (lsb is 1)
- req->paddr = (req->vaddr & ~3) & PA_IMPL_MASK;
+ req->paddr = (req->vaddr & ~3) & PAddrImplMask;
hits++;
return No_Fault;
}
// VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#ifdef ALPHA_TLASER
if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
- VA_SPACE_EV5(req->vaddr) == 2) {
+ VAddrSpaceEV5(req->vaddr) == 2) {
#else
- if (VA_SPACE_EV6(req->vaddr) == 0x7e) {
+ if (VAddrSpaceEV6(req->vaddr) == 0x7e) {
#endif
-
-
// only valid in kernel mode
- if (ICM_CM(ipr[AlphaISA::IPR_ICM]) != AlphaISA::mode_kernel) {
+ if (ICM_CM(ipr[AlphaISA::IPR_ICM]) !=
+ AlphaISA::mode_kernel) {
fault(req->vaddr, req->xc);
acv++;
return ITB_Acv_Fault;
}
- req->paddr = req->vaddr & PA_IMPL_MASK;
+ req->paddr = req->vaddr & PAddrImplMask;
#ifndef ALPHA_TLASER
// sign extend the physical address properly
- if (req->paddr & PA_UNCACHED_BIT_40)
+ if (req->paddr & PAddrUncachedBit40)
req->paddr |= ULL(0xf0000000000);
else
req->paddr &= ULL(0xffffffffff);
} else {
// not a physical address: need to look up pte
- AlphaISA::PTE *pte = lookup(VA_VPN(req->vaddr),
- DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));
+ AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->vaddr).vpn(),
+ DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));
if (!pte) {
fault(req->vaddr, req->xc);
return ITB_Fault_Fault;
}
- req->paddr = PA_PFN2PA(pte->ppn) + VA_POFS(req->vaddr & ~3);
+ req->paddr = (pte->ppn << AlphaISA::PageShift) +
+ (AlphaISA::VAddr(req->vaddr).offset() & ~3);
// check permissions for this access
if (!(pte->xre & (1 << ICM_CM(ipr[AlphaISA::IPR_ICM])))) {
}
// check that the physical address is ok (catch bad physical addresses)
- if (req->paddr & ~PA_IMPL_MASK)
+ if (req->paddr & ~PAddrImplMask)
return Machine_Check_Fault;
checkCacheability(req);
AlphaDTB::fault(MemReqPtr &req, uint64_t flags) const
{
ExecContext *xc = req->xc;
- Addr vaddr = req->vaddr;
+ AlphaISA::VAddr vaddr = req->vaddr;
uint64_t *ipr = xc->regs.ipr;
// Set fault address and flags. Even though we're modeling an
if (!xc->misspeculating()
&& !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
// set VA register with faulting address
- ipr[AlphaISA::IPR_VA] = vaddr;
+ ipr[AlphaISA::IPR_VA] = req->vaddr;
// set MM_STAT register flags
- ipr[AlphaISA::IPR_MM_STAT] = (((OPCODE(xc->getInst()) & 0x3f) << 11)
- | ((RA(xc->getInst()) & 0x1f) << 6)
- | (flags & 0x3f));
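+ // MM_STAT layout: opcode in <16:11>, Ra in <10:6>, fault flags in <5:0>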
+ ipr[AlphaISA::IPR_MM_STAT] =
+ (((Opcode(xc->getInst()) & 0x3f) << 11)
+ | ((Ra(xc->getInst()) & 0x1f) << 6)
+ | (flags & 0x3f));
// set VA_FORM register with faulting formatted address
ipr[AlphaISA::IPR_VA_FORM] =
- ipr[AlphaISA::IPR_MVPTBR] | (VA_VPN(vaddr) << 3);
+ ipr[AlphaISA::IPR_MVPTBR] | (vaddr.vpn() << 3);
}
}
return Alignment_Fault;
}
- if (PC_PAL(pc)) {
+ if (AlphaISA::PcPAL(pc)) {
mode = (req->flags & ALTMODE) ?
(AlphaISA::mode_type)ALT_MODE_AM(ipr[AlphaISA::IPR_ALT_MODE])
: AlphaISA::mode_kernel;
} else {
// verify that this is a good virtual address
if (!validVirtualAddress(req->vaddr)) {
- fault(req, (write ? MM_STAT_WR_MASK : 0) | MM_STAT_BAD_VA_MASK |
- MM_STAT_ACV_MASK);
+ fault(req, (write ? MM_STAT_WR_MASK : 0) |
+ MM_STAT_BAD_VA_MASK |
+ MM_STAT_ACV_MASK);
if (write) { write_acv++; } else { read_acv++; }
return DTB_Fault_Fault;
// Check for "superpage" mapping
#ifdef ALPHA_TLASER
if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
- VA_SPACE_EV5(req->vaddr) == 2) {
+ VAddrSpaceEV5(req->vaddr) == 2) {
#else
- if (VA_SPACE_EV6(req->vaddr) == 0x7e) {
+ if (VAddrSpaceEV6(req->vaddr) == 0x7e) {
#endif
// only valid in kernel mode
if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) !=
AlphaISA::mode_kernel) {
- fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK));
+ fault(req, ((write ? MM_STAT_WR_MASK : 0) |
+ MM_STAT_ACV_MASK));
if (write) { write_acv++; } else { read_acv++; }
return DTB_Acv_Fault;
}
- req->paddr = req->vaddr & PA_IMPL_MASK;
+ req->paddr = req->vaddr & PAddrImplMask;
#ifndef ALPHA_TLASER
// sign extend the physical address properly
- if (req->paddr & PA_UNCACHED_BIT_40)
+ if (req->paddr & PAddrUncachedBit40)
req->paddr |= ULL(0xf0000000000);
else
req->paddr &= ULL(0xffffffffff);
read_accesses++;
// not a physical address: need to look up pte
- AlphaISA::PTE *pte = lookup(VA_VPN(req->vaddr),
- DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));
+ AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->vaddr).vpn(),
+ DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));
if (!pte) {
// page fault
- fault(req,
- (write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK);
+ fault(req, (write ? MM_STAT_WR_MASK : 0) |
+ MM_STAT_DTB_MISS_MASK);
if (write) { write_misses++; } else { read_misses++; }
return (req->flags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
}
- req->paddr = PA_PFN2PA(pte->ppn) | VA_POFS(req->vaddr);
+ req->paddr = (pte->ppn << AlphaISA::PageShift) +
+ AlphaISA::VAddr(req->vaddr).offset();
if (write) {
if (!(pte->xwe & MODE2MASK(mode))) {
// no write permission: declare an access violation fault
- fault(req, (MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
- (pte->fonw ? MM_STAT_FONW_MASK : 0)));
+ fault(req, MM_STAT_WR_MASK |
+ MM_STAT_ACV_MASK |
+ (pte->fonw ? MM_STAT_FONW_MASK : 0));
write_acv++;
return DTB_Fault_Fault;
}
if (pte->fonw) {
- fault(req, MM_STAT_WR_MASK | MM_STAT_FONW_MASK);
+ fault(req, MM_STAT_WR_MASK |
+ MM_STAT_FONW_MASK);
write_acv++;
return DTB_Fault_Fault;
}
} else {
if (!(pte->xre & MODE2MASK(mode))) {
- fault(req, (MM_STAT_ACV_MASK |
- (pte->fonr ? MM_STAT_FONR_MASK : 0)));
+ fault(req, MM_STAT_ACV_MASK |
+ (pte->fonr ? MM_STAT_FONR_MASK : 0));
read_acv++;
return DTB_Acv_Fault;
}
}
// check that the physical address is ok (catch bad physical addresses)
- if (req->paddr & ~PA_IMPL_MASK)
+ if (req->paddr & ~PAddrImplMask)
return Machine_Check_Fault;
checkCacheability(req);
#include <map>
+#include "arch/alpha/isa_traits.hh"
+#include "base/statistics.hh"
#include "mem/mem_req.hh"
#include "sim/sim_object.hh"
-#include "base/statistics.hh"
class ExecContext;
// static helper functions... really EV5 VM traits
static bool validVirtualAddress(Addr vaddr) {
// unimplemented bits must be all 0 or all 1
- Addr unimplBits = vaddr & VA_UNIMPL_MASK;
- return (unimplBits == 0) || (unimplBits == VA_UNIMPL_MASK);
+ Addr unimplBits = vaddr & EV5::VAddrUnImplMask;
+ return (unimplBits == 0) || (unimplBits == EV5::VAddrUnImplMask);
}
static void checkCacheability(MemReqPtr &req);
#ifdef FULL_SYSTEM
+using namespace EV5;
+
////////////////////////////////////////////////////////////////////////
//
//
uint64_t *ipr = regs->ipr;
bzero((char *)ipr, NumInternalProcRegs * sizeof(InternalProcReg));
- ipr[IPR_PAL_BASE] = PAL_BASE;
+ ipr[IPR_PAL_BASE] = PalBase;
ipr[IPR_MCSR] = 0x6;
}
/* $Id$ */
-#ifndef __EV5_H__
-#define __EV5_H__
+#ifndef __ARCH_ALPHA_EV5_HH__
+#define __ARCH_ALPHA_EV5_HH__
-#define MODE2MASK(X) (1 << (X))
-
-// Alpha IPR register accessors
-#define PC_PAL(X) ((X) & 0x1)
-#define MCSR_SP(X) (((X) >> 1) & 0x3)
-
-#define ICSR_SDE(X) (((X) >> 30) & 0x1)
-#define ICSR_SPE(X) (((X) >> 28) & 0x3)
-#define ICSR_FPE(X) (((X) >> 26) & 0x1)
-
-#define ALT_MODE_AM(X) (((X) >> 3) & 0x3)
-
-#define DTB_CM_CM(X) (((X) >> 3) & 0x3)
+namespace EV5 {
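+// ASN fields are 7 bits wide under ALPHA_TLASER and 8 bits wide otherwise.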
#ifdef ALPHA_TLASER
-#define DTB_ASN_ASN(X) (((X) >> 57) & 0x7f)
-#define DTB_PTE_PPN(X) (((X) >> 32) & 0x07ffffff)
+const uint64_t AsnMask = ULL(0x7f);
#else
-#define DTB_ASN_ASN(X) (((X) >> 57) & 0xff)
-#define DTB_PTE_PPN(X) (((X) >> 32) & 0x07fffffff)
+const uint64_t AsnMask = ULL(0xff);
#endif
-#define DTB_PTE_XRE(X) (((X) >> 8) & 0xf)
-#define DTB_PTE_XWE(X) (((X) >> 12) & 0xf)
-#define DTB_PTE_FONR(X) (((X) >> 1) & 0x1)
-#define DTB_PTE_FONW(X) (((X) >> 2) & 0x1)
-#define DTB_PTE_GH(X) (((X) >> 5) & 0x3)
-#define DTB_PTE_ASMA(X) (((X) >> 4) & 0x1)
-
-#define ICM_CM(X) (((X) >> 3) & 0x3)
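+// EV5 implements 43 bits of virtual address; the remaining high-order bits
+// are unimplemented and must all be equal (see validVirtualAddress()).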
+const int VAddrImplBits = 43;
+const Addr VAddrImplMask = (ULL(1) << VAddrImplBits) - 1;
+const Addr VAddrUnImplMask = ~VAddrImplMask;
+inline Addr VAddrImpl(Addr a) { return a & VAddrImplMask; }
+inline Addr VAddrVPN(Addr a) { return a >> AlphaISA::PageShift; }
+inline Addr VAddrOffset(Addr a) { return a & AlphaISA::PageOffset; }
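+// Superpage region selectors: VA<42:41> on EV5, VA<47:41> on EV6
+// (tested against 2 and 0x7e respectively in the TLB translate paths).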
+inline Addr VAddrSpaceEV5(Addr a) { return a >> 41 & 0x3; }
+inline Addr VAddrSpaceEV6(Addr a) { return a >> 41 & 0x7f; }
#ifdef ALPHA_TLASER
-#define ITB_ASN_ASN(X) (((X) >> 4) & 0x7f)
-#define ITB_PTE_PPN(X) (((X) >> 32) & 0x07ffffff)
+inline bool PAddrIprSpace(Addr a) { return a >= ULL(0xFFFFF00000); }
+const int PAddrImplBits = 40;
#else
-#define ITB_ASN_ASN(X) (((X) >> 4) & 0xff)
-#define ITB_PTE_PPN(X) (((X) >> 32) & 0x07fffffff)
+inline bool PAddrIprSpace(Addr a) { return a >= ULL(0xFFFFFF00000); }
+const int PAddrImplBits = 44; // for Tsunami
#endif
-
-#define ITB_PTE_XRE(X) (((X) >> 8) & 0xf)
-#define ITB_PTE_FONR(X) (((X) >> 1) & 0x1)
-#define ITB_PTE_FONW(X) (((X) >> 2) & 0x1)
-#define ITB_PTE_GH(X) (((X) >> 5) & 0x3)
-#define ITB_PTE_ASMA(X) (((X) >> 4) & 0x1)
-
-#define VA_UNIMPL_MASK ULL(0xfffff80000000000)
-#define VA_IMPL_MASK ULL(0x000007ffffffffff)
-#define VA_IMPL(X) ((X) & VA_IMPL_MASK)
-#define VA_VPN(X) (VA_IMPL(X) >> 13)
-#define VA_SPACE_EV5(X) (((X) >> 41) & 0x3)
-#define VA_SPACE_EV6(X) (((X) >> 41) & 0x7f)
-#define VA_POFS(X) ((X) & 0x1fff)
-
-#define PA_UNCACHED_BIT_39 ULL(0x8000000000)
-#define PA_UNCACHED_BIT_40 ULL(0x10000000000)
-#define PA_UNCACHED_BIT_43 ULL(0x80000000000)
-#define PA_UNCACHED_MASK ULL(0x807ffffffff) // Clear PA<42:35>
-#ifdef ALPHA_TLASER
-#define PA_IPR_SPACE(X) ((X) >= ULL(0xFFFFF00000))
-#define PA_IMPL_MASK ULL(0xffffffffff)
-#else
-#define PA_IPR_SPACE(X) ((X) >= ULL(0xFFFFFF00000))
-#define PA_IMPL_MASK ULL(0xfffffffffff) // for Tsunami
-#endif
-
-#define PA_PFN2PA(X) ((X) << 13)
-
-
-#define MM_STAT_BAD_VA_MASK 0x0020
-#define MM_STAT_DTB_MISS_MASK 0x0010
-#define MM_STAT_FONW_MASK 0x0008
-#define MM_STAT_FONR_MASK 0x0004
-#define MM_STAT_ACV_MASK 0x0002
-#define MM_STAT_WR_MASK 0x0001
-
-#define OPCODE(X) (X >> 26) & 0x3f
-#define RA(X) (X >> 21) & 0x1f
-
-////////////////////////////////////////////////////////////////////////
-//
-//
-//
-
-// VPTE size for HW_LD/HW_ST
-#define HW_VPTE ((inst >> 11) & 0x1)
-
-// QWORD size for HW_LD/HW_ST
-#define HW_QWORD ((inst >> 12) & 0x1)
-
-// ALT mode for HW_LD/HW_ST
-#define HW_ALT (((inst >> 14) & 0x1) ? ALTMODE : 0)
-
-// LOCK/COND mode for HW_LD/HW_ST
-#define HW_LOCK (((inst >> 10) & 0x1) ? LOCKED : 0)
-#define HW_COND (((inst >> 10) & 0x1) ? LOCKED : 0)
-
-// PHY size for HW_LD/HW_ST
-#define HW_PHY (((inst >> 15) & 0x1) ? PHYSICAL : 0)
-
-// OFFSET for HW_LD/HW_ST
-#define HW_OFS (inst & 0x3ff)
-
-
-#define PAL_BASE 0x4000
-#define PAL_MAX 0x10000
-
-#endif //__EV5_H__
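+// Implemented physical address mask and the uncached-space address bits
+// (tested in AlphaTLB::checkCacheability and used to mask device BARs).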
+const Addr PAddrImplMask = (ULL(1) << PAddrImplBits) - 1;
+const Addr PAddrUncachedBit39 = ULL(0x8000000000);
+const Addr PAddrUncachedBit40 = ULL(0x10000000000);
+const Addr PAddrUncachedBit43 = ULL(0x80000000000);
+const Addr PAddrUncachedMask = ULL(0x807ffffffff); // Clear PA<42:35>
+
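+// Field accessors for the DTB/ITB internal processor registers,
+// replacing the old DTB_* / ITB_* macros.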
+inline int DTB_ASN_ASN(uint64_t reg) { return reg >> 57 & AsnMask; }
+inline Addr DTB_PTE_PPN(uint64_t reg)
+{ return reg >> 32 & ((ULL(1) << (PAddrImplBits - AlphaISA::PageShift)) - 1); }
+inline int DTB_PTE_XRE(uint64_t reg) { return reg >> 8 & 0xf; }
+inline int DTB_PTE_XWE(uint64_t reg) { return reg >> 12 & 0xf; }
+inline int DTB_PTE_FONR(uint64_t reg) { return reg >> 1 & 0x1; }
+inline int DTB_PTE_FONW(uint64_t reg) { return reg >> 2 & 0x1; }
+inline int DTB_PTE_GH(uint64_t reg) { return reg >> 5 & 0x3; }
+inline int DTB_PTE_ASMA(uint64_t reg) { return reg >> 4 & 0x1; }
+
+inline int ITB_ASN_ASN(uint64_t reg) { return reg >> 4 & AsnMask; }
+inline Addr ITB_PTE_PPN(uint64_t reg)
+{ return reg >> 32 & ((ULL(1) << (PAddrImplBits - AlphaISA::PageShift)) - 1); }
+inline int ITB_PTE_XRE(uint64_t reg) { return reg >> 8 & 0xf; }
+inline bool ITB_PTE_FONR(uint64_t reg) { return reg >> 1 & 0x1; }
+inline bool ITB_PTE_FONW(uint64_t reg) { return reg >> 2 & 0x1; }
+inline int ITB_PTE_GH(uint64_t reg) { return reg >> 5 & 0x3; }
+inline bool ITB_PTE_ASMA(uint64_t reg) { return reg >> 4 & 0x1; }
+
+inline uint64_t MCSR_SP(uint64_t reg) { return reg >> 1 & 0x3; }
+
+inline bool ICSR_SDE(uint64_t reg) { return reg >> 30 & 0x1; }
+inline int ICSR_SPE(uint64_t reg) { return reg >> 28 & 0x3; }
+inline bool ICSR_FPE(uint64_t reg) { return reg >> 26 & 0x1; }
+
+inline uint64_t ALT_MODE_AM(uint64_t reg) { return reg >> 3 & 0x3; }
+inline uint64_t DTB_CM_CM(uint64_t reg) { return reg >> 3 & 0x3; }
+inline uint64_t ICM_CM(uint64_t reg) { return reg >> 3 & 0x3; }
+
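+// MM_STAT fault-status bits, ORed together and written by AlphaDTB::fault().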
+const uint64_t MM_STAT_BAD_VA_MASK = ULL(0x0020);
+const uint64_t MM_STAT_DTB_MISS_MASK = ULL(0x0010);
+const uint64_t MM_STAT_FONW_MASK = ULL(0x0008);
+const uint64_t MM_STAT_FONR_MASK = ULL(0x0004);
+const uint64_t MM_STAT_ACV_MASK = ULL(0x0002);
+const uint64_t MM_STAT_WR_MASK = ULL(0x0001);
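+
+// Instruction field extractors: opcode is bits <31:26>, Ra is bits <25:21>.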
+inline int Opcode(AlphaISA::MachInst inst) { return inst >> 26 & 0x3f; }
+inline int Ra(AlphaISA::MachInst inst) { return inst >> 21 & 0x1f; }
+
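+// PALcode region: PalBase is the reset value of IPR_PAL_BASE; PalMax bounds
+// the PC range treated as PAL code when translating addresses.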
+const Addr PalBase = 0x4000;
+const Addr PalMax = 0x10000;
+
+/* namespace EV5 */ }
+
+#endif // __ARCH_ALPHA_EV5_HH__
#include <fenv.h>
#endif
-#include "cpu/base_cpu.hh"
-#include "cpu/exetrace.hh"
-#include "sim/sim_exit.hh"
-
#ifdef FULL_SYSTEM
-#include "arch/alpha/ev5.hh"
#include "arch/alpha/pseudo_inst.hh"
#endif
+#include "cpu/base_cpu.hh"
+#include "cpu/exetrace.hh"
+#include "sim/sim_exit.hh"
}};
////////////////////////////////////////////////////////////////////
inline Fault checkFpEnableFault(%(CPU_exec_context)s *xc)
{
Fault fault = No_Fault; // dummy... this ipr access should not fault
- if (!ICSR_FPE(xc->readIpr(AlphaISA::IPR_ICSR, fault))) {
+ if (!EV5::ICSR_FPE(xc->readIpr(AlphaISA::IPR_ICSR, fault))) {
fault = Fen_Fault;
}
return fault;
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __ISA_TRAITS_HH__
-#define __ISA_TRAITS_HH__
+#ifndef __ARCH_ALPHA_ISA_TRAITS_HH__
+#define __ARCH_ALPHA_ISA_TRAITS_HH__
#include "arch/alpha/faults.hh"
#include "base/misc.hh"
template <class ISA> class StaticInst;
template <class ISA> class StaticInstPtr;
+namespace EV5 {
+int DTB_ASN_ASN(uint64_t reg);
+int ITB_ASN_ASN(uint64_t reg);
+}
+
class AlphaISA
{
public:
InternalProcReg ipr[NumInternalProcRegs]; // internal processor regs
int intrflag; // interrupt flag
bool pal_shadow; // using pal_shadow registers
+ inline int instAsid() { return EV5::ITB_ASN_ASN(ipr[IPR_ITB_ASN]); }
+ inline int dataAsid() { return EV5::DTB_ASN_ASN(ipr[IPR_DTB_ASN]); }
#endif // FULL_SYSTEM
void serialize(std::ostream &os);
const int NumInternalProcRegs = TheISA::NumInternalProcRegs;
const int NumInterruptLevels = TheISA::NumInterruptLevels;
-// more stuff that should be imported here, but I'm too tired to do it
-// right now...
#include "arch/alpha/ev5.hh"
#endif
-#endif // __ALPHA_ISA_H__
+#endif // __ARCH_ALPHA_ISA_TRAITS_HH__
Addr paddr = 0;
//@todo Andrew couldn't remember why he commented some of this code
//so I put it back in. Perhaps something to do with gdb debugging?
- if (PC_PAL(vaddr) && (vaddr < PAL_MAX)) {
+ if (AlphaISA::PcPAL(vaddr) && (vaddr < EV5::PalMax)) {
paddr = vaddr & ~ULL(1);
} else {
if (AlphaISA::IsK0Seg(vaddr)) {
* but there is no easy way to do it.
*/
- if (PC_PAL(va) || va < 0x10000)
+ if (AlphaISA::PcPAL(va) || va < 0x10000)
return true;
Addr ptbr = context->regs.ipr[AlphaISA::IPR_PALtemp20];
#ifdef FULL_SYSTEM
bool validInstAddr(Addr addr) { return true; }
bool validDataAddr(Addr addr) { return true; }
- int getInstAsid() { return ITB_ASN_ASN(regs.ipr[TheISA::IPR_ITB_ASN]); }
- int getDataAsid() { return DTB_ASN_ASN(regs.ipr[TheISA::IPR_DTB_ASN]); }
+ int getInstAsid() { return regs.instAsid(); }
+ int getDataAsid() { return regs.dataAsid(); }
Fault translateInstReq(MemReqPtr &req)
{
int readIntrFlag() { return regs.intrflag; }
void setIntrFlag(int val) { regs.intrflag = val; }
Fault hwrei();
- bool inPalMode() { return PC_PAL(regs.pc); }
+ bool inPalMode() { return AlphaISA::PcPAL(regs.pc); }
void ev5_trap(Fault fault);
bool simPalCheck(int palFunc);
#endif
{
memset(data, 0, req->size);
- Addr daddr = req->paddr - (addr & PA_IMPL_MASK);
+ Addr daddr = req->paddr - (addr & EV5::PAddrImplMask);
switch (req->size)
{
return Machine_Check_Fault;
}
- Addr daddr = req->paddr - (addr & PA_IMPL_MASK);
+ Addr daddr = req->paddr - (addr & EV5::PAddrImplMask);
ExecContext *other_xc;
switch (daddr) {
#include "base/trace.hh"
#include "cpu/intr_control.hh"
#include "dev/dma.hh"
-#include "dev/pcireg.h"
-#include "dev/pciconfigall.hh"
-#include "dev/ide_disk.hh"
#include "dev/ide_ctrl.hh"
+#include "dev/ide_disk.hh"
+#include "dev/pciconfigall.hh"
+#include "dev/pcireg.h"
+#include "dev/platform.hh"
#include "dev/tsunami_cchip.hh"
#include "mem/bus/bus.hh"
+#include "mem/bus/dma_interface.hh"
#include "mem/bus/pio_interface.hh"
#include "mem/bus/pio_interface_impl.hh"
-#include "mem/bus/dma_interface.hh"
-#include "dev/tsunami.hh"
#include "mem/functional_mem/memory_control.hh"
#include "mem/functional_mem/physical_memory.hh"
#include "sim/builder.hh"
pioInterface->addAddrRange(RangeSize(pri_cmd_addr,
pri_cmd_size));
- pri_cmd_addr &= PA_UNCACHED_MASK;
+ pri_cmd_addr &= EV5::PAddrUncachedMask;
}
break;
pioInterface->addAddrRange(RangeSize(pri_ctrl_addr,
pri_ctrl_size));
- pri_ctrl_addr &= PA_UNCACHED_MASK;
+ pri_ctrl_addr &= EV5::PAddrUncachedMask;
}
break;
pioInterface->addAddrRange(RangeSize(sec_cmd_addr,
sec_cmd_size));
- sec_cmd_addr &= PA_UNCACHED_MASK;
+ sec_cmd_addr &= EV5::PAddrUncachedMask;
}
break;
pioInterface->addAddrRange(RangeSize(sec_ctrl_addr,
sec_ctrl_size));
- sec_ctrl_addr &= PA_UNCACHED_MASK;
+ sec_ctrl_addr &= EV5::PAddrUncachedMask;
}
break;
if (pioInterface)
pioInterface->addAddrRange(RangeSize(bmi_addr, bmi_size));
- bmi_addr &= PA_UNCACHED_MASK;
+ bmi_addr &= EV5::PAddrUncachedMask;
}
break;
}
if (pioInterface)
pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
- BARAddrs[0] &= PA_UNCACHED_MASK;
+ BARAddrs[0] &= EV5::PAddrUncachedMask;
}
break;
case PCI0_BASE_ADDR1:
if (pioInterface)
pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
- BARAddrs[1] &= PA_UNCACHED_MASK;
+ BARAddrs[1] &= EV5::PAddrUncachedMask;
}
break;
}
DPRINTF(PciConfigAll, "read va=%#x size=%d\n",
req->vaddr, req->size);
- Addr daddr = (req->paddr - (addr & PA_IMPL_MASK));
+ Addr daddr = (req->paddr - (addr & EV5::PAddrImplMask));
int device = (daddr >> 11) & 0x1F;
int func = (daddr >> 8) & 0x7;
Fault
PciConfigAll::write(MemReqPtr &req, const uint8_t *data)
{
- Addr daddr = (req->paddr - (addr & PA_IMPL_MASK));
+ Addr daddr = (req->paddr - (addr & EV5::PAddrImplMask));
int device = (daddr >> 11) & 0x1F;
int func = (daddr >> 8) & 0x7;
DPRINTF(Tsunami, "read va=%#x size=%d\n",
req->vaddr, req->size);
- Addr daddr = (req->paddr - (addr & PA_IMPL_MASK)) >> 6;
+ Addr daddr = (req->paddr - (addr & EV5::PAddrImplMask)) >> 6;
ExecContext *xc = req->xc;
switch (req->size) {
DPRINTF(Tsunami, "write - va=%#x value=%#x size=%d \n",
req->vaddr, *(uint64_t*)data, req->size);
- Addr daddr = (req->paddr - (addr & PA_IMPL_MASK)) >> 6;
+ Addr daddr = (req->paddr - (addr & EV5::PAddrImplMask)) >> 6;
bool supportedWrite = false;
uint64_t size = tsunami->intrctrl->cpu->system->execContexts.size();
DPRINTF(Tsunami, "io read va=%#x size=%d IOPorrt=%#x\n",
req->vaddr, req->size, req->vaddr & 0xfff);
- Addr daddr = (req->paddr - (addr & PA_IMPL_MASK));
+ Addr daddr = (req->paddr - (addr & EV5::PAddrImplMask));
switch(req->size) {
DPRINTF(Tsunami, "io write - va=%#x size=%d IOPort=%#x Data=%#x\n",
req->vaddr, req->size, req->vaddr & 0xfff, dt64);
- Addr daddr = (req->paddr - (addr & PA_IMPL_MASK));
+ Addr daddr = (req->paddr - (addr & EV5::PAddrImplMask));
switch(req->size) {
case sizeof(uint8_t):
DPRINTF(Tsunami, "read va=%#x size=%d\n",
req->vaddr, req->size);
- Addr daddr = (req->paddr - (addr & PA_IMPL_MASK)) >> 6;
+ Addr daddr = (req->paddr - (addr & EV5::PAddrImplMask)) >> 6;
switch (req->size) {
DPRINTF(Tsunami, "write - va=%#x size=%d \n",
req->vaddr, req->size);
- Addr daddr = (req->paddr - (addr & PA_IMPL_MASK)) >> 6;
+ Addr daddr = (req->paddr - (addr & EV5::PAddrImplMask)) >> 6;
switch (req->size) {
#include "mem/bus/pio_interface_impl.hh"
#include "mem/functional_mem/memory_control.hh"
#include "sim/builder.hh"
-#include "targetarch/ev5.hh"
using namespace std;
Fault
Uart::read(MemReqPtr &req, uint8_t *data)
{
- Addr daddr = req->paddr - (addr & PA_IMPL_MASK);
+ Addr daddr = req->paddr - (addr & EV5::PAddrImplMask);
DPRINTF(Uart, " read register %#x\n", daddr);
Fault
Uart::write(MemReqPtr &req, const uint8_t *data)
{
- Addr daddr = req->paddr - (addr & PA_IMPL_MASK);
+ Addr daddr = req->paddr - (addr & EV5::PAddrImplMask);
DPRINTF(Uart, " write register %#x value %#x\n", daddr, *(uint8_t*)data);
uint64_t a0 = xc->regs.intRegFile[ArgumentReg0];
if (!TheISA::IsK0Seg(a0) ||
- xc->memctrl->badaddr(TheISA::K0Seg2Phys(a0) & PA_IMPL_MASK)) {
+ xc->memctrl->badaddr(TheISA::K0Seg2Phys(a0) & EV5::PAddrImplMask)) {
DPRINTF(BADADDR, "badaddr arg=%#x bad\n", a0);
xc->regs.intRegFile[ReturnValueReg] = 0x1;