From: Alec Roelke
Date: Wed, 30 Nov 2016 22:10:28 +0000 (-0500)
Subject: riscv: [Patch 5/5] Added missing support for timing CPU models
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=126c0360e2efd9588f38128bad94c7fa82c79f25;p=gem5.git

riscv: [Patch 5/5] Added missing support for timing CPU models

Last of five patches adding RISC-V to gem5. This patch adds the support for
the timing, minor, and detailed CPU models that was missing from the previous
four patches: it handles timing-mode memory accesses and tells the minor and
detailed models which instruction to use as a no-op (addi zero, zero, 0).
Patches 1-4 introduced RISC-V, implemented the base instruction set RV64I,
and added the multiply, floating-point, and atomic memory extensions
(RV64MAFD).

[Fixed compatibility with edit from patch 1.]
[Fixed compatibility with hg copy edit from patch 1.]
[Fixed some style errors in locked_mem.hh.]

Signed-off-by: Alec Roelke
Signed-off-by: Jason Lowe-Power
---
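(Aside, not part of the commit message.) The no-op mentioned above is the
NoopMachInst constant that this patch adds to isa_traits.hh; it is the
canonical RISC-V nop, i.e. the encoding the assembler emits for the `nop`
pseudo-instruction. A minimal standalone check of that encoding against the
I-type field layout (illustrative only, not gem5 code):

// Standalone sanity check (illustrative only, not gem5 code): the value
// 0x00000013 used for NoopMachInst decodes, per the RISC-V I-type format,
// to "addi zero, zero, 0", the canonical RISC-V nop.
#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t noop = 0x00000013;

    uint32_t opcode = noop & 0x7f;           // bits [6:0]:   major opcode
    uint32_t rd     = (noop >> 7) & 0x1f;    // bits [11:7]:  destination reg
    uint32_t funct3 = (noop >> 12) & 0x7;    // bits [14:12]: minor opcode
    uint32_t rs1    = (noop >> 15) & 0x1f;   // bits [19:15]: source reg
    int32_t  imm    = int32_t(noop) >> 20;   // bits [31:20]: sign-extended imm

    assert(opcode == 0x13);                  // OP-IMM
    assert(funct3 == 0x0);                   // ADDI
    assert(rd == 0 && rs1 == 0 && imm == 0); // addi zero, zero, 0
    return 0;
}

Since every field other than the major opcode is zero, the instruction reads
x0, adds an immediate of 0, and writes x0, so it has no architectural effect.
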
diff --git a/build_opts/RISCV b/build_opts/RISCV
index 3b5053a79..38abd9216 100644
--- a/build_opts/RISCV
+++ b/build_opts/RISCV
@@ -1,3 +1,3 @@
 TARGET_ISA = 'riscv'
-CPU_MODELS = 'AtomicSimpleCPU'
+CPU_MODELS = 'AtomicSimpleCPU,TimingSimpleCPU,MinorCPU,O3CPU'
 PROTOCOL = 'MI_example'
diff --git a/src/arch/riscv/isa_traits.hh b/src/arch/riscv/isa_traits.hh
index a794a1889..f7a2c8762 100644
--- a/src/arch/riscv/isa_traits.hh
+++ b/src/arch/riscv/isa_traits.hh
@@ -63,6 +63,8 @@ using namespace LittleEndianGuest;
 const Addr PageShift = 12;
 const Addr PageBytes = ULL(1) << PageShift;
 
+const ExtMachInst NoopMachInst = 0x00000013;
+
 // Memory accesses can not be unaligned
 const bool HasUnalignedMemAcc = false;
 
diff --git a/src/arch/riscv/locked_mem.hh b/src/arch/riscv/locked_mem.hh
index 92d320fd7..a2e48b65c 100644
--- a/src/arch/riscv/locked_mem.hh
+++ b/src/arch/riscv/locked_mem.hh
@@ -1,7 +1,22 @@
-/* 
+/*
  * Copyright (c) 2006 The Regents of The University of Michigan
  * Copyright (c) 2007-2008 The Florida State University
  * Copyright (c) 2009 The University of Edinburgh
+ * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2014-2015 Sven Karlsson
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2006-2007 The Regents of The University of Michigan
+ * Copyright (c) 2016 The University of Virginia
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,47 +43,97 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Authors: Steve Reinhardt
- *          Stephen Hines
- *          Timothy M. Jones
+ *          Alec Roelke
  */
-
 #ifndef __ARCH_RISCV_LOCKED_MEM_HH__
 #define __ARCH_RISCV_LOCKED_MEM_HH__
 
-/**
- * @file
- *
- * ISA-specific helper functions for locked memory accesses.
- */
-
+#include "arch/registers.hh"
+#include "base/misc.hh"
+#include "base/trace.hh"
+#include "debug/LLSC.hh"
 #include "mem/packet.hh"
 #include "mem/request.hh"
 
+/*
+ * ISA-specific helper functions for locked memory accesses.
+ */
 namespace RiscvISA
 {
+static bool lock_flag = false;
+static Addr lock_addr = 0;
 
 template <class XC>
-inline void
-handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
+inline void handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
 {
+    if (!lock_flag)
+        return;
+
+    DPRINTF(LLSC, "Locked snoop on address %x.\n",
+            pkt->getAddr()&cacheBlockMask);
+
+    Addr snoop_addr = pkt->getAddr()&cacheBlockMask;
+
+    if ((lock_addr&cacheBlockMask) == snoop_addr)
+        lock_flag = false;
 }
 
+
 template <class XC>
-inline void
-handleLockedRead(XC *xc, Request *req)
+inline void handleLockedRead(XC *xc, Request *req)
 {
+    lock_addr = req->getPaddr()&~0xF;
+    lock_flag = true;
+    DPRINTF(LLSC, "[cid:%i]: "
+            "Load-Link Flag Set & Load-Link Address set to %x.\n",
+            req->contextId(), req->getPaddr()&~0xF);
 }
 
 template <class XC>
-inline void
-handleLockedSnoopHit(XC *xc)
-{
-}
+inline void handleLockedSnoopHit(XC *xc)
+{}
 
 template <class XC>
-inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+inline bool handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
 {
+    if (req->isUncacheable()) {
+        // Funky Turbolaser mailbox access...don't update
+        // result register (see stq_c in decoder.isa)
+        req->setExtraData(2);
+    } else {
+        // standard store conditional
+        if (!lock_flag || (req->getPaddr()&~0xF) != lock_addr) {
+            // Lock flag not set or addr mismatch in CPU;
+            // don't even bother sending to memory system
+            req->setExtraData(0);
+            lock_flag = false;
+
+            // the rest of this code is not architectural;
+            // it's just a debugging aid to help detect
+            // livelock by warning on long sequences of failed
+            // store conditionals
+            int stCondFailures = xc->readStCondFailures();
+            stCondFailures++;
+            xc->setStCondFailures(stCondFailures);
+            if (stCondFailures % 100000 == 0) {
+                warn("%i:"" context %d:"
+                     " %d consecutive store conditional failures\n",
+                     curTick(), xc->contextId(), stCondFailures);
+            }
+
+            if (!lock_flag){
+                DPRINTF(LLSC, "[cid:%i]:"
+                        " Lock Flag Set, Store Conditional Failed.\n",
+                        req->contextId());
+            } else if ((req->getPaddr() & ~0xf) != lock_addr) {
+                DPRINTF(LLSC, "[cid:%i]: Load-Link Address Mismatch, "
+                        "Store Conditional Failed.\n", req->contextId());
+            }
+            // store conditional failed already, so don't issue it to mem
+            return false;
+        }
+    }
+    return true;
 }
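
(Aside, not part of the patch.) For readers new to the LL/SC handling above:
handleLockedRead() records a reservation on the 16-byte-aligned region of the
load-reserved address, handleLockedSnoop() drops the reservation when another
agent touches the reserved cache block, and handleLockedWrite() lets a store
conditional go to memory only while the reservation is still valid. A
standalone sketch of just that flag logic (illustrative only; it leaves out
gem5's ExecContext/Request types and the setExtraData() plumbing):

// Simplified model of the reservation logic above (illustrative only,
// not gem5 code): one flag plus a 16-byte-aligned reservation address,
// cleared by a matching snoop and checked by the store conditional.
#include <cassert>
#include <cstdint>

namespace {

bool lock_flag = false;
uint64_t lock_addr = 0;

// cf. handleLockedRead(): remember the reserved 16-byte region
void loadReserved(uint64_t paddr)
{
    lock_addr = paddr & ~uint64_t(0xF);
    lock_flag = true;
}

// cf. handleLockedSnoop(): another agent touched the reserved block
void snoop(uint64_t paddr, uint64_t cacheBlockMask)
{
    if (lock_flag && (lock_addr & cacheBlockMask) == (paddr & cacheBlockMask))
        lock_flag = false;
}

// cf. handleLockedWrite(): true if the SC may be issued to memory
bool storeConditional(uint64_t paddr)
{
    if (!lock_flag || (paddr & ~uint64_t(0xF)) != lock_addr) {
        lock_flag = false;               // a failed SC also drops the reservation
        return false;
    }
    return true;
}

} // namespace

int main()
{
    const uint64_t cacheBlockMask = ~uint64_t(63);  // 64-byte cache blocks

    loadReserved(0x1008);
    assert(storeConditional(0x1000));    // same 16-byte region: SC succeeds

    loadReserved(0x1008);
    snoop(0x1040, cacheBlockMask);       // different block: reservation kept
    snoop(0x1020, cacheBlockMask);       // same block: reservation lost
    assert(!storeConditional(0x1008));   // SC fails, never sent to memory
    return 0;
}

Like the lock_flag/lock_addr statics in the patch, the reservation here is a
single global rather than per-context state; that matches the simplification
the patch makes for the CPU-side check.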