# -*- mode:python -*-
-# Copyright (c) 2009, 2012-2013 ARM Limited
+# Copyright (c) 2009, 2012-2013, 2018 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
Source('insts/static_inst.cc')
Source('insts/vfp.cc')
Source('insts/fplib.cc')
+ Source('insts/crypto.cc')
Source('interrupts.cc')
Source('isa.cc')
Source('isa_device.cc')
--- /dev/null
+/*
+ * Copyright (c) 2018 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Matt Horsnell
+ * Prakash Ramrakhyani
+ */
+
+#include <cstdio>
+#include <cstring>
+#include <iostream>
+#include <string>
+
+#include "crypto.hh"
+
+namespace ArmISA {
+
+void
+Crypto::sha256Op(
+    uint32_t *X,
+    uint32_t *Y,
+    uint32_t *Z)
+{
+    // Four rounds of the SHA256 hash update.  X and Y each hold four
+    // 32-bit working variables and Z supplies the four
+    // schedule/constant words consumed by this call.  Both X and Y
+    // are updated in place.
+    uint32_t T0, T1, T2, T3;
+    for (int i = 0; i < 4; ++i) {
+        T0 = choose(Y[0], Y[1], Y[2]);
+        T1 = majority(X[0], X[1], X[2]);
+        T2 = Y[3] + sigma1(Y[0]) + T0 + Z[i];
+        X[3] = T2 + X[3];
+        Y[3] = T2 + sigma0(X[0]) + T1;
+        // Rotate the eight working variables down one lane so the
+        // next round reads the freshly computed values.
+        T3 = Y[3];
+        Y[3] = Y[2]; Y[2] = Y[1]; Y[1] = Y[0]; Y[0] = X[3];
+        X[3] = X[2]; X[2] = X[1]; X[1] = X[0]; X[0] = T3;
+    }
+}
+
+void
+Crypto::_sha1Op(
+    uint32_t *X,
+    uint32_t *Y,
+    uint32_t *Z,
+    SHAOp op)
+{
+    // Four rounds of the SHA1 hash update.  X[0..3] hold four working
+    // variables, Y[0] carries the fifth, and Z supplies the four
+    // schedule/constant words.  'op' selects the round mixing
+    // function; any unexpected value leaves the inputs untouched.
+    uint32_t T1, T2;
+
+    for (int i = 0; i < 4; ++i) {
+        switch (op) {
+        case CHOOSE: T1 = choose(X[1], X[2], X[3]); break;
+        case PARITY: T1 = parity(X[1], X[2], X[3]); break;
+        case MAJORITY: T1 = majority(X[1], X[2], X[3]); break;
+        default: return;
+        }
+        // ror by 27 is the spec's rotl(a, 5); ror by 2 is rotl(b, 30).
+        Y[0] += ror(X[0], 27) + T1 + Z[i];
+        X[1] = ror(X[1], 2);
+        T2 = Y[0];
+        Y[0] = X[3];
+        X[3] = X[2]; X[2] = X[1]; X[1] = X[0]; X[0] = T2;
+    }
+}
+
+void
+Crypto::sha256H(
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2)
+{
+    // SHA256H: unpack the three 128-bit operands into 32-bit lanes,
+    // run four SHA256 rounds, and write the updated destination
+    // lanes back out.
+    uint32_t dst[4], src[4], sched[4];
+    load3Reg(dst, src, sched, output, input, input2);
+    sha256Op(dst, src, sched);
+    store1Reg(output, dst);
+}
+
+void
+Crypto::sha256H2(
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2)
+{
+    // SHA256H2: as sha256H but with the first two operands handed to
+    // the round function in swapped roles; the destination lanes are
+    // still the ones written back.
+    uint32_t dst[4], src[4], sched[4];
+    load3Reg(dst, src, sched, output, input, input2);
+    sha256Op(src, dst, sched);
+    store1Reg(output, dst);
+}
+
+void
+Crypto::sha256Su0(uint8_t *output, uint8_t *input)
+{
+    // SHA256SU0: first half of the SHA256 message-schedule update.
+    // Each destination word accumulates the mix
+    // (ror 7 ^ ror 18 ^ shr 3) of the word that follows it.
+    uint32_t x[4], y[4];
+    load2Reg(x, y, output, input);
+
+    // The words following x[0]..x[3] are x[1], x[2], x[3] and y[0];
+    // capture them before x is modified.
+    const uint32_t w[4] = { x[1], x[2], x[3], y[0] };
+    for (int i = 0; i < 4; ++i)
+        x[i] += ror(w[i], 7) ^ ror(w[i], 18) ^ (w[i] >> 3);
+
+    store1Reg(output, x);
+}
+
+void
+Crypto::sha256Su1(
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2)
+{
+    // SHA256SU1: second half of the SHA256 message-schedule update.
+    // Combines the destination words with following words (T0) and
+    // the (ror 17 ^ ror 19 ^ shr 10) mix of words further ahead,
+    // processed two lanes at a time.
+    uint32_t X[4], Y[4], Z[4];
+    uint32_t T0[4], T1[4], T2[4], T3[4];
+
+    load3Reg(&X[0], &Y[0], &Z[0], output, input, input2);
+
+    // T0 = the four words immediately following the Y operand.
+    T0[3] = Z[0]; T0[2] = Y[3]; T0[1] = Y[2]; T0[0] = Y[1];
+    T1[1] = Z[3]; T1[0] = Z[2];
+    T1[1] = ror(T1[1], 17) ^ ror(T1[1], 19) ^ (T1[1] >> 10);
+    T1[0] = ror(T1[0], 17) ^ ror(T1[0], 19) ^ (T1[0] >> 10);
+    T3[1] = X[1] + T0[1]; T3[0] = X[0] + T0[0];
+    T1[1] = T3[1] + T1[1]; T1[0] = T3[0] + T1[0];
+    // The freshly computed low two results feed the mix used for the
+    // high two results.
+    T2[1] = ror(T1[1], 17) ^ ror(T1[1], 19) ^ (T1[1] >> 10);
+    T2[0] = ror(T1[0], 17) ^ ror(T1[0], 19) ^ (T1[0] >> 10);
+    T3[1] = X[3] + T0[3]; T3[0] = X[2] + T0[2];
+    X[3] = T3[1] + T2[1];
+    X[2] = T3[0] + T2[0];
+    X[1] = T1[1]; X[0] = T1[0];
+
+    store1Reg(output, &X[0]);
+}
+
+void
+Crypto::sha1Op(
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2,
+    SHAOp op)
+{
+    // Shared wrapper for SHA1C/SHA1P/SHA1M: unpack the operands, run
+    // four rounds with the selected mixing function, and store the
+    // updated destination lanes.
+    uint32_t vars[4], e[4], sched[4];
+    load3Reg(vars, e, sched, output, input, input2);
+    _sha1Op(vars, e, sched, op);
+    store1Reg(output, vars);
+}
+
+void
+Crypto::sha1C(
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2)
+{
+    // SHA1C: hash update rounds using the 'choose' mixing function.
+    sha1Op(output, input, input2, CHOOSE);
+}
+
+void
+Crypto::sha1P(
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2)
+{
+    // SHA1P: hash update rounds using the 'parity' mixing function.
+    sha1Op(output, input, input2, PARITY);
+}
+
+void
+Crypto::sha1M(
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2)
+{
+    // SHA1M: hash update rounds using the 'majority' mixing function.
+    sha1Op(output, input, input2, MAJORITY);
+}
+
+void
+Crypto::sha1H(uint8_t *output, uint8_t *input)
+{
+    // SHA1H: fixed rotate of the source's low word (ror by 2 is the
+    // spec's rotl by 30).  The destination's other three lanes are
+    // re-stored unchanged.
+    uint32_t dst[4], src[4];
+    load2Reg(dst, src, output, input);
+    dst[0] = ror(src[0], 2);
+    store1Reg(output, dst);
+}
+
+void
+Crypto::sha1Su0(
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2)
+{
+    // SHA1SU0: first half of the SHA1 message-schedule update.  Each
+    // destination word is XORed with a word two lanes ahead and the
+    // matching lane of the third operand.
+    uint32_t x[4], y[4], z[4];
+    load3Reg(x, y, z, output, input, input2);
+
+    // Capture the "two lanes ahead" words before x is modified.
+    const uint32_t mix[4] = { x[2], x[3], y[0], y[1] };
+    for (int i = 0; i < 4; ++i)
+        x[i] ^= mix[i] ^ z[i];
+
+    store1Reg(output, x);
+}
+
+void
+Crypto::sha1Su1(uint8_t *output, uint8_t *input)
+{
+    // SHA1SU1: second half of the SHA1 message-schedule update.
+    // Each result is the 1-bit left rotate (ror 31) of an XOR mix;
+    // the top lane additionally folds in the rotated low result.
+    uint32_t x[4], y[4];
+    load2Reg(x, y, output, input);
+
+    const uint32_t t0 = x[0] ^ y[1];
+    const uint32_t t1 = x[1] ^ y[2];
+    const uint32_t t2 = x[2] ^ y[3];
+    const uint32_t t3 = x[3];  // XOR with zero in the original form.
+    x[0] = ror(t0, 31);
+    x[1] = ror(t1, 31);
+    x[2] = ror(t2, 31);
+    x[3] = ror(t3, 31) ^ ror(t0, 30);
+
+    store1Reg(output, x);
+}
+
+void
+Crypto::load2Reg(
+    uint32_t *X,
+    uint32_t *Y,
+    uint8_t *output,
+    uint8_t *input)
+{
+    // Unpack two 128-bit register images into 32-bit lanes in host
+    // byte order.  Use memcpy instead of a cast-and-dereference: the
+    // byte buffers are not guaranteed to be aligned for uint32_t and
+    // the cast violated strict aliasing (UB).  memcpy compiles to
+    // the same single load on common targets.
+    for (int i = 0; i < 4; ++i) {
+        std::memcpy(&X[i], &output[i*4], sizeof(uint32_t));
+        std::memcpy(&Y[i], &input[i*4], sizeof(uint32_t));
+    }
+}
+
+void
+Crypto::load3Reg(
+    uint32_t *X,
+    uint32_t *Y,
+    uint32_t *Z,
+    uint8_t *output,
+    uint8_t *input,
+    uint8_t *input2)
+{
+    // Unpack three 128-bit register images into 32-bit lanes in host
+    // byte order.  memcpy avoids the unaligned, aliasing-violating
+    // uint32_t* cast of the original (UB) while producing identical
+    // values wherever the cast happened to work.
+    for (int i = 0; i < 4; ++i) {
+        std::memcpy(&X[i], &output[i*4], sizeof(uint32_t));
+        std::memcpy(&Y[i], &input[i*4], sizeof(uint32_t));
+        std::memcpy(&Z[i], &input2[i*4], sizeof(uint32_t));
+    }
+}
+
+void
+Crypto::store1Reg(uint8_t *output, uint32_t *X)
+{
+    // Serialise four 32-bit words back to the destination buffer,
+    // least-significant byte first (little endian).
+    // NOTE(review): the loadNReg helpers read words in host byte
+    // order, so this load/store pair is only symmetric on a
+    // little-endian host -- confirm that is the intended assumption.
+    for (int i = 0; i < 4; ++i) {
+        output[i*4] = (uint8_t)(X[i]);
+        output[i*4+1] = (uint8_t)(X[i] >> 8);
+        output[i*4+2] = (uint8_t)(X[i] >> 16);
+        output[i*4+3] = (uint8_t)(X[i] >> 24);
+    }
+}
+
+} // namespace ArmISA
--- /dev/null
+/*
+ * Copyright (c) 2018 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Matt Horsnell
+ * Prakash Ramrakhyani
+ */
+
+#ifndef __ARCH_ARM_INSTS_CRYPTO_HH__
+#define __ARCH_ARM_INSTS_CRYPTO_HH__
+
+namespace ArmISA {
+
+/**
+ * Helper engine for the ARM SHA1/SHA256 instructions.  Pure
+ * computation, no gem5 state: every public entry point takes 128-bit
+ * register images as byte pointers and writes its result back
+ * through 'output'.
+ * NOTE(review): this header uses uint8_t/uint32_t but includes no
+ * headers itself; it relies on every consumer providing <cstdint> --
+ * confirm before reusing it standalone.
+ */
+class Crypto
+{
+    // Round-mixing function selector shared by the SHA1 helpers.
+    enum SHAOp : uint8_t
+    {
+        CHOOSE = 0,
+        PARITY,
+        MAJORITY
+    };
+
+    // Rotate right by 'shift'.  Masking the complementary shift into
+    // [0, 31] avoids the undefined x << 32 the naive form hits when
+    // shift == 0; results for shift in [1, 31] are unchanged.
+    uint32_t ror(uint32_t x, uint8_t shift)
+    {
+        return (x >> shift) | (x << ((32 - shift) & 31));
+    }
+
+    // SHA "Ch" function: bitwise select between Y and Z under X.
+    uint32_t choose(uint32_t X, uint32_t Y, uint32_t Z)
+    {
+        return (((Y ^ Z) & X) ^ Z);
+    }
+
+    // SHA "Parity" function: three-way XOR.
+    uint32_t parity(uint32_t X, uint32_t Y, uint32_t Z)
+    {
+        return (X ^ Y ^ Z);
+    }
+
+    // SHA "Maj" function: bitwise majority vote of the three inputs.
+    uint32_t majority(uint32_t X, uint32_t Y, uint32_t Z)
+    {
+        return ((X & Y) | ((X | Y) & Z));
+    }
+
+    // SHA256 Sigma0: ror 2 ^ ror 13 ^ ror 22.
+    uint32_t sigma0(uint32_t X)
+    {
+        return ror(X,2) ^ ror(X,13) ^ ror(X,22);
+    }
+
+    // SHA256 Sigma1: ror 6 ^ ror 11 ^ ror 25.
+    uint32_t sigma1(uint32_t X)
+    {
+        return ror(X,6) ^ ror(X,11) ^ ror(X,25);
+    }
+
+    // Four SHA256 rounds over working variables X/Y with schedule Z.
+    void sha256Op(uint32_t *X, uint32_t *Y, uint32_t *Z);
+    // Shared SHA1 wrapper and round engine; 'op' picks the mixer.
+    void sha1Op(uint8_t *output, uint8_t *input, uint8_t *input2, SHAOp op);
+    void _sha1Op(uint32_t *X, uint32_t *Y, uint32_t *Z, SHAOp op);
+
+    // Unpack/pack 128-bit register images to/from 32-bit lanes.
+    void load2Reg(uint32_t *X, uint32_t *Y, uint8_t *output, uint8_t *input);
+    void load3Reg(uint32_t *X, uint32_t *Y, uint32_t *Z,
+                  uint8_t *output, uint8_t *input, uint8_t *input2);
+    void store1Reg(uint8_t *output, uint32_t *X);
+
+  public:
+    void sha256H(uint8_t *output, uint8_t *input, uint8_t *input2);
+    void sha256H2(uint8_t *output, uint8_t *input, uint8_t *input2);
+    void sha256Su0(uint8_t *output, uint8_t *input);
+    void sha256Su1(uint8_t *output, uint8_t *input, uint8_t *input2);
+
+    void sha1C(uint8_t *output, uint8_t *input, uint8_t *input2);
+    void sha1P(uint8_t *output, uint8_t *input, uint8_t *input2);
+    void sha1M(uint8_t *output, uint8_t *input, uint8_t *input2);
+    void sha1H(uint8_t *output, uint8_t *input);
+    void sha1Su0(uint8_t *output, uint8_t *input, uint8_t *input2);
+    void sha1Su1(uint8_t *output, uint8_t *input);
+};
+
+} // namespace ArmISA
+
+#endif //__ARCH_ARM_INSTS_CRYPTO_HH__
}
}
}
+ } else {
+ if (u) {
+ switch (c) {
+ case 0x0:
+ return new SHA256H(machInst, vd, vn, vm);
+ case 0x1:
+ return new SHA256H2(machInst, vd, vn, vm);
+ case 0x2:
+ return new SHA256SU1(machInst, vd, vn, vm);
+ case 0x3:
+ return new Unknown(machInst);
+ default:
+ M5_UNREACHABLE;
+ }
+ } else {
+ switch (c) {
+ case 0x0:
+ return new SHA1C(machInst, vd, vn, vm);
+ case 0x1:
+ return new SHA1P(machInst, vd, vn, vm);
+ case 0x2:
+ return new SHA1M(machInst, vd, vn, vm);
+ case 0x3:
+ return new SHA1SU0(machInst, vd, vn, vm);
+ default:
+ M5_UNREACHABLE;
+ }
+ }
}
return new Unknown(machInst);
case 0xd:
return decodeNeonSTwoMiscReg<NVcltD, NVcltQ>(
q, size, machInst, vd, vm);
}
+ case 0x5:
+ if (q) {
+ return new SHA1H(machInst, vd, vm);
+ } else {
+ return new Unknown(machInst);
+ }
case 0x6:
if (bits(machInst, 10)) {
if (q)
} else {
return new Unknown(machInst);
}
+ case 0x7:
+ if (q) {
+ return new SHA256SU0(machInst, vd, vm);
+ } else {
+ return new SHA1SU1(machInst, vd, vm);
+ }
case 0xc:
case 0xe:
if (b == 0x18) {
#include "arch/arm/insts/branch.hh"
#include "arch/arm/insts/branch64.hh"
+#include "arch/arm/insts/crypto.hh"
#include "arch/arm/insts/data64.hh"
#include "arch/arm/insts/fplib.hh"
#include "arch/arm/insts/macromem.hh"
--- /dev/null
+// -*- mode:c++ -*-
+//
+// Copyright (c) 2018 ARM Limited
+// All rights reserved
+//
+// The license below extends only to copyright in the software and shall
+// not be construed as granting a license to any other intellectual
+// property including but not limited to intellectual property relating
+// to a hardware implementation of the functionality of the software
+// licensed hereunder. You may use the software subject to the license
+// terms below provided that you ensure that this notice is replicated
+// unmodified and in its entirety in all distributions of the software,
+// modified or unmodified, in source code or in binary form.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met: redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer;
+// redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution;
+// neither the name of the copyright holders nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Matt Horsnell
+// Prakash Ramrakhyani
+
+let {{
+
+    # Guard prepended to every crypto instruction body: read ID_ISAR5
+    # and raise UndefinedInstruction when the feature field selected
+    # by %(mask)d is clear.
+    cryptoEnabledCheckCode = '''
+    auto crypto_reg = xc->tcBase()->readMiscReg(MISCREG_ID_ISAR5);
+    if (!(crypto_reg & %(mask)d)) {
+        return std::make_shared<UndefinedInstruction>(machInst, true);
+    }
+    '''
+
+    header_output = ""
+    decoder_output = ""
+    exec_output = ""
+
+    # C++ prologue for three-operand crypto ops: copy both source
+    # quad registers and the destination into byte-addressable
+    # RegVect unions (declared by CryptoPredOpExecute).
+    cryptoRegRegRegPrefix = '''
+    Crypto crypto;
+    RegVect srcReg1, srcReg2, destReg;
+    // Read source and destination registers.
+    '''
+    for reg in range(4):
+        cryptoRegRegRegPrefix += '''
+        srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
+        srcReg2.regs[%(reg)d] = htog(FpOp2P%(reg)d_uw);
+        destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
+        ''' % { "reg" : reg }
+    cryptoRegRegRegPrefix += '''
+    unsigned char *output = (unsigned char *)(&destReg.regs[0]);
+    unsigned char *input = (unsigned char *)(&srcReg1.regs[0]);
+    unsigned char *input2 = (unsigned char *)(&srcReg2.regs[0]);
+    '''
+
+    # Common epilogue: write the computed destination lanes back to
+    # the destination register.
+    cryptoSuffix = ""
+    for reg in range(4):
+        cryptoSuffix += '''
+        FpDestP%(reg)d_uw = gtoh(destReg.regs[%(reg)d]);
+        ''' % { "reg" : reg }
+
+    # Two-operand variant of the prologue above.
+    cryptoRegRegPrefix = '''
+    Crypto crypto;
+    RegVect srcReg1, destReg;
+    // Read source and destination registers.
+    '''
+    for reg in range(4):
+        cryptoRegRegPrefix += '''
+        srcReg1.regs[%(reg)d] = htog(FpOp1P%(reg)d_uw);
+        destReg.regs[%(reg)d] = htog(FpDestP%(reg)d_uw);
+        ''' % { "reg" : reg }
+
+    cryptoRegRegPrefix += '''
+    // cast into format passed to aes encrypt method.
+    unsigned char *output = (unsigned char *)(&destReg.regs[0]);
+    unsigned char *input = (unsigned char *)(&srcReg1.regs[0]);
+    '''
+
+    def cryptoRegRegRegInst(name, Name, opClass, enable_check, crypto_func):
+        '''Emit declaration, constructor and execute code for a
+        three-operand crypto instruction based on RegRegRegOp.'''
+        global header_output, decoder_output, exec_output
+
+        crypto_prefix = enable_check + cryptoRegRegRegPrefix
+        cryptocode = crypto_prefix + crypto_func + cryptoSuffix
+
+        cryptoiop = InstObjParams(name, Name, "RegRegRegOp",
+                                  { "code": cryptocode,
+                                    "r_count": 4,
+                                    "predicate_test": predicateTest,
+                                    "op_class": opClass}, [])
+        header_output += RegRegRegOpDeclare.subst(cryptoiop)
+        decoder_output += RegRegRegOpConstructor.subst(cryptoiop)
+        exec_output += CryptoPredOpExecute.subst(cryptoiop)
+
+    def cryptoRegRegInst(name, Name, opClass, enable_check, crypto_func):
+        '''Emit declaration, constructor and execute code for a
+        two-operand crypto instruction based on RegRegOp.'''
+        global header_output, decoder_output, exec_output
+
+        crypto_prefix = enable_check + cryptoRegRegPrefix
+        cryptocode = crypto_prefix + crypto_func + cryptoSuffix
+
+        cryptoiop = InstObjParams(name, Name, "RegRegOp",
+                                  { "code": cryptocode,
+                                    "r_count": 4,
+                                    "predicate_test": predicateTest,
+                                    "op_class": opClass}, [])
+        header_output += RegRegOpDeclare.subst(cryptoiop)
+        decoder_output += RegRegOpConstructor.subst(cryptoiop)
+        exec_output += CryptoPredOpExecute.subst(cryptoiop)
+
+    def cryptoRegRegImmInst(name, Name, opClass, enable_check, crypto_func):
+        '''Emit declaration, constructor and execute code for a
+        reg-reg-immediate crypto instruction based on RegRegImmOp.
+        NOTE(review): not referenced in this file -- presumably kept
+        for forthcoming crypto (e.g. AES) instructions; confirm.'''
+        global header_output, decoder_output, exec_output
+
+        crypto_prefix = enable_check + cryptoRegRegPrefix
+        cryptocode = crypto_prefix + crypto_func + cryptoSuffix
+
+        cryptoiop = InstObjParams(name, Name, "RegRegImmOp",
+                                  { "code": cryptocode,
+                                    "r_count": 4,
+                                    "predicate_test": predicateTest,
+                                    "op_class": opClass}, [])
+        header_output += RegRegImmOpDeclare.subst(cryptoiop)
+        decoder_output += RegRegImmOpConstructor.subst(cryptoiop)
+        exec_output += CryptoPredOpExecute.subst(cryptoiop)
+
+    # One-line C++ bodies invoking the Crypto helper for each op.
+    sha1_cCode = "crypto.sha1C(output, input, input2);"
+    sha1_pCode = "crypto.sha1P(output, input, input2);"
+    sha1_mCode = "crypto.sha1M(output, input, input2);"
+    sha1_hCode = "crypto.sha1H(output, input);"
+    sha1_su0Code = "crypto.sha1Su0(output, input, input2);"
+    sha1_su1Code = "crypto.sha1Su1(output, input);"
+
+    sha256_hCode = "crypto.sha256H(output, input, input2);"
+    sha256_h2Code = "crypto.sha256H2(output, input, input2);"
+    sha256_su0Code = "crypto.sha256Su0(output, input);"
+    sha256_su1Code = "crypto.sha256Su1(output, input, input2);"
+
+    # SHA1 instructions gate on the ID_ISAR5 SHA1 field (mask 0xF00).
+    sha1_enabled = cryptoEnabledCheckCode % { "mask" : 0xF00 }
+    cryptoRegRegRegInst("sha1c", "SHA1C", "SimdSha1HashOp",
+                        sha1_enabled, sha1_cCode)
+    cryptoRegRegRegInst("sha1p", "SHA1P", "SimdSha1HashOp",
+                        sha1_enabled, sha1_pCode)
+    cryptoRegRegRegInst("sha1m", "SHA1M", "SimdSha1HashOp",
+                        sha1_enabled, sha1_mCode)
+    cryptoRegRegInst("sha1h", "SHA1H", "SimdSha1Hash2Op",
+                     sha1_enabled, sha1_hCode)
+    cryptoRegRegRegInst("sha1su0", "SHA1SU0", "SimdShaSigma3Op",
+                        sha1_enabled, sha1_su0Code)
+    cryptoRegRegInst("sha1su1", "SHA1SU1", "SimdShaSigma2Op",
+                     sha1_enabled, sha1_su1Code)
+
+    # SHA256 instructions gate on the ID_ISAR5 SHA2 field (mask 0xF000).
+    sha2_enabled = cryptoEnabledCheckCode % { "mask" : 0xF000 }
+    cryptoRegRegRegInst("sha256h", "SHA256H", "SimdSha256HashOp",
+                        sha2_enabled, sha256_hCode)
+    cryptoRegRegRegInst("sha256h2", "SHA256H2", "SimdSha256Hash2Op",
+                        sha2_enabled, sha256_h2Code)
+    cryptoRegRegInst("sha256su0", "SHA256SU0", "SimdShaSigma2Op",
+                     sha2_enabled, sha256_su0Code)
+    cryptoRegRegRegInst("sha256su1", "SHA256SU1", "SimdShaSigma3Op",
+                        sha2_enabled, sha256_su1Code)
+}};
//m5 Pseudo-ops
##include "m5ops.isa"
+
+//Crypto
+##include "crypto.isa"
--- /dev/null
+// -*- mode:c++ -*-
+
+// Copyright (c) 2018 ARM Limited
+// All rights reserved
+//
+// The license below extends only to copyright in the software and shall
+// not be construed as granting a license to any other intellectual
+// property including but not limited to intellectual property relating
+// to a hardware implementation of the functionality of the software
+// licensed hereunder. You may use the software subject to the license
+// terms below provided that you ensure that this notice is replicated
+// unmodified and in its entirety in all distributions of the software,
+// modified or unmodified, in source code or in binary form.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met: redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer;
+// redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution;
+// neither the name of the copyright holders nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Matt Horsnell
+
+// These currently all work on quad words, so some element/register
+// storage/extraction here is fixed as constants.
+def template CryptoPredOpExecute {{
+    // Execute template shared by all crypto instructions: read the
+    // source registers, run the per-instruction body under the
+    // predicate, and write the destination back on success.
+    Fault %(class_name)s::execute(ExecContext *xc,
+        Trace::InstRecord *traceData) const
+    {
+        Fault fault = NoFault;
+        %(op_decl)s;
+        %(op_rd)s;
+
+        const unsigned rCount = %(r_count)d;
+
+        // View of a quad register as rCount 32-bit lanes; the
+        // instruction body addresses these lanes byte-wise.
+        union RegVect {
+            FloatRegBits regs[rCount];
+        };
+
+        if (%(predicate_test)s)
+        {
+            %(code)s;
+            if (fault == NoFault)
+            {
+                %(op_wb)s;
+            }
+        } else {
+            // Predicated false: no register update.
+            xc->setPredicate(false);
+        }
+
+        return fault;
+    }
+}};
//Templates for Neon instructions
##include "neon.isa"
+//Templates for Crypto instructions
+##include "crypto.isa"
+
##include "neon64.isa"
-# Copyright (c) 2010 ARM Limited
+# Copyright (c) 2010,2018 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
'SimdSqrt', 'SimdFloatAdd', 'SimdFloatAlu', 'SimdFloatCmp',
'SimdFloatCvt', 'SimdFloatDiv', 'SimdFloatMisc', 'SimdFloatMult',
'SimdFloatMultAcc', 'SimdFloatSqrt',
+ 'SimdSha1Hash', 'SimdSha1Hash2', 'SimdSha256Hash',
+ 'SimdSha256Hash2', 'SimdShaSigma2', 'SimdShaSigma3',
'MemRead', 'MemWrite', 'FloatMemRead', 'FloatMemWrite',
'IprAccess', 'InstPrefetch']
/*
- * Copyright (c) 2010 ARM Limited
+ * Copyright (c) 2010,2018 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
static const OpClass SimdFloatMultOp = Enums::SimdFloatMult;
static const OpClass SimdFloatMultAccOp = Enums::SimdFloatMultAcc;
static const OpClass SimdFloatSqrtOp = Enums::SimdFloatSqrt;
+// Op classes for the SHA1/SHA256 crypto instructions.
+static const OpClass SimdSha1HashOp = Enums::SimdSha1Hash;
+static const OpClass SimdSha1Hash2Op = Enums::SimdSha1Hash2;
+static const OpClass SimdSha256HashOp = Enums::SimdSha256Hash;
+static const OpClass SimdSha256Hash2Op = Enums::SimdSha256Hash2;
+static const OpClass SimdShaSigma2Op = Enums::SimdShaSigma2;
+static const OpClass SimdShaSigma3Op = Enums::SimdShaSigma3;
static const OpClass MemReadOp = Enums::MemRead;
static const OpClass MemWriteOp = Enums::MemWrite;
static const OpClass FloatMemReadOp = Enums::FloatMemRead;