Preparation for #1.4.0 (#30)

- Fixed CPU affinity on Windows for NUMA systems and CPUs with a lot of cores
- Implemented per-thread configurable multi-hash mode (double, triple, quadruple, quintuple)
- Rebased from XMRig 2.4.4
Ben Gräf 2018-01-19 19:42:06 +01:00 committed by GitHub
parent 5f8ea98764
commit acf27e9341
41 changed files with 2575 additions and 1104 deletions

View file

@ -5,6 +5,8 @@
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2016-2017 XMRig <support@xmrig.com>
* Copyright 2018 Sebastian Stolzenberg <https://github.com/sebastianstolzenberg>
* Copyright 2018 BenDroid <ben@graef.in>
*
*
* This program is free software: you can redistribute it and/or modify
@ -21,7 +23,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "crypto/CryptoNight.h"
#if defined(XMRIG_ARM)
@ -31,129 +32,105 @@
#endif
#include "crypto/CryptoNight_test.h"
#include "net/Job.h"
#include "net/JobResult.h"
#include "Options.h"
void (*cryptonight_hash_ctx_s)(const void *input, size_t size, void *output, cryptonight_ctx *ctx) = nullptr;
void (*cryptonight_hash_ctx_d)(const void *input, size_t size, void *output, cryptonight_ctx *ctx) = nullptr;
static void cryptonight_av1_aesni(const void *input, size_t size, void *output, struct cryptonight_ctx *ctx) {
template <size_t NUM_HASH_BLOCKS>
static void cryptonight_aesni(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
# if !defined(XMRIG_ARMv7)
cryptonight_hash<0x80000, MEMORY, 0x1FFFF0, false>(input, size, output, ctx);
CryptoNightMultiHash<0x80000, MEMORY, 0x1FFFF0, false, NUM_HASH_BLOCKS>::hash(input, size, output, ctx);
# endif
}
template <size_t NUM_HASH_BLOCKS>
static void cryptonight_softaes(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
CryptoNightMultiHash<0x80000, MEMORY, 0x1FFFF0, true, NUM_HASH_BLOCKS>::hash(input, size, output, ctx);
}
static void cryptonight_av2_aesni_double(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
template <size_t NUM_HASH_BLOCKS>
static void cryptonight_lite_aesni(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
# if !defined(XMRIG_ARMv7)
cryptonight_double_hash<0x80000, MEMORY, 0x1FFFF0, false>(input, size, output, ctx);
CryptoNightMultiHash<0x40000, MEMORY_LITE, 0xFFFF0, false, NUM_HASH_BLOCKS>::hash(input, size, output, ctx);
# endif
}
static void cryptonight_av3_softaes(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
cryptonight_hash<0x80000, MEMORY, 0x1FFFF0, true>(input, size, output, ctx);
template <size_t NUM_HASH_BLOCKS>
static void cryptonight_lite_softaes(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
CryptoNightMultiHash<0x40000, MEMORY_LITE, 0xFFFF0, true, NUM_HASH_BLOCKS>::hash(input, size, output, ctx);
}
void (*cryptonight_hash_ctx[MAX_NUM_HASH_BLOCKS])(const void *input, size_t size, void *output, cryptonight_ctx *ctx);
static void cryptonight_av4_softaes_double(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
cryptonight_double_hash<0x80000, MEMORY, 0x1FFFF0, true>(input, size, output, ctx);
template <size_t HASH_FACTOR>
void setCryptoNightHashMethods(Options::Algo algo, bool aesni)
{
switch (algo) {
case Options::ALGO_CRYPTONIGHT:
if (aesni) {
cryptonight_hash_ctx[HASH_FACTOR - 1] = cryptonight_aesni<HASH_FACTOR>;
} else {
cryptonight_hash_ctx[HASH_FACTOR - 1] = cryptonight_softaes<HASH_FACTOR>;
}
break;
case Options::ALGO_CRYPTONIGHT_LITE:
if (aesni) {
cryptonight_hash_ctx[HASH_FACTOR - 1] = cryptonight_lite_aesni<HASH_FACTOR>;
} else {
cryptonight_hash_ctx[HASH_FACTOR - 1] = cryptonight_lite_softaes<HASH_FACTOR>;
}
break;
}
// next iteration
setCryptoNightHashMethods<HASH_FACTOR-1>(algo, aesni);
}
static void cryptonight_lite_av1_aesni(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
# if !defined(XMRIG_ARMv7)
cryptonight_hash<0x40000, MEMORY_LITE, 0xFFFF0, false>(input, size, output, ctx);
#endif
}
static void cryptonight_lite_av2_aesni_double(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
# if !defined(XMRIG_ARMv7)
cryptonight_double_hash<0x40000, MEMORY_LITE, 0xFFFF0, false>(input, size, output, ctx);
# endif
}
static void cryptonight_lite_av3_softaes(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
cryptonight_hash<0x40000, MEMORY_LITE, 0xFFFF0, true>(input, size, output, ctx);
}
static void cryptonight_lite_av4_softaes_double(const void *input, size_t size, void *output, cryptonight_ctx *ctx) {
cryptonight_double_hash<0x40000, MEMORY_LITE, 0xFFFF0, true>(input, size, output, ctx);
}
void (*cryptonight_variations[8])(const void *input, size_t size, void *output, cryptonight_ctx *ctx) = {
cryptonight_av1_aesni,
cryptonight_av2_aesni_double,
cryptonight_av3_softaes,
cryptonight_av4_softaes_double,
cryptonight_lite_av1_aesni,
cryptonight_lite_av2_aesni_double,
cryptonight_lite_av3_softaes,
cryptonight_lite_av4_softaes_double
template <>
void setCryptoNightHashMethods<0>(Options::Algo algo, bool aesni)
{
// template recursion abort
};
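Note on the pattern above: cryptonight_hash_ctx is filled at compile time. setCryptoNightHashMethods<HASH_FACTOR> registers the entry for its own factor and then recurses with HASH_FACTOR - 1 until the <0> specialization stops the recursion, so a single call instantiates every hash variant. A minimal, self-contained sketch of the same idiom; the names below (fillTable, worker, table) are illustrative and not part of this commit:

#include <cstdio>

using fn_t = void (*)();
static fn_t table[3];                        // one slot per hash factor

template <size_t FACTOR>
static void worker() { std::printf("factor %zu\n", FACTOR); }

template <size_t FACTOR>
void fillTable()
{
    table[FACTOR - 1] = worker<FACTOR>;      // register this factor's instantiation
    fillTable<FACTOR - 1>();                 // recurse towards the abort case
}

template <>
void fillTable<0>() {}                       // recursion abort, as in the diff above

int main()
{
    fillTable<3>();                          // fills table[0..2] with worker<1..3>
    table[1]();                              // prints "factor 2"
}

In the same way, init() below needs only one call to setCryptoNightHashMethods<MAX_NUM_HASH_BLOCKS> to populate all entries.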
void CryptoNight::hash(const uint8_t* input, size_t size, uint8_t* output, cryptonight_ctx* ctx)
bool CryptoNight::init(int algo, bool aesni)
{
cryptonight_hash_ctx_s(input, size, output, ctx);
}
void CryptoNight::hashDouble(const uint8_t* input, size_t size, uint8_t* output, cryptonight_ctx* ctx)
{
cryptonight_hash_ctx_d(input, size, output, ctx);
}
bool CryptoNight::init(int algo, int variant)
{
if (variant < 1 || variant > 4)
{
return false;
}
int index = 0;
if (algo == Options::ALGO_CRYPTONIGHT_LITE) {
index += 4;
}
if (variant == 3 || variant == 4)
{
index += 2;
}
cryptonight_hash_ctx_s = cryptonight_variations[index];
cryptonight_hash_ctx_d = cryptonight_variations[index+1];
setCryptoNightHashMethods<MAX_NUM_HASH_BLOCKS>(static_cast<Options::Algo>(algo), aesni);
return selfTest(algo);
}
void CryptoNight::hash(size_t factor, const uint8_t* input, size_t size, uint8_t* output, cryptonight_ctx* ctx)
{
cryptonight_hash_ctx[factor-1](input, size, output, ctx);
}
bool CryptoNight::selfTest(int algo)
{
if (cryptonight_hash_ctx_s == nullptr || cryptonight_hash_ctx_d == nullptr) {
if (cryptonight_hash_ctx[0] == nullptr || cryptonight_hash_ctx[1] == nullptr ||
cryptonight_hash_ctx[2] == nullptr || cryptonight_hash_ctx[3] == nullptr ||
cryptonight_hash_ctx[4] == nullptr) {
return false;
}
char output[64];
char output[160];
struct cryptonight_ctx *ctx = (struct cryptonight_ctx*) _mm_malloc(sizeof(struct cryptonight_ctx), 16);
ctx->memory = (uint8_t *) _mm_malloc(MEMORY * 2, 16);
cryptonight_hash_ctx_s(test_input, 76, output, ctx);
auto ctx = (struct cryptonight_ctx*) _mm_malloc(sizeof(struct cryptonight_ctx), 16);
ctx->memory = (uint8_t *) _mm_malloc(MEMORY * 6, 16);
cryptonight_hash_ctx[0](test_input, 76, output, ctx);
bool resultSingle = memcmp(output, algo == Options::ALGO_CRYPTONIGHT_LITE ? test_output1 : test_output0, 32) == 0;
cryptonight_hash_ctx_d(test_input, 76, output, ctx);
cryptonight_hash_ctx[1](test_input, 76, output, ctx);
bool resultDouble = memcmp(output, algo == Options::ALGO_CRYPTONIGHT_LITE ? test_output1 : test_output0, 64) == 0;
cryptonight_hash_ctx[2](test_input, 76, output, ctx);
bool resultTriple = memcmp(output, algo == Options::ALGO_CRYPTONIGHT_LITE ? test_output1 : test_output0, 96) == 0;
cryptonight_hash_ctx[3](test_input, 76, output, ctx);
bool resultQuadruple = memcmp(output, algo == Options::ALGO_CRYPTONIGHT_LITE ? test_output1 : test_output0, 128) == 0;
cryptonight_hash_ctx[4](test_input, 76, output, ctx);
bool resultQuintuple = memcmp(output, algo == Options::ALGO_CRYPTONIGHT_LITE ? test_output1 : test_output0, 160) == 0;
_mm_free(ctx->memory);
_mm_free(ctx);
bool resultDouble = memcmp(output, algo == Options::ALGO_CRYPTONIGHT_LITE ? test_output1 : test_output0, 64) == 0;
return resultSingle && resultDouble;
}
return resultSingle && resultDouble && resultTriple && resultQuadruple && resultQuintuple;
}
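For reference, the two new entry points compose as follows: CryptoNight::init(algo, aesni) fills the dispatch table and runs the self-test, and CryptoNight::hash(factor, input, size, output, ctx) hashes factor consecutive input blobs at once, writing factor * 32 bytes of output. A hedged usage sketch with job and nonce handling omitted; the context allocation mirrors selfTest() above:

#include <mm_malloc.h>            // _mm_malloc / _mm_free on GCC/Clang (MSVC has them in <malloc.h>)
#include "crypto/CryptoNight.h"   // MEMORY, cryptonight_ctx, CryptoNight

// Illustrative only: one multi-hash call from a worker thread.
// CryptoNight::init(algo, aesni) must have succeeded once beforehand.
static void hashOnce(const uint8_t* input, size_t factor, uint8_t* output)
{
    // The scratchpad must hold factor * MEMORY bytes (selfTest() allocates MEMORY * 6 to cover all factors).
    auto ctx = (struct cryptonight_ctx*) _mm_malloc(sizeof(struct cryptonight_ctx), 16);
    ctx->memory = (uint8_t*) _mm_malloc(MEMORY * factor, 16);

    // input holds factor consecutive 76-byte blobs; output receives factor * 32 bytes.
    CryptoNight::hash(factor, input, 76, output, ctx);

    _mm_free(ctx->memory);
    _mm_free(ctx);
}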

View file

@ -25,20 +25,17 @@
#define __CRYPTONIGHT_H__
#include <stddef.h>
#include <stdint.h>
#include <cstddef>
#include <cstdint>
#include "align.h"
#include "Options.h"
#define MEMORY 2097152 /* 2 MiB */
#define MEMORY_LITE 1048576 /* 1 MiB */
struct cryptonight_ctx {
VAR_ALIGN(16, uint8_t state0[200]);
VAR_ALIGN(16, uint8_t state1[200]);
VAR_ALIGN(16, uint8_t state[MAX_NUM_HASH_BLOCKS][208]); // 208 instead of 200 to keep each block's state aligned to 16-byte boundaries
VAR_ALIGN(16, uint8_t* memory);
};
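The jump from 200 to 208 bytes per state is alignment arithmetic: 200 is not a multiple of 16, so the rows of a two-dimensional array would drift off 16-byte boundaries, whereas 208 = 13 * 16 keeps every row aligned for SSE loads and stores. An illustrative compile-time check, not part of the commit:

// Keccak state is 200 bytes; padding each row to 208 = 13 * 16 keeps
// state[k] starting at a 16-byte-aligned offset inside the array.
static_assert(200 % 16 != 0, "a 200-byte row would drift off 16-byte boundaries");
static_assert(208 % 16 == 0, "a 208-byte row keeps every block's state aligned");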
@ -46,16 +43,16 @@ struct cryptonight_ctx {
class Job;
class JobResult;
class CryptoNight
{
public:
static void hash(const uint8_t* input, size_t size, uint8_t* output, cryptonight_ctx* ctx);
static bool init(int algo, int variant);
static void hashDouble(const uint8_t* input, size_t size, uint8_t* output, cryptonight_ctx* ctx);
static bool init(int algo, bool aesni);
static void hash(size_t factor, const uint8_t* input, size_t size, uint8_t* output, cryptonight_ctx* ctx);
private:
static bool selfTest(int algo);
};
#endif /* __CRYPTONIGHT_H__ */

View file

@ -6,6 +6,8 @@
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2016 Imran Yusuff <https://github.com/imranyusuff>
* Copyright 2016-2017 XMRig <support@xmrig.com>
* Copyright 2018 Sebastian Stolzenberg <https://github.com/sebastianstolzenberg>
* Copyright 2018 BenDroid <ben@graef.in>
*
*
* This program is free software: you can redistribute it and/or modify
@ -47,27 +49,32 @@ extern "C"
}
static inline void do_blake_hash(const void* input, size_t len, char* output) {
static inline void do_blake_hash(const void* input, size_t len, char* output)
{
blake256_hash(reinterpret_cast<uint8_t*>(output), static_cast<const uint8_t*>(input), len);
}
static inline void do_groestl_hash(const void* input, size_t len, char* output) {
static inline void do_groestl_hash(const void* input, size_t len, char* output)
{
groestl(static_cast<const uint8_t*>(input), len * 8, reinterpret_cast<uint8_t*>(output));
}
static inline void do_jh_hash(const void* input, size_t len, char* output) {
static inline void do_jh_hash(const void* input, size_t len, char* output)
{
jh_hash(32 * 8, static_cast<const uint8_t*>(input), 8 * len, reinterpret_cast<uint8_t*>(output));
}
static inline void do_skein_hash(const void* input, size_t len, char* output) {
static inline void do_skein_hash(const void* input, size_t len, char* output)
{
xmr_skein(static_cast<const uint8_t*>(input), reinterpret_cast<uint8_t*>(output));
}
void (* const extra_hashes[4])(const void *, size_t, char *) = {do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash};
void
(* const extra_hashes[4])(const void*, size_t, char*) = {do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash};
static inline __attribute__((always_inline)) __m128i _mm_set_epi64x(const uint64_t a, const uint64_t b)
@ -94,7 +101,9 @@ static inline uint64_t __umul128(uint64_t a, uint64_t b, uint64_t* hi)
return (uint64_t) r;
}
#else
static inline uint64_t __umul128(uint64_t multiplier, uint64_t multiplicand, uint64_t *product_hi) {
static inline uint64_t __umul128(uint64_t multiplier, uint64_t multiplicand, uint64_t* product_hi)
{
// multiplier = ab = a * 2^32 + b
// multiplicand = cd = c * 2^32 + d
// ab * cd = a * c * 2^64 + (a * d + b * c) * 2^32 + b * d
@ -118,6 +127,7 @@ static inline uint64_t __umul128(uint64_t multiplier, uint64_t multiplicand, uin
return product_lo;
}
#endif
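The portable fallback follows the schoolbook split described in the comments: with multiplier = a * 2^32 + b and multiplicand = c * 2^32 + d, the full product is a*c * 2^64 + (a*d + b*c) * 2^32 + b*d, and only the carries between partial sums need care. A self-contained sketch of that decomposition (my own illustration, not copied from the file):

#include <cstdint>

// Portable 64x64 -> 128-bit multiply built from four 32x32 -> 64-bit products.
static inline uint64_t umul128_sketch(uint64_t multiplier, uint64_t multiplicand, uint64_t* product_hi)
{
    const uint64_t a = multiplier >> 32,   b = multiplier & 0xFFFFFFFF;
    const uint64_t c = multiplicand >> 32, d = multiplicand & 0xFFFFFFFF;

    const uint64_t ac = a * c, ad = a * d, bc = b * c, bd = b * d;

    const uint64_t adbc       = ad + bc;                    // middle terms
    const uint64_t adbc_carry = adbc < ad ? 1 : 0;          // carry out of the middle sum

    const uint64_t product_lo       = bd + (adbc << 32);
    const uint64_t product_lo_carry = product_lo < bd ? 1 : 0;

    *product_hi = ac + (adbc >> 32) + (adbc_carry << 32) + product_lo_carry;
    return product_lo;
}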
@ -154,18 +164,20 @@ template<uint8_t rcon>
static inline void soft_aes_genkey_sub(__m128i* xout0, __m128i* xout2)
{
__m128i xout1 = soft_aeskeygenassist<rcon>(*xout2);
xout1 = _mm_shuffle_epi32(xout1, 0xFF); // see PSHUFD, set all elems to 4th elem
xout1 = _mm_shuffle_epi32(xout1, 0xFF); // see PSHUFD, set all elems to 4th elem
*xout0 = sl_xor(*xout0);
*xout0 = _mm_xor_si128(*xout0, xout1);
xout1 = soft_aeskeygenassist<0x00>(*xout0);
xout1 = _mm_shuffle_epi32(xout1, 0xAA); // see PSHUFD, set all elems to 3rd elem
xout1 = soft_aeskeygenassist<0x00>(*xout0);
xout1 = _mm_shuffle_epi32(xout1, 0xAA); // see PSHUFD, set all elems to 3rd elem
*xout2 = sl_xor(*xout2);
*xout2 = _mm_xor_si128(*xout2, xout1);
}
template<bool SOFT_AES>
static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9)
static inline void
aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5,
__m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9)
{
__m128i xout0 = _mm_load_si128(memory);
__m128i xout2 = _mm_load_si128(memory + 1);
@ -191,7 +203,9 @@ static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, _
template<bool SOFT_AES>
static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7)
static inline void
aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6,
__m128i* x7)
{
if (SOFT_AES) {
*x0 = soft_aesenc(*x0, key);
@ -205,21 +219,21 @@ static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2,
}
# ifndef XMRIG_ARMv7
else {
*x0 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t *) x0), key));
*x1 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t *) x1), key));
*x2 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t *) x2), key));
*x3 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t *) x3), key));
*x4 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t *) x4), key));
*x5 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t *) x5), key));
*x6 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t *) x6), key));
*x7 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t *) x7), key));
*x0 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t*) x0), key));
*x1 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t*) x1), key));
*x2 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t*) x2), key));
*x3 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t*) x3), key));
*x4 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t*) x4), key));
*x5 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t*) x5), key));
*x6 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t*) x6), key));
*x7 = vaesmcq_u8(vaeseq_u8(*((uint8x16_t*) x7), key));
}
# endif
}
template<size_t MEM, bool SOFT_AES>
static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output)
static inline void cn_explode_scratchpad(const __m128i* input, __m128i* output)
{
__m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7;
__m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;
@ -259,8 +273,7 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output)
xin5 ^= k9;
xin6 ^= k9;
xin7 ^= k9;
}
else {
} else {
aes_round<SOFT_AES>(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
}
@ -277,7 +290,7 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output)
template<size_t MEM, bool SOFT_AES>
static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
static inline void cn_implode_scratchpad(const __m128i* input, __m128i* output)
{
__m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
__m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;
@ -293,8 +306,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
xout6 = _mm_load_si128(output + 10);
xout7 = _mm_load_si128(output + 11);
for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8)
{
for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) {
xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0);
xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1);
xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2);
@ -327,8 +339,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
xout5 ^= k9;
xout6 ^= k9;
xout7 ^= k9;
}
else {
} else {
aes_round<SOFT_AES>(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
}
}
@ -343,149 +354,723 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
_mm_store_si128(output + 11, xout7);
}
// n-Loop version. Seems to be a little bit slower than the hardcoded one.
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES, size_t NUM_HASH_BLOCKS>
class CryptoNightMultiHash
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
const uint8_t* l[NUM_HASH_BLOCKS];
uint64_t* h[NUM_HASH_BLOCKS];
uint64_t al[NUM_HASH_BLOCKS];
uint64_t ah[NUM_HASH_BLOCKS];
__m128i bx[NUM_HASH_BLOCKS];
uint64_t idx[NUM_HASH_BLOCKS];
for (size_t hashBlock = 0; hashBlock < NUM_HASH_BLOCKS; ++hashBlock) {
keccak(static_cast<const uint8_t*>(input) + hashBlock * size, (int) size,
ctx->state[hashBlock], 200);
}
for (size_t hashBlock = 0; hashBlock < NUM_HASH_BLOCKS; ++hashBlock) {
l[hashBlock] = ctx->memory + hashBlock * MEM;
h[hashBlock] = reinterpret_cast<uint64_t*>(ctx->state[hashBlock]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h[hashBlock], (__m128i*) l[hashBlock]);
al[hashBlock] = h[hashBlock][0] ^ h[hashBlock][4];
ah[hashBlock] = h[hashBlock][1] ^ h[hashBlock][5];
bx[hashBlock] =
_mm_set_epi64x(h[hashBlock][3] ^ h[hashBlock][7], h[hashBlock][2] ^ h[hashBlock][6]);
idx[hashBlock] = h[hashBlock][0] ^ h[hashBlock][4];
}
for (size_t i = 0; i < ITERATIONS; i++) {
for (size_t hashBlock = 0; hashBlock < NUM_HASH_BLOCKS; ++hashBlock) {
__m128i cx;
cx = _mm_load_si128((__m128i*) &l[hashBlock][idx[hashBlock] & MASK]);
if (SOFT_AES) {
cx = soft_aesenc(cx, _mm_set_epi64x(ah[hashBlock], al[hashBlock]));
} else {
cx = _mm_aesenc_si128(cx, _mm_set_epi64x(ah[hashBlock], al[hashBlock]));
}
_mm_store_si128((__m128i*) &l[hashBlock][idx[hashBlock] & MASK],
_mm_xor_si128(bx[hashBlock], cx));
idx[hashBlock] = EXTRACT64(cx);
bx[hashBlock] = cx;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l[hashBlock][idx[hashBlock] & MASK])[0];
ch = ((uint64_t*) &l[hashBlock][idx[hashBlock] & MASK])[1];
lo = __umul128(idx[hashBlock], cl, &hi);
al[hashBlock] += hi;
ah[hashBlock] += lo;
((uint64_t*) &l[hashBlock][idx[hashBlock] & MASK])[0] = al[hashBlock];
((uint64_t*) &l[hashBlock][idx[hashBlock] & MASK])[1] = ah[hashBlock];
ah[hashBlock] ^= ch;
al[hashBlock] ^= cl;
idx[hashBlock] = al[hashBlock];
}
}
for (size_t hashBlock = 0; hashBlock < NUM_HASH_BLOCKS; ++hashBlock) {
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l[hashBlock], (__m128i*) h[hashBlock]);
keccakf(h[hashBlock], 24);
extra_hashes[ctx->state[hashBlock][0] & 3](ctx->state[hashBlock], 200,
static_cast<char*>(output) + hashBlock * 32);
}
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
inline void cryptonight_hash(const void *__restrict__ input, size_t size, void *__restrict__ output, cryptonight_ctx *__restrict__ ctx)
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 1>
{
keccak(static_cast<const uint8_t*>(input), (int) size, ctx->state0, 200);
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
const uint8_t* l;
uint64_t* h;
uint64_t al;
uint64_t ah;
__m128i bx;
uint64_t idx;
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) ctx->state0, (__m128i*) ctx->memory);
keccak(static_cast<const uint8_t*>(input), (int) size, ctx->state[0], 200);
const uint8_t* l0 = ctx->memory;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state0);
l = ctx->memory;
h = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t al0 = h0[0] ^ h0[4];
uint64_t ah0 = h0[1] ^ h0[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h, (__m128i*) l);
uint64_t idx0 = h0[0] ^ h0[4];
al = h[0] ^ h[4];
ah = h[1] ^ h[5];
bx = _mm_set_epi64x(h[3] ^ h[7], h[2] ^ h[6]);
idx = h[0] ^ h[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx = _mm_load_si128((__m128i *) &l0[idx0 & MASK]);
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx = _mm_load_si128((__m128i*) &l[idx & MASK]);
if (SOFT_AES) {
cx = soft_aesenc(cx, _mm_set_epi64x(ah0, al0));
}
else {
# ifndef XMRIG_ARMv7
cx = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah0, al0);
if (SOFT_AES) {
cx = soft_aesenc(cx, _mm_set_epi64x(ah, al));
} else {
# ifndef XMRIG_ARMv7
cx = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah, al);
# endif
}
_mm_store_si128((__m128i*) &l[idx & MASK], _mm_xor_si128(bx, cx));
idx = EXTRACT64(cx);
bx = cx;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l[idx & MASK])[0];
ch = ((uint64_t*) &l[idx & MASK])[1];
lo = __umul128(idx, cl, &hi);
al += hi;
ah += lo;
((uint64_t*) &l[idx & MASK])[0] = al;
((uint64_t*) &l[idx & MASK])[1] = ah;
ah ^= ch;
al ^= cl;
idx = al;
}
_mm_store_si128((__m128i *) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx));
idx0 = EXTRACT64(cx);
bx0 = cx;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*)&l0[idx0 & MASK])[0] = al0;
((uint64_t*)&l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l, (__m128i*) h);
keccakf(h, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) ctx->memory, (__m128i*) ctx->state0);
keccakf(h0, 24);
extra_hashes[ctx->state0[0] & 3](ctx->state0, 200, static_cast<char*>(output));
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
inline void cryptonight_double_hash(const void *__restrict__ input, size_t size, void *__restrict__ output, struct cryptonight_ctx *__restrict__ ctx)
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 2>
{
keccak((const uint8_t *) input, (int) size, ctx->state0, 200);
keccak((const uint8_t *) input + size, (int) size, ctx->state1, 200);
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
keccak((const uint8_t*) input, (int) size, ctx->state[0], 200);
keccak((const uint8_t*) input + size, (int) size, ctx->state[1], 200);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state0);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state1);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state[1]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
uint64_t al0 = h0[0] ^ h0[4];
uint64_t al1 = h1[0] ^ h1[4];
uint64_t ah0 = h0[1] ^ h0[5];
uint64_t ah1 = h1[1] ^ h1[5];
uint64_t al0 = h0[0] ^h0[4];
uint64_t al1 = h1[0] ^h1[4];
uint64_t ah0 = h0[1] ^h0[5];
uint64_t ah1 = h1[1] ^h1[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
uint64_t idx0 = h0[0] ^ h0[4];
uint64_t idx1 = h1[0] ^ h1[4];
uint64_t idx0 = h0[0] ^h0[4];
uint64_t idx1 = h1[0] ^h1[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i *) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i *) &l1[idx1 & MASK]);
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i*) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i*) &l1[idx1 & MASK]);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
}
else {
# ifndef XMRIG_ARMv7
cx0 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx0, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah0, al0);
cx1 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx1, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah1, al1);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
} else {
# ifndef XMRIG_ARMv7
cx0 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx0, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah0, al0);
cx1 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx1, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah1, al1);
# endif
}
_mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
bx0 = cx0;
bx1 = cx1;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
}
_mm_store_si128((__m128i *) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i *) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
keccakf(h0, 24);
keccakf(h1, 24);
bx0 = cx0;
bx1 = cx1;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
extra_hashes[ctx->state[1][0] & 3](ctx->state[1], 200, static_cast<char*>(output) + 32);
}
};
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 3>
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
keccak((const uint8_t*) input, (int) size, ctx->state[0], 200);
keccak((const uint8_t*) input + size, (int) size, ctx->state[1], 200);
keccak((const uint8_t*) input + 2 * size, (int) size, ctx->state[2], 200);
keccakf(h0, 24);
keccakf(h1, 24);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
const uint8_t* l2 = ctx->memory + 2 * MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state[1]);
uint64_t* h2 = reinterpret_cast<uint64_t*>(ctx->state[2]);
extra_hashes[ctx->state0[0] & 3](ctx->state0, 200, static_cast<char*>(output));
extra_hashes[ctx->state1[0] & 3](ctx->state1, 200, static_cast<char*>(output) + 32);
}
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h2, (__m128i*) l2);
uint64_t al0 = h0[0] ^h0[4];
uint64_t al1 = h1[0] ^h1[4];
uint64_t al2 = h2[0] ^h2[4];
uint64_t ah0 = h0[1] ^h0[5];
uint64_t ah1 = h1[1] ^h1[5];
uint64_t ah2 = h2[1] ^h2[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
uint64_t idx0 = h0[0] ^h0[4];
uint64_t idx1 = h1[0] ^h1[4];
uint64_t idx2 = h2[0] ^h2[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i*) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i*) &l1[idx1 & MASK]);
__m128i cx2 = _mm_load_si128((__m128i*) &l2[idx2 & MASK]);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
cx2 = soft_aesenc(cx2, _mm_set_epi64x(ah2, al2));
} else {
# ifndef XMRIG_ARMv7
cx0 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx0, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah0, al0);
cx1 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx1, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah1, al1);
cx2 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx2, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah2, al2);
# endif
}
_mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
_mm_store_si128((__m128i*) &l2[idx2 & MASK], _mm_xor_si128(bx2, cx2));
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
idx2 = EXTRACT64(cx2);
bx0 = cx0;
bx1 = cx1;
bx2 = cx2;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
cl = ((uint64_t*) &l2[idx2 & MASK])[0];
ch = ((uint64_t*) &l2[idx2 & MASK])[1];
lo = __umul128(idx2, cl, &hi);
al2 += hi;
ah2 += lo;
((uint64_t*) &l2[idx2 & MASK])[0] = al2;
((uint64_t*) &l2[idx2 & MASK])[1] = ah2;
ah2 ^= ch;
al2 ^= cl;
idx2 = al2;
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l2, (__m128i*) h2);
keccakf(h0, 24);
keccakf(h1, 24);
keccakf(h2, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
extra_hashes[ctx->state[1][0] & 3](ctx->state[1], 200, static_cast<char*>(output) + 32);
extra_hashes[ctx->state[2][0] & 3](ctx->state[2], 200, static_cast<char*>(output) + 64);
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 4>
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
keccak((const uint8_t*) input, (int) size, ctx->state[0], 200);
keccak((const uint8_t*) input + size, (int) size, ctx->state[1], 200);
keccak((const uint8_t*) input + 2 * size, (int) size, ctx->state[2], 200);
keccak((const uint8_t*) input + 3 * size, (int) size, ctx->state[3], 200);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
const uint8_t* l2 = ctx->memory + 2 * MEM;
const uint8_t* l3 = ctx->memory + 3 * MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state[1]);
uint64_t* h2 = reinterpret_cast<uint64_t*>(ctx->state[2]);
uint64_t* h3 = reinterpret_cast<uint64_t*>(ctx->state[3]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h2, (__m128i*) l2);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h3, (__m128i*) l3);
uint64_t al0 = h0[0] ^h0[4];
uint64_t al1 = h1[0] ^h1[4];
uint64_t al2 = h2[0] ^h2[4];
uint64_t al3 = h3[0] ^h3[4];
uint64_t ah0 = h0[1] ^h0[5];
uint64_t ah1 = h1[1] ^h1[5];
uint64_t ah2 = h2[1] ^h2[5];
uint64_t ah3 = h3[1] ^h3[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
__m128i bx3 = _mm_set_epi64x(h3[3] ^ h3[7], h3[2] ^ h3[6]);
uint64_t idx0 = h0[0] ^h0[4];
uint64_t idx1 = h1[0] ^h1[4];
uint64_t idx2 = h2[0] ^h2[4];
uint64_t idx3 = h3[0] ^h3[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i*) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i*) &l1[idx1 & MASK]);
__m128i cx2 = _mm_load_si128((__m128i*) &l2[idx2 & MASK]);
__m128i cx3 = _mm_load_si128((__m128i*) &l3[idx3 & MASK]);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
cx2 = soft_aesenc(cx2, _mm_set_epi64x(ah2, al2));
cx3 = soft_aesenc(cx3, _mm_set_epi64x(ah3, al3));
} else {
# ifndef XMRIG_ARMv7
cx0 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx0, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah0, al0);
cx1 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx1, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah1, al1);
cx2 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx2, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah2, al2);
cx3 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx3, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah3, al3);
# endif
}
_mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
_mm_store_si128((__m128i*) &l2[idx2 & MASK], _mm_xor_si128(bx2, cx2));
_mm_store_si128((__m128i*) &l3[idx3 & MASK], _mm_xor_si128(bx3, cx3));
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
idx2 = EXTRACT64(cx2);
idx3 = EXTRACT64(cx3);
bx0 = cx0;
bx1 = cx1;
bx2 = cx2;
bx3 = cx3;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
cl = ((uint64_t*) &l2[idx2 & MASK])[0];
ch = ((uint64_t*) &l2[idx2 & MASK])[1];
lo = __umul128(idx2, cl, &hi);
al2 += hi;
ah2 += lo;
((uint64_t*) &l2[idx2 & MASK])[0] = al2;
((uint64_t*) &l2[idx2 & MASK])[1] = ah2;
ah2 ^= ch;
al2 ^= cl;
idx2 = al2;
cl = ((uint64_t*) &l3[idx3 & MASK])[0];
ch = ((uint64_t*) &l3[idx3 & MASK])[1];
lo = __umul128(idx3, cl, &hi);
al3 += hi;
ah3 += lo;
((uint64_t*) &l3[idx3 & MASK])[0] = al3;
((uint64_t*) &l3[idx3 & MASK])[1] = ah3;
ah3 ^= ch;
al3 ^= cl;
idx3 = al3;
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l2, (__m128i*) h2);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l3, (__m128i*) h3);
keccakf(h0, 24);
keccakf(h1, 24);
keccakf(h2, 24);
keccakf(h3, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
extra_hashes[ctx->state[1][0] & 3](ctx->state[1], 200, static_cast<char*>(output) + 32);
extra_hashes[ctx->state[2][0] & 3](ctx->state[2], 200, static_cast<char*>(output) + 64);
extra_hashes[ctx->state[3][0] & 3](ctx->state[3], 200, static_cast<char*>(output) + 96);
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 5>
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
keccak((const uint8_t*) input, (int) size, ctx->state[0], 200);
keccak((const uint8_t*) input + size, (int) size, ctx->state[1], 200);
keccak((const uint8_t*) input + 2 * size, (int) size, ctx->state[2], 200);
keccak((const uint8_t*) input + 3 * size, (int) size, ctx->state[3], 200);
keccak((const uint8_t*) input + 4 * size, (int) size, ctx->state[4], 200);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
const uint8_t* l2 = ctx->memory + 2 * MEM;
const uint8_t* l3 = ctx->memory + 3 * MEM;
const uint8_t* l4 = ctx->memory + 4 * MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state[1]);
uint64_t* h2 = reinterpret_cast<uint64_t*>(ctx->state[2]);
uint64_t* h3 = reinterpret_cast<uint64_t*>(ctx->state[3]);
uint64_t* h4 = reinterpret_cast<uint64_t*>(ctx->state[4]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h2, (__m128i*) l2);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h3, (__m128i*) l3);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h4, (__m128i*) l4);
uint64_t al0 = h0[0] ^h0[4];
uint64_t al1 = h1[0] ^h1[4];
uint64_t al2 = h2[0] ^h2[4];
uint64_t al3 = h3[0] ^h3[4];
uint64_t al4 = h4[0] ^h4[4];
uint64_t ah0 = h0[1] ^h0[5];
uint64_t ah1 = h1[1] ^h1[5];
uint64_t ah2 = h2[1] ^h2[5];
uint64_t ah3 = h3[1] ^h3[5];
uint64_t ah4 = h4[1] ^h4[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
__m128i bx3 = _mm_set_epi64x(h3[3] ^ h3[7], h3[2] ^ h3[6]);
__m128i bx4 = _mm_set_epi64x(h4[3] ^ h4[7], h4[2] ^ h4[6]);
uint64_t idx0 = h0[0] ^h0[4];
uint64_t idx1 = h1[0] ^h1[4];
uint64_t idx2 = h2[0] ^h2[4];
uint64_t idx3 = h3[0] ^h3[4];
uint64_t idx4 = h4[0] ^h4[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i*) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i*) &l1[idx1 & MASK]);
__m128i cx2 = _mm_load_si128((__m128i*) &l2[idx2 & MASK]);
__m128i cx3 = _mm_load_si128((__m128i*) &l3[idx3 & MASK]);
__m128i cx4 = _mm_load_si128((__m128i*) &l4[idx4 & MASK]);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
cx2 = soft_aesenc(cx2, _mm_set_epi64x(ah2, al2));
cx3 = soft_aesenc(cx3, _mm_set_epi64x(ah3, al3));
cx4 = soft_aesenc(cx4, _mm_set_epi64x(ah4, al4));
} else {
# ifndef XMRIG_ARMv7
cx0 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx0, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah0, al0);
cx1 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx1, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah1, al1);
cx2 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx2, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah2, al2);
cx3 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx3, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah3, al3);
cx4 = vreinterpretq_m128i_u8(vaesmcq_u8(vaeseq_u8(cx4, vdupq_n_u8(0)))) ^ _mm_set_epi64x(ah4, al4);
# endif
}
_mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
_mm_store_si128((__m128i*) &l2[idx2 & MASK], _mm_xor_si128(bx2, cx2));
_mm_store_si128((__m128i*) &l3[idx3 & MASK], _mm_xor_si128(bx3, cx3));
_mm_store_si128((__m128i*) &l4[idx4 & MASK], _mm_xor_si128(bx4, cx4));
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
idx2 = EXTRACT64(cx2);
idx3 = EXTRACT64(cx3);
idx4 = EXTRACT64(cx4);
bx0 = cx0;
bx1 = cx1;
bx2 = cx2;
bx3 = cx3;
bx4 = cx4;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
cl = ((uint64_t*) &l2[idx2 & MASK])[0];
ch = ((uint64_t*) &l2[idx2 & MASK])[1];
lo = __umul128(idx2, cl, &hi);
al2 += hi;
ah2 += lo;
((uint64_t*) &l2[idx2 & MASK])[0] = al2;
((uint64_t*) &l2[idx2 & MASK])[1] = ah2;
ah2 ^= ch;
al2 ^= cl;
idx2 = al2;
cl = ((uint64_t*) &l3[idx3 & MASK])[0];
ch = ((uint64_t*) &l3[idx3 & MASK])[1];
lo = __umul128(idx3, cl, &hi);
al3 += hi;
ah3 += lo;
((uint64_t*) &l3[idx3 & MASK])[0] = al3;
((uint64_t*) &l3[idx3 & MASK])[1] = ah3;
ah3 ^= ch;
al3 ^= cl;
idx3 = al3;
cl = ((uint64_t*) &l4[idx4 & MASK])[0];
ch = ((uint64_t*) &l4[idx4 & MASK])[1];
lo = __umul128(idx4, cl, &hi);
al4 += hi;
ah4 += lo;
((uint64_t*) &l4[idx4 & MASK])[0] = al4;
((uint64_t*) &l4[idx4 & MASK])[1] = ah4;
ah4 ^= ch;
al4 ^= cl;
idx4 = al4;
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l2, (__m128i*) h2);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l3, (__m128i*) h3);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l4, (__m128i*) h4);
keccakf(h0, 24);
keccakf(h1, 24);
keccakf(h2, 24);
keccakf(h3, 24);
keccakf(h4, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
extra_hashes[ctx->state[1][0] & 3](ctx->state[1], 200, static_cast<char*>(output) + 32);
extra_hashes[ctx->state[2][0] & 3](ctx->state[2], 200, static_cast<char*>(output) + 64);
extra_hashes[ctx->state[3][0] & 3](ctx->state[3], 200, static_cast<char*>(output) + 96);
extra_hashes[ctx->state[4][0] & 3](ctx->state[4], 200, static_cast<char*>(output) + 128);
}
};
#endif /* __CRYPTONIGHT_ARM_H__ */

View file

@ -25,7 +25,7 @@
#define __CRYPTONIGHT_TEST_H__
const static uint8_t test_input[152] = {
const static uint8_t test_input[456] = {
0x01, 0x00, 0xFB, 0x8E, 0x8A, 0xC8, 0x05, 0x89, 0x93, 0x23, 0x37, 0x1B, 0xB7, 0x90, 0xDB, 0x19,
0x21, 0x8A, 0xFD, 0x8D, 0xB8, 0xE3, 0x75, 0x5D, 0x8B, 0x90, 0xF3, 0x9B, 0x3D, 0x55, 0x06, 0xA9,
0xAB, 0xCE, 0x4F, 0xA9, 0x12, 0x24, 0x45, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x81, 0x46, 0xD4, 0x9F,
@ -35,23 +35,59 @@ const static uint8_t test_input[152] = {
0x7C, 0xBF, 0x34, 0x14, 0x43, 0x32, 0xEC, 0xBF, 0xC2, 0x2E, 0xD9, 0x5C, 0x87, 0x00, 0x38, 0x3B,
0x30, 0x9A, 0xCE, 0x19, 0x23, 0xA0, 0x96, 0x4B, 0x00, 0x00, 0x00, 0x08, 0xBA, 0x93, 0x9A, 0x62,
0x72, 0x4C, 0x0D, 0x75, 0x81, 0xFC, 0xE5, 0x76, 0x1E, 0x9D, 0x8A, 0x0E, 0x6A, 0x1C, 0x3F, 0x92,
0x4F, 0xDD, 0x84, 0x93, 0xD1, 0x11, 0x56, 0x49, 0xC0, 0x5E, 0xB6, 0x01,
0x01, 0x00, 0xFB, 0x8E, 0x8A, 0xC8, 0x05, 0x89, 0x93, 0x23, 0x37, 0x1B, 0xB7, 0x90, 0xDB, 0x19,
0x21, 0x8A, 0xFD, 0x8D, 0xB8, 0xE3, 0x75, 0x5D, 0x8B, 0x90, 0xF3, 0x9B, 0x3D, 0x55, 0x06, 0xA9,
0xAB, 0xCE, 0x4F, 0xA9, 0x12, 0x24, 0x45, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x81, 0x46, 0xD4, 0x9F,
0xA9, 0x3E, 0xE7, 0x24, 0xDE, 0xB5, 0x7D, 0x12, 0xCB, 0xC6, 0xC6, 0xF3, 0xB9, 0x24, 0xD9, 0x46,
0x12, 0x7C, 0x7A, 0x97, 0x41, 0x8F, 0x93, 0x48, 0x82, 0x8F, 0x0F, 0x02,
0x03, 0x05, 0xA0, 0xDB, 0xD6, 0xBF, 0x05, 0xCF, 0x16, 0xE5, 0x03, 0xF3, 0xA6, 0x6F, 0x78, 0x00,
0x7C, 0xBF, 0x34, 0x14, 0x43, 0x32, 0xEC, 0xBF, 0xC2, 0x2E, 0xD9, 0x5C, 0x87, 0x00, 0x38, 0x3B,
0x30, 0x9A, 0xCE, 0x19, 0x23, 0xA0, 0x96, 0x4B, 0x00, 0x00, 0x00, 0x08, 0xBA, 0x93, 0x9A, 0x62,
0x72, 0x4C, 0x0D, 0x75, 0x81, 0xFC, 0xE5, 0x76, 0x1E, 0x9D, 0x8A, 0x0E, 0x6A, 0x1C, 0x3F, 0x92,
0x4F, 0xDD, 0x84, 0x93, 0xD1, 0x11, 0x56, 0x49, 0xC0, 0x5E, 0xB6, 0x01,
0x01, 0x00, 0xFB, 0x8E, 0x8A, 0xC8, 0x05, 0x89, 0x93, 0x23, 0x37, 0x1B, 0xB7, 0x90, 0xDB, 0x19,
0x21, 0x8A, 0xFD, 0x8D, 0xB8, 0xE3, 0x75, 0x5D, 0x8B, 0x90, 0xF3, 0x9B, 0x3D, 0x55, 0x06, 0xA9,
0xAB, 0xCE, 0x4F, 0xA9, 0x12, 0x24, 0x45, 0x00, 0x00, 0x00, 0x00, 0xEE, 0x81, 0x46, 0xD4, 0x9F,
0xA9, 0x3E, 0xE7, 0x24, 0xDE, 0xB5, 0x7D, 0x12, 0xCB, 0xC6, 0xC6, 0xF3, 0xB9, 0x24, 0xD9, 0x46,
0x12, 0x7C, 0x7A, 0x97, 0x41, 0x8F, 0x93, 0x48, 0x82, 0x8F, 0x0F, 0x02,
0x03, 0x05, 0xA0, 0xDB, 0xD6, 0xBF, 0x05, 0xCF, 0x16, 0xE5, 0x03, 0xF3, 0xA6, 0x6F, 0x78, 0x00,
0x7C, 0xBF, 0x34, 0x14, 0x43, 0x32, 0xEC, 0xBF, 0xC2, 0x2E, 0xD9, 0x5C, 0x87, 0x00, 0x38, 0x3B,
0x30, 0x9A, 0xCE, 0x19, 0x23, 0xA0, 0x96, 0x4B, 0x00, 0x00, 0x00, 0x08, 0xBA, 0x93, 0x9A, 0x62,
0x72, 0x4C, 0x0D, 0x75, 0x81, 0xFC, 0xE5, 0x76, 0x1E, 0x9D, 0x8A, 0x0E, 0x6A, 0x1C, 0x3F, 0x92,
0x4F, 0xDD, 0x84, 0x93, 0xD1, 0x11, 0x56, 0x49, 0xC0, 0x5E, 0xB6, 0x01
};
const static uint8_t test_output0[64] = {
const static uint8_t test_output0[192] = {
0x1B, 0x60, 0x6A, 0x3F, 0x4A, 0x07, 0xD6, 0x48, 0x9A, 0x1B, 0xCD, 0x07, 0x69, 0x7B, 0xD1, 0x66,
0x96, 0xB6, 0x1C, 0x8A, 0xE9, 0x82, 0xF6, 0x1A, 0x90, 0x16, 0x0F, 0x4E, 0x52, 0x82, 0x8A, 0x7F,
0x1A, 0x3F, 0xFB, 0xEE, 0x90, 0x9B, 0x42, 0x0D, 0x91, 0xF7, 0xBE, 0x6E, 0x5F, 0xB5, 0x6D, 0xB7,
0x1B, 0x31, 0x10, 0xD8, 0x86, 0x01, 0x1E, 0x87, 0x7E, 0xE5, 0x78, 0x6A, 0xFD, 0x08, 0x01, 0x00,
0x1B, 0x60, 0x6A, 0x3F, 0x4A, 0x07, 0xD6, 0x48, 0x9A, 0x1B, 0xCD, 0x07, 0x69, 0x7B, 0xD1, 0x66,
0x96, 0xB6, 0x1C, 0x8A, 0xE9, 0x82, 0xF6, 0x1A, 0x90, 0x16, 0x0F, 0x4E, 0x52, 0x82, 0x8A, 0x7F,
0x1A, 0x3F, 0xFB, 0xEE, 0x90, 0x9B, 0x42, 0x0D, 0x91, 0xF7, 0xBE, 0x6E, 0x5F, 0xB5, 0x6D, 0xB7,
0x1B, 0x31, 0x10, 0xD8, 0x86, 0x01, 0x1E, 0x87, 0x7E, 0xE5, 0x78, 0x6A, 0xFD, 0x08, 0x01, 0x00,
0x1B, 0x60, 0x6A, 0x3F, 0x4A, 0x07, 0xD6, 0x48, 0x9A, 0x1B, 0xCD, 0x07, 0x69, 0x7B, 0xD1, 0x66,
0x96, 0xB6, 0x1C, 0x8A, 0xE9, 0x82, 0xF6, 0x1A, 0x90, 0x16, 0x0F, 0x4E, 0x52, 0x82, 0x8A, 0x7F,
0x1A, 0x3F, 0xFB, 0xEE, 0x90, 0x9B, 0x42, 0x0D, 0x91, 0xF7, 0xBE, 0x6E, 0x5F, 0xB5, 0x6D, 0xB7,
0x1B, 0x31, 0x10, 0xD8, 0x86, 0x01, 0x1E, 0x87, 0x7E, 0xE5, 0x78, 0x6A, 0xFD, 0x08, 0x01, 0x00
};
const static uint8_t test_output1[64] = {
const static uint8_t test_output1[192] = {
0x28, 0xA2, 0x2B, 0xAD, 0x3F, 0x93, 0xD1, 0x40, 0x8F, 0xCA, 0x47, 0x2E, 0xB5, 0xAD, 0x1C, 0xBE,
0x75, 0xF2, 0x1D, 0x05, 0x3C, 0x8C, 0xE5, 0xB3, 0xAF, 0x10, 0x5A, 0x57, 0x71, 0x3E, 0x21, 0xDD,
0x36, 0x95, 0xB4, 0xB5, 0x3B, 0xB0, 0x03, 0x58, 0xB0, 0xAD, 0x38, 0xDC, 0x16, 0x0F, 0xEB, 0x9E,
0x00, 0x4E, 0xEC, 0xE0, 0x9B, 0x83, 0xA7, 0x2E, 0xF6, 0xBA, 0x98, 0x64, 0xD3, 0x51, 0x0C, 0x88,
0x28, 0xA2, 0x2B, 0xAD, 0x3F, 0x93, 0xD1, 0x40, 0x8F, 0xCA, 0x47, 0x2E, 0xB5, 0xAD, 0x1C, 0xBE,
0x75, 0xF2, 0x1D, 0x05, 0x3C, 0x8C, 0xE5, 0xB3, 0xAF, 0x10, 0x5A, 0x57, 0x71, 0x3E, 0x21, 0xDD,
0x36, 0x95, 0xB4, 0xB5, 0x3B, 0xB0, 0x03, 0x58, 0xB0, 0xAD, 0x38, 0xDC, 0x16, 0x0F, 0xEB, 0x9E,
0x00, 0x4E, 0xEC, 0xE0, 0x9B, 0x83, 0xA7, 0x2E, 0xF6, 0xBA, 0x98, 0x64, 0xD3, 0x51, 0x0C, 0x88,
0x28, 0xA2, 0x2B, 0xAD, 0x3F, 0x93, 0xD1, 0x40, 0x8F, 0xCA, 0x47, 0x2E, 0xB5, 0xAD, 0x1C, 0xBE,
0x75, 0xF2, 0x1D, 0x05, 0x3C, 0x8C, 0xE5, 0xB3, 0xAF, 0x10, 0x5A, 0x57, 0x71, 0x3E, 0x21, 0xDD,
0x36, 0x95, 0xB4, 0xB5, 0x3B, 0xB0, 0x03, 0x58, 0xB0, 0xAD, 0x38, 0xDC, 0x16, 0x0F, 0xEB, 0x9E,
0x00, 0x4E, 0xEC, 0xE0, 0x9B, 0x83, 0xA7, 0x2E, 0xF6, 0xBA, 0x98, 0x64, 0xD3, 0x51, 0x0C, 0x88
};
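These reference vectors are the 32-byte per-block digests repeated back to back, so selfTest() can compare the first factor * 32 bytes of each multi-hash output against one array for factors 1 through 5 (the arrays are 192 bytes, more than the 160 actually compared). A loop-form sketch equivalent to the five unrolled checks in CryptoNight.cpp; illustrative only, the symbols are the ones introduced in this commit:

#include <cstring>   // memcmp

static bool selfTestSketch(int algo, cryptonight_ctx* ctx)
{
    char output[160];
    const uint8_t* expected = algo == Options::ALGO_CRYPTONIGHT_LITE ? test_output1 : test_output0;

    for (size_t factor = 1; factor <= 5; ++factor) {
        cryptonight_hash_ctx[factor - 1](test_input, 76, output, ctx);
        if (memcmp(output, expected, factor * 32) != 0) {
            return false;
        }
    }
    return true;
}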

View file

@ -5,6 +5,8 @@
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2016-2017 XMRig <support@xmrig.com>
* Copyright 2018 Sebastian Stolzenberg <https://github.com/sebastianstolzenberg>
* Copyright 2018 BenDroid <ben@graef.in>
*
*
* This program is free software: you can redistribute it and/or modify
@ -47,42 +49,47 @@ extern "C"
}
static inline void do_blake_hash(const void* input, size_t len, char* output) {
static inline void do_blake_hash(const void* input, size_t len, char* output)
{
blake256_hash(reinterpret_cast<uint8_t*>(output), static_cast<const uint8_t*>(input), len);
}
static inline void do_groestl_hash(const void* input, size_t len, char* output) {
static inline void do_groestl_hash(const void* input, size_t len, char* output)
{
groestl(static_cast<const uint8_t*>(input), len * 8, reinterpret_cast<uint8_t*>(output));
}
static inline void do_jh_hash(const void* input, size_t len, char* output) {
static inline void do_jh_hash(const void* input, size_t len, char* output)
{
jh_hash(32 * 8, static_cast<const uint8_t*>(input), 8 * len, reinterpret_cast<uint8_t*>(output));
}
static inline void do_skein_hash(const void* input, size_t len, char* output) {
static inline void do_skein_hash(const void* input, size_t len, char* output)
{
xmr_skein(static_cast<const uint8_t*>(input), reinterpret_cast<uint8_t*>(output));
}
void (* const extra_hashes[4])(const void *, size_t, char *) = {do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash};
void (* const extra_hashes[4])(const void*, size_t, char*) = {do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash};
#if defined(__x86_64__) || defined(_M_AMD64)
# define EXTRACT64(X) _mm_cvtsi128_si64(X)
# ifdef __GNUC__
static inline uint64_t __umul128(uint64_t a, uint64_t b, uint64_t* hi)
{
unsigned __int128 r = (unsigned __int128) a * (unsigned __int128) b;
*hi = r >> 64;
return (uint64_t) r;
}
# else
#define __umul128 _umul128
#define __umul128 _umul128
# endif
#elif defined(__i386__) || defined(_M_IX86)
# define HI32(X) \
@ -139,11 +146,11 @@ template<uint8_t rcon>
static inline void aes_genkey_sub(__m128i* xout0, __m128i* xout2)
{
__m128i xout1 = _mm_aeskeygenassist_si128(*xout2, rcon);
xout1 = _mm_shuffle_epi32(xout1, 0xFF); // see PSHUFD, set all elems to 4th elem
xout1 = _mm_shuffle_epi32(xout1, 0xFF); // see PSHUFD, set all elems to 4th elem
*xout0 = sl_xor(*xout0);
*xout0 = _mm_xor_si128(*xout0, xout1);
xout1 = _mm_aeskeygenassist_si128(*xout0, 0x00);
xout1 = _mm_shuffle_epi32(xout1, 0xAA); // see PSHUFD, set all elems to 3rd elem
xout1 = _mm_aeskeygenassist_si128(*xout0, 0x00);
xout1 = _mm_shuffle_epi32(xout1, 0xAA); // see PSHUFD, set all elems to 3rd elem
*xout2 = sl_xor(*xout2);
*xout2 = _mm_xor_si128(*xout2, xout1);
}
@ -153,18 +160,20 @@ template<uint8_t rcon>
static inline void soft_aes_genkey_sub(__m128i* xout0, __m128i* xout2)
{
__m128i xout1 = soft_aeskeygenassist<rcon>(*xout2);
xout1 = _mm_shuffle_epi32(xout1, 0xFF); // see PSHUFD, set all elems to 4th elem
xout1 = _mm_shuffle_epi32(xout1, 0xFF); // see PSHUFD, set all elems to 4th elem
*xout0 = sl_xor(*xout0);
*xout0 = _mm_xor_si128(*xout0, xout1);
xout1 = soft_aeskeygenassist<0x00>(*xout0);
xout1 = _mm_shuffle_epi32(xout1, 0xAA); // see PSHUFD, set all elems to 3rd elem
xout1 = soft_aeskeygenassist<0x00>(*xout0);
xout1 = _mm_shuffle_epi32(xout1, 0xAA); // see PSHUFD, set all elems to 3rd elem
*xout2 = sl_xor(*xout2);
*xout2 = _mm_xor_si128(*xout2, xout1);
}
template<bool SOFT_AES>
static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9)
static inline void
aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5,
__m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9)
{
__m128i xout0 = _mm_load_si128(memory);
__m128i xout2 = _mm_load_si128(memory + 1);
@ -190,7 +199,9 @@ static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, _
template<bool SOFT_AES>
static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7)
static inline void
aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6,
__m128i* x7)
{
if (SOFT_AES) {
*x0 = soft_aesenc(*x0, key);
@ -201,8 +212,7 @@ static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2,
*x5 = soft_aesenc(*x5, key);
*x6 = soft_aesenc(*x6, key);
*x7 = soft_aesenc(*x7, key);
}
else {
} else {
*x0 = _mm_aesenc_si128(*x0, key);
*x1 = _mm_aesenc_si128(*x1, key);
*x2 = _mm_aesenc_si128(*x2, key);
@ -216,7 +226,7 @@ static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2,
template<size_t MEM, bool SOFT_AES>
static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output)
static inline void cn_explode_scratchpad(const __m128i* input, __m128i* output)
{
__m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7;
__m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;
@ -257,7 +267,7 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output)
template<size_t MEM, bool SOFT_AES>
static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
static inline void cn_implode_scratchpad(const __m128i* input, __m128i* output)
{
__m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
__m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;
@ -273,8 +283,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
xout6 = _mm_load_si128(output + 10);
xout7 = _mm_load_si128(output + 11);
for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8)
{
for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) {
xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0);
xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1);
xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2);
@ -306,146 +315,713 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
_mm_store_si128(output + 11, xout7);
}
// n-Loop version. Seems to be a little bit slower than the hardcoded one.
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES, size_t NUM_HASH_BLOCKS>
class CryptoNightMultiHash
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
const uint8_t* l[NUM_HASH_BLOCKS];
uint64_t* h[NUM_HASH_BLOCKS];
uint64_t al[NUM_HASH_BLOCKS];
uint64_t ah[NUM_HASH_BLOCKS];
__m128i bx[NUM_HASH_BLOCKS];
uint64_t idx[NUM_HASH_BLOCKS];
for (size_t hashBlock = 0; hashBlock < NUM_HASH_BLOCKS; ++hashBlock) {
keccak(static_cast<const uint8_t*>(input) + hashBlock * size, (int) size,
ctx->state[hashBlock], 200);
}
for (size_t hashBlock = 0; hashBlock < NUM_HASH_BLOCKS; ++hashBlock) {
l[hashBlock] = ctx->memory + hashBlock * MEM;
h[hashBlock] = reinterpret_cast<uint64_t*>(ctx->state[hashBlock]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h[hashBlock], (__m128i*) l[hashBlock]);
al[hashBlock] = h[hashBlock][0] ^ h[hashBlock][4];
ah[hashBlock] = h[hashBlock][1] ^ h[hashBlock][5];
bx[hashBlock] =
_mm_set_epi64x(h[hashBlock][3] ^ h[hashBlock][7], h[hashBlock][2] ^ h[hashBlock][6]);
idx[hashBlock] = h[hashBlock][0] ^ h[hashBlock][4];
}
for (size_t i = 0; i < ITERATIONS; i++) {
for (size_t hashBlock = 0; hashBlock < NUM_HASH_BLOCKS; ++hashBlock) {
__m128i cx;
cx = _mm_load_si128((__m128i*) &l[hashBlock][idx[hashBlock] & MASK]);
if (SOFT_AES) {
cx = soft_aesenc(cx, _mm_set_epi64x(ah[hashBlock], al[hashBlock]));
} else {
cx = _mm_aesenc_si128(cx, _mm_set_epi64x(ah[hashBlock], al[hashBlock]));
}
_mm_store_si128((__m128i*) &l[hashBlock][idx[hashBlock] & MASK],
_mm_xor_si128(bx[hashBlock], cx));
idx[hashBlock] = EXTRACT64(cx);
bx[hashBlock] = cx;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l[hashBlock][idx[hashBlock] & MASK])[0];
ch = ((uint64_t*) &l[hashBlock][idx[hashBlock] & MASK])[1];
lo = __umul128(idx[hashBlock], cl, &hi);
al[hashBlock] += hi;
ah[hashBlock] += lo;
((uint64_t*) &l[hashBlock][idx[hashBlock] & MASK])[0] = al[hashBlock];
((uint64_t*) &l[hashBlock][idx[hashBlock] & MASK])[1] = ah[hashBlock];
ah[hashBlock] ^= ch;
al[hashBlock] ^= cl;
idx[hashBlock] = al[hashBlock];
}
}
for (size_t hashBlock = 0; hashBlock < NUM_HASH_BLOCKS; ++hashBlock) {
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l[hashBlock], (__m128i*) h[hashBlock]);
keccakf(h[hashBlock], 24);
extra_hashes[ctx->state[hashBlock][0] & 3](ctx->state[hashBlock], 200,
static_cast<char*>(output) + hashBlock * 32);
}
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 1>
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
const uint8_t* l;
uint64_t* h;
uint64_t al;
uint64_t ah;
__m128i bx;
uint64_t idx;
keccak(static_cast<const uint8_t*>(input), (int) size, ctx->state[0], 200);
l = ctx->memory;
h = reinterpret_cast<uint64_t*>(ctx->state[0]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h, (__m128i*) l);
al = h[0] ^ h[4];
ah = h[1] ^ h[5];
bx = _mm_set_epi64x(h[3] ^ h[7], h[2] ^ h[6]);
idx = h[0] ^ h[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx = _mm_load_si128((__m128i*) &l[idx & MASK]);
if (SOFT_AES) {
cx = soft_aesenc(cx, _mm_set_epi64x(ah, al));
} else {
cx = _mm_aesenc_si128(cx, _mm_set_epi64x(ah, al));
}
_mm_store_si128((__m128i*) &l[idx & MASK], _mm_xor_si128(bx, cx));
idx = EXTRACT64(cx);
bx = cx;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l[idx & MASK])[0];
ch = ((uint64_t*) &l[idx & MASK])[1];
lo = __umul128(idx, cl, &hi);
al += hi;
ah += lo;
((uint64_t*) &l[idx & MASK])[0] = al;
((uint64_t*) &l[idx & MASK])[1] = ah;
ah ^= ch;
al ^= cl;
idx = al;
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l, (__m128i*) h);
keccakf(h, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 2>
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
keccak((const uint8_t*) input, (int) size, ctx->state[0], 200);
keccak((const uint8_t*) input + size, (int) size, ctx->state[1], 200);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state[1]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
uint64_t al0 = h0[0] ^h0[4];
uint64_t al1 = h1[0] ^h1[4];
uint64_t ah0 = h0[1] ^h0[5];
uint64_t ah1 = h1[1] ^h1[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
uint64_t idx0 = h0[0] ^h0[4];
uint64_t idx1 = h1[0] ^h1[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i*) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i*) &l1[idx1 & MASK]);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
} else {
cx0 = _mm_aesenc_si128(cx0, _mm_set_epi64x(ah0, al0));
cx1 = _mm_aesenc_si128(cx1, _mm_set_epi64x(ah1, al1));
}
_mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
bx0 = cx0;
bx1 = cx1;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
keccakf(h0, 24);
keccakf(h1, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
extra_hashes[ctx->state[1][0] & 3](ctx->state[1], 200, static_cast<char*>(output) + 32);
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 3>
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
keccak((const uint8_t*) input, (int) size, ctx->state[0], 200);
keccak((const uint8_t*) input + size, (int) size, ctx->state[1], 200);
keccak((const uint8_t*) input + 2 * size, (int) size, ctx->state[2], 200);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
const uint8_t* l2 = ctx->memory + 2 * MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state[1]);
uint64_t* h2 = reinterpret_cast<uint64_t*>(ctx->state[2]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h2, (__m128i*) l2);
uint64_t al0 = h0[0] ^h0[4];
uint64_t al1 = h1[0] ^h1[4];
uint64_t al2 = h2[0] ^h2[4];
uint64_t ah0 = h0[1] ^h0[5];
uint64_t ah1 = h1[1] ^h1[5];
uint64_t ah2 = h2[1] ^h2[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
uint64_t idx0 = h0[0] ^h0[4];
uint64_t idx1 = h1[0] ^h1[4];
uint64_t idx2 = h2[0] ^h2[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i*) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i*) &l1[idx1 & MASK]);
__m128i cx2 = _mm_load_si128((__m128i*) &l2[idx2 & MASK]);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
cx2 = soft_aesenc(cx2, _mm_set_epi64x(ah2, al2));
} else {
cx0 = _mm_aesenc_si128(cx0, _mm_set_epi64x(ah0, al0));
cx1 = _mm_aesenc_si128(cx1, _mm_set_epi64x(ah1, al1));
cx2 = _mm_aesenc_si128(cx2, _mm_set_epi64x(ah2, al2));
}
_mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
_mm_store_si128((__m128i*) &l2[idx2 & MASK], _mm_xor_si128(bx2, cx2));
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
idx2 = EXTRACT64(cx2);
bx0 = cx0;
bx1 = cx1;
bx2 = cx2;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
cl = ((uint64_t*) &l2[idx2 & MASK])[0];
ch = ((uint64_t*) &l2[idx2 & MASK])[1];
lo = __umul128(idx2, cl, &hi);
al2 += hi;
ah2 += lo;
((uint64_t*) &l2[idx2 & MASK])[0] = al2;
((uint64_t*) &l2[idx2 & MASK])[1] = ah2;
ah2 ^= ch;
al2 ^= cl;
idx2 = al2;
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l2, (__m128i*) h2);
keccakf(h0, 24);
keccakf(h1, 24);
keccakf(h2, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
extra_hashes[ctx->state[1][0] & 3](ctx->state[1], 200, static_cast<char*>(output) + 32);
extra_hashes[ctx->state[2][0] & 3](ctx->state[2], 200, static_cast<char*>(output) + 64);
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 4>
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
keccak((const uint8_t*) input, (int) size, ctx->state[0], 200);
keccak((const uint8_t*) input + size, (int) size, ctx->state[1], 200);
keccak((const uint8_t*) input + 2 * size, (int) size, ctx->state[2], 200);
keccak((const uint8_t*) input + 3 * size, (int) size, ctx->state[3], 200);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
const uint8_t* l2 = ctx->memory + 2 * MEM;
const uint8_t* l3 = ctx->memory + 3 * MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state[1]);
uint64_t* h2 = reinterpret_cast<uint64_t*>(ctx->state[2]);
uint64_t* h3 = reinterpret_cast<uint64_t*>(ctx->state[3]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h2, (__m128i*) l2);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h3, (__m128i*) l3);
uint64_t al0 = h0[0] ^h0[4];
uint64_t al1 = h1[0] ^h1[4];
uint64_t al2 = h2[0] ^h2[4];
uint64_t al3 = h3[0] ^h3[4];
uint64_t ah0 = h0[1] ^h0[5];
uint64_t ah1 = h1[1] ^h1[5];
uint64_t ah2 = h2[1] ^h2[5];
uint64_t ah3 = h3[1] ^h3[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
__m128i bx3 = _mm_set_epi64x(h3[3] ^ h3[7], h3[2] ^ h3[6]);
uint64_t idx0 = h0[0] ^h0[4];
uint64_t idx1 = h1[0] ^h1[4];
uint64_t idx2 = h2[0] ^h2[4];
uint64_t idx3 = h3[0] ^h3[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i*) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i*) &l1[idx1 & MASK]);
__m128i cx2 = _mm_load_si128((__m128i*) &l2[idx2 & MASK]);
__m128i cx3 = _mm_load_si128((__m128i*) &l3[idx3 & MASK]);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
cx2 = soft_aesenc(cx2, _mm_set_epi64x(ah2, al2));
cx3 = soft_aesenc(cx3, _mm_set_epi64x(ah3, al3));
} else {
cx0 = _mm_aesenc_si128(cx0, _mm_set_epi64x(ah0, al0));
cx1 = _mm_aesenc_si128(cx1, _mm_set_epi64x(ah1, al1));
cx2 = _mm_aesenc_si128(cx2, _mm_set_epi64x(ah2, al2));
cx3 = _mm_aesenc_si128(cx3, _mm_set_epi64x(ah3, al3));
}
_mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
_mm_store_si128((__m128i*) &l2[idx2 & MASK], _mm_xor_si128(bx2, cx2));
_mm_store_si128((__m128i*) &l3[idx3 & MASK], _mm_xor_si128(bx3, cx3));
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
idx2 = EXTRACT64(cx2);
idx3 = EXTRACT64(cx3);
bx0 = cx0;
bx1 = cx1;
bx2 = cx2;
bx3 = cx3;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
cl = ((uint64_t*) &l2[idx2 & MASK])[0];
ch = ((uint64_t*) &l2[idx2 & MASK])[1];
lo = __umul128(idx2, cl, &hi);
al2 += hi;
ah2 += lo;
((uint64_t*) &l2[idx2 & MASK])[0] = al2;
((uint64_t*) &l2[idx2 & MASK])[1] = ah2;
ah2 ^= ch;
al2 ^= cl;
idx2 = al2;
cl = ((uint64_t*) &l3[idx3 & MASK])[0];
ch = ((uint64_t*) &l3[idx3 & MASK])[1];
lo = __umul128(idx3, cl, &hi);
al3 += hi;
ah3 += lo;
((uint64_t*) &l3[idx3 & MASK])[0] = al3;
((uint64_t*) &l3[idx3 & MASK])[1] = ah3;
ah3 ^= ch;
al3 ^= cl;
idx3 = al3;
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l2, (__m128i*) h2);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l3, (__m128i*) h3);
keccakf(h0, 24);
keccakf(h1, 24);
keccakf(h2, 24);
keccakf(h3, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
extra_hashes[ctx->state[1][0] & 3](ctx->state[1], 200, static_cast<char*>(output) + 32);
extra_hashes[ctx->state[2][0] & 3](ctx->state[2], 200, static_cast<char*>(output) + 64);
extra_hashes[ctx->state[3][0] & 3](ctx->state[3], 200, static_cast<char*>(output) + 96);
}
};
template<size_t ITERATIONS, size_t MEM, size_t MASK, bool SOFT_AES>
class CryptoNightMultiHash<ITERATIONS, MEM, MASK, SOFT_AES, 5>
{
public:
inline static void hash(const void* __restrict__ input,
size_t size,
void* __restrict__ output,
cryptonight_ctx* __restrict__ ctx)
{
keccak((const uint8_t*) input, (int) size, ctx->state[0], 200);
keccak((const uint8_t*) input + size, (int) size, ctx->state[1], 200);
keccak((const uint8_t*) input + 2 * size, (int) size, ctx->state[2], 200);
keccak((const uint8_t*) input + 3 * size, (int) size, ctx->state[3], 200);
keccak((const uint8_t*) input + 4 * size, (int) size, ctx->state[4], 200);
const uint8_t* l0 = ctx->memory;
const uint8_t* l1 = ctx->memory + MEM;
const uint8_t* l2 = ctx->memory + 2 * MEM;
const uint8_t* l3 = ctx->memory + 3 * MEM;
const uint8_t* l4 = ctx->memory + 4 * MEM;
uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx->state[0]);
uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx->state[1]);
uint64_t* h2 = reinterpret_cast<uint64_t*>(ctx->state[2]);
uint64_t* h3 = reinterpret_cast<uint64_t*>(ctx->state[3]);
uint64_t* h4 = reinterpret_cast<uint64_t*>(ctx->state[4]);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h2, (__m128i*) l2);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h3, (__m128i*) l3);
cn_explode_scratchpad<MEM, SOFT_AES>((__m128i*) h4, (__m128i*) l4);
uint64_t al0 = h0[0] ^h0[4];
uint64_t al1 = h1[0] ^h1[4];
uint64_t al2 = h2[0] ^h2[4];
uint64_t al3 = h3[0] ^h3[4];
uint64_t al4 = h4[0] ^h4[4];
uint64_t ah0 = h0[1] ^h0[5];
uint64_t ah1 = h1[1] ^h1[5];
uint64_t ah2 = h2[1] ^h2[5];
uint64_t ah3 = h3[1] ^h3[5];
uint64_t ah4 = h4[1] ^h4[5];
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx2 = _mm_set_epi64x(h2[3] ^ h2[7], h2[2] ^ h2[6]);
__m128i bx3 = _mm_set_epi64x(h3[3] ^ h3[7], h3[2] ^ h3[6]);
__m128i bx4 = _mm_set_epi64x(h4[3] ^ h4[7], h4[2] ^ h4[6]);
uint64_t idx0 = h0[0] ^h0[4];
uint64_t idx1 = h1[0] ^h1[4];
uint64_t idx2 = h2[0] ^h2[4];
uint64_t idx3 = h3[0] ^h3[4];
uint64_t idx4 = h4[0] ^h4[4];
for (size_t i = 0; i < ITERATIONS; i++) {
__m128i cx0 = _mm_load_si128((__m128i*) &l0[idx0 & MASK]);
__m128i cx1 = _mm_load_si128((__m128i*) &l1[idx1 & MASK]);
__m128i cx2 = _mm_load_si128((__m128i*) &l2[idx2 & MASK]);
__m128i cx3 = _mm_load_si128((__m128i*) &l3[idx3 & MASK]);
__m128i cx4 = _mm_load_si128((__m128i*) &l4[idx4 & MASK]);
if (SOFT_AES) {
cx0 = soft_aesenc(cx0, _mm_set_epi64x(ah0, al0));
cx1 = soft_aesenc(cx1, _mm_set_epi64x(ah1, al1));
cx2 = soft_aesenc(cx2, _mm_set_epi64x(ah2, al2));
cx3 = soft_aesenc(cx3, _mm_set_epi64x(ah3, al3));
cx4 = soft_aesenc(cx4, _mm_set_epi64x(ah4, al4));
} else {
cx0 = _mm_aesenc_si128(cx0, _mm_set_epi64x(ah0, al0));
cx1 = _mm_aesenc_si128(cx1, _mm_set_epi64x(ah1, al1));
cx2 = _mm_aesenc_si128(cx2, _mm_set_epi64x(ah2, al2));
cx3 = _mm_aesenc_si128(cx3, _mm_set_epi64x(ah3, al3));
cx4 = _mm_aesenc_si128(cx4, _mm_set_epi64x(ah4, al4));
}
_mm_store_si128((__m128i*) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
_mm_store_si128((__m128i*) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
_mm_store_si128((__m128i*) &l2[idx2 & MASK], _mm_xor_si128(bx2, cx2));
_mm_store_si128((__m128i*) &l3[idx3 & MASK], _mm_xor_si128(bx3, cx3));
_mm_store_si128((__m128i*) &l4[idx4 & MASK], _mm_xor_si128(bx4, cx4));
idx0 = EXTRACT64(cx0);
idx1 = EXTRACT64(cx1);
idx2 = EXTRACT64(cx2);
idx3 = EXTRACT64(cx3);
idx4 = EXTRACT64(cx4);
bx0 = cx0;
bx1 = cx1;
bx2 = cx2;
bx3 = cx3;
bx4 = cx4;
uint64_t hi, lo, cl, ch;
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
lo = __umul128(idx0, cl, &hi);
al0 += hi;
ah0 += lo;
((uint64_t*) &l0[idx0 & MASK])[0] = al0;
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
ah0 ^= ch;
al0 ^= cl;
idx0 = al0;
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
lo = __umul128(idx1, cl, &hi);
al1 += hi;
ah1 += lo;
((uint64_t*) &l1[idx1 & MASK])[0] = al1;
((uint64_t*) &l1[idx1 & MASK])[1] = ah1;
ah1 ^= ch;
al1 ^= cl;
idx1 = al1;
cl = ((uint64_t*) &l2[idx2 & MASK])[0];
ch = ((uint64_t*) &l2[idx2 & MASK])[1];
lo = __umul128(idx2, cl, &hi);
al2 += hi;
ah2 += lo;
((uint64_t*) &l2[idx2 & MASK])[0] = al2;
((uint64_t*) &l2[idx2 & MASK])[1] = ah2;
ah2 ^= ch;
al2 ^= cl;
idx2 = al2;
cl = ((uint64_t*) &l3[idx3 & MASK])[0];
ch = ((uint64_t*) &l3[idx3 & MASK])[1];
lo = __umul128(idx3, cl, &hi);
al3 += hi;
ah3 += lo;
((uint64_t*) &l3[idx3 & MASK])[0] = al3;
((uint64_t*) &l3[idx3 & MASK])[1] = ah3;
ah3 ^= ch;
al3 ^= cl;
idx3 = al3;
cl = ((uint64_t*) &l4[idx4 & MASK])[0];
ch = ((uint64_t*) &l4[idx4 & MASK])[1];
lo = __umul128(idx4, cl, &hi);
al4 += hi;
ah4 += lo;
((uint64_t*) &l4[idx4 & MASK])[0] = al4;
((uint64_t*) &l4[idx4 & MASK])[1] = ah4;
ah4 ^= ch;
al4 ^= cl;
idx4 = al4;
}
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l2, (__m128i*) h2);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l3, (__m128i*) h3);
cn_implode_scratchpad<MEM, SOFT_AES>((__m128i*) l4, (__m128i*) h4);
keccakf(h0, 24);
keccakf(h1, 24);
keccakf(h2, 24);
keccakf(h3, 24);
keccakf(h4, 24);
extra_hashes[ctx->state[0][0] & 3](ctx->state[0], 200, static_cast<char*>(output));
extra_hashes[ctx->state[1][0] & 3](ctx->state[1], 200, static_cast<char*>(output) + 32);
extra_hashes[ctx->state[2][0] & 3](ctx->state[2], 200, static_cast<char*>(output) + 64);
extra_hashes[ctx->state[3][0] & 3](ctx->state[3], 200, static_cast<char*>(output) + 96);
extra_hashes[ctx->state[4][0] & 3](ctx->state[4], 200, static_cast<char*>(output) + 128);
}
};
#endif /* __CRYPTONIGHT_X86_H__ */
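
For reference, a minimal caller-side sketch of how one of the specializations above can be driven. The wrapper name, the hard-coded SOFT_AES = false argument (AES-NI path) and the buffer-size remarks are illustrative assumptions, not part of this commit; the real template parameters come from the selected algorithm.

// Illustrative usage sketch (not part of CryptoNight_x86.h).
// Hashes two input blobs of `blobSize` bytes laid out back to back and writes
// two 32-byte results. Assumes ctx->memory spans at least 2 * MEM bytes of
// scratchpad and ctx->state[0..1] holds the two 200-byte Keccak states,
// exactly as the double-hash specialization above uses them.
template<size_t ITERATIONS, size_t MEM, size_t MASK>
static void hash_two_blobs(const uint8_t* blobs, size_t blobSize, uint8_t* out, cryptonight_ctx* ctx)
{
CryptoNightMultiHash<ITERATIONS, MEM, MASK, false, 2>::hash(blobs, blobSize, out, ctx);
}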