MoneroOcean 2020-06-10 18:14:06 -07:00
commit 0ada4ca4ac
150 changed files with 12300 additions and 8764 deletions

View file

@@ -29,7 +29,7 @@
#include "crypto/astrobwt/AstroBWT.h"
#include "backend/cpu/Cpu.h"
#include "crypto/astrobwt/sha3.h"
#include "base/crypto/sha3.h"
#include "crypto/cn/CryptoNight.h"

View file

@@ -1,258 +0,0 @@
/* -------------------------------------------------------------------------
* Works when compiled for either 32-bit or 64-bit targets, optimized for
* 64 bit.
*
* Canonical implementation of Init/Update/Finalize for SHA-3 byte input.
*
* SHA3-256, SHA3-384, and SHA3-512 are implemented. SHA3-224 can easily be added.
*
* Based on code from http://keccak.noekeon.org/ .
*
* I place the code that I wrote into public domain, free to use.
*
* I would appreciate it if you give credit to this work if you used it to
* write or test your code.
*
* Aug 2015. Andrey Jivsov. crypto@brainhub.org
* ---------------------------------------------------------------------- */
#include <cstdio>
#include <cstdint>
#include <cstring>
#include "sha3.h"
#include "base/crypto/keccak.h"
#define SHA3_ASSERT( x )
#if defined(_MSC_VER)
#define SHA3_TRACE( format, ...)
#define SHA3_TRACE_BUF( format, buf, l, ...)
#else
#define SHA3_TRACE(format, args...)
#define SHA3_TRACE_BUF(format, buf, l, args...)
#endif
/*
* This flag is used to configure "pure" Keccak, as opposed to NIST SHA3.
*/
#define SHA3_USE_KECCAK_FLAG 0x80000000
#define SHA3_CW(x) ((x) & (~SHA3_USE_KECCAK_FLAG))
#if defined(_MSC_VER)
#define SHA3_CONST(x) x
#else
#define SHA3_CONST(x) x##L
#endif
#define KECCAK_ROUNDS 24
/* *************************** Public Interface ************************ */
/* For Init or Reset call these: */
sha3_return_t
sha3_Init(void *priv, unsigned bitSize) {
sha3_context *ctx = (sha3_context *) priv;
if( bitSize != 256 && bitSize != 384 && bitSize != 512 )
return SHA3_RETURN_BAD_PARAMS;
memset(ctx, 0, sizeof(*ctx));
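/* e.g. for SHA3-256: capacityWords = 2 * 256 / 64 = 8, leaving a rate of
 * 25 - 8 = 17 sponge words (136 bytes) absorbed per keccakf invocation */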
ctx->capacityWords = 2 * bitSize / (8 * sizeof(uint64_t));
return SHA3_RETURN_OK;
}
void
sha3_Init256(void *priv)
{
sha3_Init(priv, 256);
}
void
sha3_Init384(void *priv)
{
sha3_Init(priv, 384);
}
void
sha3_Init512(void *priv)
{
sha3_Init(priv, 512);
}
SHA3_FLAGS
sha3_SetFlags(void *priv, SHA3_FLAGS flags)
{
sha3_context *ctx = (sha3_context *) priv;
flags = static_cast<SHA3_FLAGS>(static_cast<int>(flags) & SHA3_FLAGS_KECCAK);
ctx->capacityWords |= (flags == SHA3_FLAGS_KECCAK ? SHA3_USE_KECCAK_FLAG : 0);
return flags;
}
void
sha3_Update(void *priv, void const *bufIn, size_t len)
{
sha3_context *ctx = (sha3_context *) priv;
/* 0...7 -- how much is needed to have a word */
unsigned old_tail = (8 - ctx->byteIndex) & 7;
size_t words;
unsigned tail;
size_t i;
const uint8_t *buf = reinterpret_cast<const uint8_t*>(bufIn);
SHA3_TRACE_BUF("called to update with:", buf, len);
SHA3_ASSERT(ctx->byteIndex < 8);
SHA3_ASSERT(ctx->wordIndex < sizeof(ctx->s) / sizeof(ctx->s[0]));
if(len < old_tail) { /* have no complete word or haven't started
* the word yet */
SHA3_TRACE("because %d<%d, store it and return", (unsigned)len,
(unsigned)old_tail);
/* endian-independent code follows: */
while (len--)
ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
SHA3_ASSERT(ctx->byteIndex < 8);
return;
}
if(old_tail) { /* will have one word to process */
SHA3_TRACE("completing one word with %d bytes", (unsigned)old_tail);
/* endian-independent code follows: */
len -= old_tail;
while (old_tail--)
ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
/* now ready to add saved to the sponge */
ctx->s[ctx->wordIndex] ^= ctx->saved;
SHA3_ASSERT(ctx->byteIndex == 8);
ctx->byteIndex = 0;
ctx->saved = 0;
if(++ctx->wordIndex ==
(SHA3_KECCAK_SPONGE_WORDS - SHA3_CW(ctx->capacityWords))) {
xmrig::keccakf(ctx->s, KECCAK_ROUNDS);
ctx->wordIndex = 0;
}
}
/* now work in full words directly from input */
SHA3_ASSERT(ctx->byteIndex == 0);
words = len / sizeof(uint64_t);
tail = len - words * sizeof(uint64_t);
SHA3_TRACE("have %d full words to process", (unsigned)words);
for(i = 0; i < words; i++, buf += sizeof(uint64_t)) {
const uint64_t t = (uint64_t) (buf[0]) |
((uint64_t) (buf[1]) << 8 * 1) |
((uint64_t) (buf[2]) << 8 * 2) |
((uint64_t) (buf[3]) << 8 * 3) |
((uint64_t) (buf[4]) << 8 * 4) |
((uint64_t) (buf[5]) << 8 * 5) |
((uint64_t) (buf[6]) << 8 * 6) |
((uint64_t) (buf[7]) << 8 * 7);
#if defined(__x86_64__ ) || defined(__i386__)
SHA3_ASSERT(memcmp(&t, buf, 8) == 0);
#endif
ctx->s[ctx->wordIndex] ^= t;
if(++ctx->wordIndex ==
(SHA3_KECCAK_SPONGE_WORDS - SHA3_CW(ctx->capacityWords))) {
xmrig::keccakf(ctx->s, KECCAK_ROUNDS);
ctx->wordIndex = 0;
}
}
SHA3_TRACE("have %d bytes left to process, save them", (unsigned)tail);
/* finally, save the partial word */
SHA3_ASSERT(ctx->byteIndex == 0 && tail < 8);
while (tail--) {
SHA3_TRACE("Store byte %02x '%c'", *buf, *buf);
ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
}
SHA3_ASSERT(ctx->byteIndex < 8);
SHA3_TRACE("Have saved=0x%016" PRIx64 " at the end", ctx->saved);
}
/* This is simply 'update' applied to the padding block. For pure Keccak the
 * padding block is 0x01 || 0x00* || 0x80 (for SHA-3 it starts with 0x06). The
 * first and last bytes are always present, but they may coincide in one byte.
 */
void const *
sha3_Finalize(void *priv)
{
sha3_context *ctx = (sha3_context *) priv;
SHA3_TRACE("called with %d bytes in the buffer", ctx->byteIndex);
/* Append the 2-bit domain suffix 01 required by the SHA-3 spec, followed by
 * the first 1 bit of the 10*1 padding. Bits enter the sponge LSB-first, so
 * the suffix becomes byte value 0x02 and the padding bit becomes 1 << 2,
 * giving 0x06 as the first padding byte. Pure Keccak, with no M || 01
 * suffix, starts its padding with 1 directly. */
uint64_t t;
if( ctx->capacityWords & SHA3_USE_KECCAK_FLAG ) {
/* Keccak version */
t = (uint64_t)(((uint64_t) 1) << (ctx->byteIndex * 8));
}
else {
/* SHA3 version */
t = (uint64_t)(((uint64_t)(0x02 | (1 << 2))) << ((ctx->byteIndex) * 8));
}
ctx->s[ctx->wordIndex] ^= ctx->saved ^ t;
ctx->s[SHA3_KECCAK_SPONGE_WORDS - SHA3_CW(ctx->capacityWords) - 1] ^=
SHA3_CONST(0x8000000000000000UL);
xmrig::keccakf(ctx->s, KECCAK_ROUNDS);
/* Return the first bytes of ctx->s. The byte-order conversion below is a
 * no-op on little-endian platforms and could be compiled out by wrapping it
 * in #if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) ||
 * __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ ... #endif */
{
unsigned i;
for(i = 0; i < SHA3_KECCAK_SPONGE_WORDS; i++) {
const unsigned t1 = (uint32_t) ctx->s[i];
const unsigned t2 = (uint32_t) ((ctx->s[i] >> 16) >> 16);
ctx->sb[i * 8 + 0] = (uint8_t) (t1);
ctx->sb[i * 8 + 1] = (uint8_t) (t1 >> 8);
ctx->sb[i * 8 + 2] = (uint8_t) (t1 >> 16);
ctx->sb[i * 8 + 3] = (uint8_t) (t1 >> 24);
ctx->sb[i * 8 + 4] = (uint8_t) (t2);
ctx->sb[i * 8 + 5] = (uint8_t) (t2 >> 8);
ctx->sb[i * 8 + 6] = (uint8_t) (t2 >> 16);
ctx->sb[i * 8 + 7] = (uint8_t) (t2 >> 24);
}
}
SHA3_TRACE_BUF("Hash: (first 32 bytes)", ctx->sb, 256 / 8);
return (ctx->sb);
}
sha3_return_t sha3_HashBuffer( unsigned bitSize, enum SHA3_FLAGS flags, const void *in, unsigned inBytes, void *out, unsigned outBytes ) {
sha3_return_t err;
sha3_context c;
err = sha3_Init(&c, bitSize);
if( err != SHA3_RETURN_OK )
return err;
if( sha3_SetFlags(&c, flags) != flags ) {
return SHA3_RETURN_BAD_PARAMS;
}
sha3_Update(&c, in, inBytes);
const void *h = sha3_Finalize(&c);
if(outBytes > bitSize/8)
outBytes = bitSize/8;
memcpy(out, h, outBytes);
return SHA3_RETURN_OK;
}
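For reference, the one-shot API removed here could be exercised like this (a minimal sketch, assuming SHA3-256 and a 32-byte digest buffer):

#include <cstdint>
#include <cstdio>
#include "sha3.h"

int main()
{
    static const char msg[] = "abc";
    uint8_t digest[32];
    // 256-bit NIST SHA-3 over 3 input bytes; outBytes may be at most bitSize / 8.
    if (sha3_HashBuffer(256, SHA3_FLAGS_NONE, msg, 3, digest, sizeof(digest)) != SHA3_RETURN_OK) {
        return 1;
    }
    for (unsigned i = 0; i < sizeof(digest); ++i) {
        printf("%02x", digest[i]);
    }
    printf("\n");
    return 0;
}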

View file

@@ -1,71 +0,0 @@
#ifndef SHA3_H
#define SHA3_H
/* -------------------------------------------------------------------------
* Works when compiled for either 32-bit or 64-bit targets, optimized for
* 64 bit.
*
* Canonical implementation of Init/Update/Finalize for SHA-3 byte input.
*
* SHA3-256, SHA3-384, and SHA3-512 are implemented. SHA3-224 can easily be added.
*
* Based on code from http://keccak.noekeon.org/ .
*
* I place the code that I wrote into public domain, free to use.
*
* I would appreciate it if you give credit to this work if you used it to
* write or test your code.
*
* Aug 2015. Andrey Jivsov. crypto@brainhub.org
* ---------------------------------------------------------------------- */
/* 'Words' here refers to uint64_t */
#define SHA3_KECCAK_SPONGE_WORDS \
(((1600)/8/*bits to byte*/)/sizeof(uint64_t))
typedef struct sha3_context_ {
uint64_t saved; /* the portion of the input message that we
* didn't consume yet */
union { /* Keccak's state */
uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
};
unsigned byteIndex; /* 0..7--the next byte after the set one
* (starts from 0; 0--none are buffered) */
unsigned wordIndex; /* 0..24--the next word to integrate input
* (starts from 0) */
unsigned capacityWords; /* the double size of the hash output in
* words (e.g. 16 for Keccak 512) */
} sha3_context;
enum SHA3_FLAGS {
SHA3_FLAGS_NONE=0,
SHA3_FLAGS_KECCAK=1
};
enum SHA3_RETURN {
SHA3_RETURN_OK=0,
SHA3_RETURN_BAD_PARAMS=1
};
typedef enum SHA3_RETURN sha3_return_t;
/* For Init or Reset call these: */
sha3_return_t sha3_Init(void *priv, unsigned bitSize);
void sha3_Init256(void *priv);
void sha3_Init384(void *priv);
void sha3_Init512(void *priv);
SHA3_FLAGS sha3_SetFlags(void *priv, SHA3_FLAGS);
void sha3_Update(void *priv, void const *bufIn, size_t len);
void const *sha3_Finalize(void *priv);
/* Single-call hashing */
sha3_return_t sha3_HashBuffer(
unsigned bitSize, /* 256, 384, 512 */
SHA3_FLAGS flags, /* SHA3_FLAGS_NONE or SHA3_FLAGS_KECCAK */
const void *in, unsigned inBytes,
void *out, unsigned outBytes ); /* up to bitSize/8; truncation OK */
#endif

View file

@@ -79,6 +79,7 @@ public:
case Algorithm::CN_HEAVY_TUBE:
case Algorithm::CN_HEAVY_XHV:
# endif
case Algorithm::CN_CCX:
return CN_ITER / 2;
case Algorithm::CN_RWZ:
@@ -89,11 +90,6 @@ public:
case Algorithm::CN_DOUBLE:
return CN_ITER * 2;
# ifdef XMRIG_ALGO_CN_GPU
case Algorithm::CN_GPU:
return 0xC000;
# endif
# ifdef XMRIG_ALGO_CN_PICO
case Algorithm::CN_PICO_0:
case Algorithm::CN_PICO_TLO:
@@ -109,12 +105,6 @@ public:
inline static uint32_t mask(Algorithm::Id algo)
{
# ifdef XMRIG_ALGO_CN_GPU
if (algo == Algorithm::CN_GPU) {
return 0x1FFFC0;
}
# endif
# ifdef XMRIG_ALGO_CN_PICO
if (algo == Algorithm::CN_PICO_0) {
return 0x1FFF0;
@@ -136,6 +126,7 @@ public:
case Algorithm::CN_HEAVY_0:
case Algorithm::CN_HEAVY_XHV:
# endif
case Algorithm::CN_CCX:
return Algorithm::CN_0;
case Algorithm::CN_1:
@@ -161,11 +152,6 @@
# endif
return Algorithm::CN_2;
# ifdef XMRIG_ALGO_CN_GPU
case Algorithm::CN_GPU:
return Algorithm::CN_GPU;
# endif
default:
break;
}
@@ -184,6 +170,7 @@ template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_XAO>::base() cons
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_LITE_0>::base() const { return Algorithm::CN_0; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_HEAVY_0>::base() const { return Algorithm::CN_0; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_HEAVY_XHV>::base() const { return Algorithm::CN_0; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_CCX>::base() const { return Algorithm::CN_0; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_1>::base() const { return Algorithm::CN_1; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_FAST>::base() const { return Algorithm::CN_1; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_RTO>::base() const { return Algorithm::CN_1; }
@@ -202,9 +189,9 @@ template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_XAO>::iterations() con
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_DOUBLE>::iterations() const { return CN_ITER * 2; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_RWZ>::iterations() const { return 0x60000; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_ZLS>::iterations() const { return 0x60000; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_GPU>::iterations() const { return 0xC000; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_0>::iterations() const { return CN_ITER / 8; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_TLO>::iterations() const { return CN_ITER / 8; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_CCX>::iterations() const { return CN_ITER / 2; }
template<> constexpr inline size_t CnAlgo<Algorithm::CN_LITE_0>::memory() const { return CN_MEMORY / 2; }
@@ -216,7 +203,6 @@ template<> constexpr inline size_t CnAlgo<Algorithm::CN_PICO_0>::memory() const
template<> constexpr inline size_t CnAlgo<Algorithm::CN_PICO_TLO>::memory() const { return CN_MEMORY / 8; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_GPU>::mask() const { return 0x1FFFC0; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_0>::mask() const { return 0x1FFF0; }

View file

@@ -252,11 +252,6 @@ xmrig::CnHash::CnHash()
ADD_FN_ASM(Algorithm::CN_ZLS);
ADD_FN_ASM(Algorithm::CN_DOUBLE);
# ifdef XMRIG_ALGO_CN_GPU
m_map[Algorithm::CN_GPU][AV_SINGLE][Assembly::NONE] = cryptonight_single_hash_gpu<Algorithm::CN_GPU, false>;
m_map[Algorithm::CN_GPU][AV_SINGLE_SOFT][Assembly::NONE] = cryptonight_single_hash_gpu<Algorithm::CN_GPU, true>;
# endif
# ifdef XMRIG_ALGO_CN_LITE
ADD_FN(Algorithm::CN_LITE_0);
ADD_FN(Algorithm::CN_LITE_1);
@@ -275,6 +270,8 @@ xmrig::CnHash::CnHash()
ADD_FN_ASM(Algorithm::CN_PICO_TLO);
# endif
ADD_FN(Algorithm::CN_CCX);
# ifdef XMRIG_ALGO_ARGON2
m_map[Algorithm::AR2_CHUKWA][AV_SINGLE][Assembly::NONE] = argon2::single_hash<Algorithm::AR2_CHUKWA>;
m_map[Algorithm::AR2_CHUKWA][AV_SINGLE_SOFT][Assembly::NONE] = argon2::single_hash<Algorithm::AR2_CHUKWA>;

View file

@@ -294,11 +294,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
{
constexpr CnAlgo<ALGO> props;
# ifdef XMRIG_ALGO_CN_GPU
constexpr bool IS_HEAVY = props.isHeavy() || ALGO == Algorithm::CN_GPU;
# else
constexpr bool IS_HEAVY = props.isHeavy();
# endif
__m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
__m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;
@@ -446,6 +442,24 @@ static inline void cryptonight_monero_tweak(const uint8_t* l, uint64_t idx, __m1
}
static inline void cryptonight_conceal_tweak(__m128i& cx, __m128& conc_var)
{
__m128 r = _mm_add_ps(_mm_cvtepi32_ps(cx), conc_var);
r = _mm_mul_ps(r, _mm_mul_ps(r, r));
r = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), r);
r = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), r);
__m128 c_old = conc_var;
conc_var = _mm_add_ps(conc_var, r);
c_old = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), c_old);
c_old = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), c_old);
__m128 nc = _mm_mul_ps(c_old, _mm_set1_ps(536870880.0f));
cx = _mm_xor_si128(cx, _mm_cvttps_epi32(nc));
}
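In scalar terms, each of the four 32-bit lanes of this tweak behaves as sketched below (an illustrative model, not the shipped code; the 0x807FFFFF/0x40000000 masking keeps the sign and mantissa of a float while pinning its magnitude into [2, 4)):

#include <cstdint>
#include <cstring>

static inline float pin_to_2_4(float x)       // keep sign + mantissa, force |x| into [2, 4)
{
    uint32_t u;
    std::memcpy(&u, &x, sizeof(u));
    u = (u & 0x807FFFFFu) | 0x40000000u;      // exponent field pinned to 128
    std::memcpy(&x, &u, sizeof(x));
    return x;
}

static inline void conceal_lane(int32_t &cx, float &conc_var)
{
    float r = static_cast<float>(cx) + conc_var;
    r = r * r * r;                            // cube, then bound the feedback term
    r = pin_to_2_4(r);
    const float c_old = pin_to_2_4(conc_var); // bounded copy of the previous state
    conc_var += r;                            // floating-point state accumulates every iteration
    cx ^= static_cast<int32_t>(c_old * 536870880.0f); // scale to just under 2^31 and mix back in
}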
template<Algorithm::Id ALGO, bool SOFT_AES>
inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height)
{
@@ -479,12 +493,20 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
__m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
__m128i bx1 = _mm_set_epi64x(h0[9] ^ h0[11], h0[8] ^ h0[10]);
__m128 conc_var;
if (ALGO == Algorithm::CN_CCX) {
conc_var = _mm_setzero_ps();
}
uint64_t idx0 = al0;
for (size_t i = 0; i < props.iterations(); i++) {
__m128i cx;
if (IS_CN_HEAVY_TUBE || !SOFT_AES) {
cx = _mm_load_si128(reinterpret_cast<const __m128i *>(&l0[idx0 & MASK]));
if (ALGO == Algorithm::CN_CCX) {
cryptonight_conceal_tweak(cx, conc_var);
}
}
const __m128i ax0 = _mm_set_epi64x(ah0, al0);
@@ -492,7 +514,14 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
cx = aes_round_tweak_div(cx, ax0);
}
else if (SOFT_AES) {
cx = soft_aesenc((uint32_t*)&l0[idx0 & MASK], ax0);
if (ALGO == Algorithm::CN_CCX) {
cx = _mm_load_si128(reinterpret_cast<const __m128i*>(&l0[idx0 & MASK]));
cryptonight_conceal_tweak(cx, conc_var);
cx = soft_aesenc((uint32_t*)&cx, ax0);
}
else {
cx = soft_aesenc((uint32_t*)&l0[idx0 & MASK], ax0);
}
}
else {
cx = _mm_aesenc_si128(cx, ax0);
@@ -580,66 +609,6 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
}
} /* namespace xmrig */
#ifdef XMRIG_ALGO_CN_GPU
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_arm(const uint8_t *spad, uint8_t *lpad);
namespace xmrig {
template<size_t MEM>
void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output)
{
constexpr size_t hash_size = 200; // 25x8 bytes
alignas(16) uint64_t hash[25];
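// Each pass below emits 160 + 176 + 176 = 512 bytes of scratchpad, hence MEM / 512 iterations.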
for (uint64_t i = 0; i < MEM / 512; i++) {
memcpy(hash, input, hash_size);
hash[0] ^= i;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 160);
output += 160;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 176);
output += 176;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 176);
output += 176;
}
}
template<xmrig::Algorithm::Id ALGO, bool SOFT_AES>
inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height)
{
constexpr CnAlgo<ALGO> props;
keccak(input, size, ctx[0]->state);
cn_explode_scratchpad_gpu<props.memory()>(ctx[0]->state, ctx[0]->memory);
fesetround(FE_TONEAREST);
cn_gpu_inner_arm<props.iterations(), props.mask()>(ctx[0]->state, ctx[0]->memory);
cn_implode_scratchpad<ALGO, SOFT_AES>(reinterpret_cast<const __m128i *>(ctx[0]->memory), reinterpret_cast<__m128i *>(ctx[0]->state));
keccakf(reinterpret_cast<uint64_t*>(ctx[0]->state), 24);
memcpy(output, ctx[0]->state, 32);
}
} /* namespace xmrig */
#endif
namespace xmrig {
template<Algorithm::Id ALGO, bool SOFT_AES>
inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx, uint64_t height)
{
@@ -686,6 +655,12 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
__m128i bx10 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx11 = _mm_set_epi64x(h1[9] ^ h1[11], h1[8] ^ h1[10]);
__m128 conc_var0, conc_var1;
if (ALGO == Algorithm::CN_CCX) {
conc_var0 = _mm_setzero_ps();
conc_var1 = _mm_setzero_ps();
}
uint64_t idx0 = al0;
uint64_t idx1 = al1;
@@ -694,6 +669,10 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
if (IS_CN_HEAVY_TUBE || !SOFT_AES) {
cx0 = _mm_load_si128((__m128i *) &l0[idx0 & MASK]);
cx1 = _mm_load_si128((__m128i *) &l1[idx1 & MASK]);
if (ALGO == Algorithm::CN_CCX) {
cryptonight_conceal_tweak(cx0, conc_var0);
cryptonight_conceal_tweak(cx1, conc_var1);
}
}
const __m128i ax0 = _mm_set_epi64x(ah0, al0);
@@ -703,8 +682,18 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
cx1 = aes_round_tweak_div(cx1, ax1);
}
else if (SOFT_AES) {
cx0 = soft_aesenc((uint32_t*)&l0[idx0 & MASK], ax0);
cx1 = soft_aesenc((uint32_t*)&l1[idx1 & MASK], ax1);
if (ALGO == Algorithm::CN_CCX) {
cx0 = _mm_load_si128((__m128i *) &l0[idx0 & MASK]);
cx1 = _mm_load_si128((__m128i *) &l1[idx1 & MASK]);
cryptonight_conceal_tweak(cx0, conc_var0);
cryptonight_conceal_tweak(cx1, conc_var1);
cx0 = soft_aesenc((uint32_t*)&cx0, ax0);
cx1 = soft_aesenc((uint32_t*)&cx1, ax1);
}
else {
cx0 = soft_aesenc((uint32_t*)&l0[idx0 & MASK], ax0);
cx1 = soft_aesenc((uint32_t*)&l1[idx1 & MASK], ax1);
}
}
else {
cx0 = _mm_aesenc_si128(cx0, ax0);

View file

@@ -67,8 +67,10 @@
#ifdef _MSC_VER
# define VARIANT2_SET_ROUNDING_MODE() if (BASE == Algorithm::CN_2) { _control87(RC_DOWN, MCW_RC); }
# define RESTORE_ROUNDING_MODE() _control87(RC_NEAR, MCW_RC);
#else
# define VARIANT2_SET_ROUNDING_MODE() if (BASE == Algorithm::CN_2) { fesetround(FE_DOWNWARD); }
# define RESTORE_ROUNDING_MODE() fesetround(FE_TONEAREST);
#endif
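The cn/2-based algorithms run their inner loop with the FPU rounding toward negative infinity, while the cn/ccx conceal tweak added in this commit relies on round-to-nearest float arithmetic, hence the new RESTORE_ROUNDING_MODE macro. A minimal standalone illustration of why the mode matters (not part of the diff; compile without fast-math so the floating-point environment access is honored):

#include <cfenv>
#include <cstdio>

int main()
{
    volatile float a = 1.0f;
    volatile float b = 1e-7f;        // a + b is not exactly representable as a float
    fesetround(FE_DOWNWARD);
    const float down = a + b;        // rounds toward -inf: stays 1.0f
    fesetround(FE_TONEAREST);
    const float nearest = a + b;     // rounds to nearest: 1.0f + 2^-23
    printf("%.9g %.9g\n", down, nearest);
    return 0;
}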
# define VARIANT2_INTEGER_MATH(part, cl, cx) \

View file

@@ -231,6 +231,20 @@ const static uint8_t test_output_zls[160] = {
0x00, 0x08, 0x64, 0xF0, 0xA6, 0xC8, 0x94, 0x45, 0x08, 0xED, 0x03, 0x95, 0x52, 0xE9, 0xBC, 0x5F
};
// "cn/ccx"
const static uint8_t test_output_ccx[160] = {
0xB3, 0xA1, 0x67, 0x86, 0xD2, 0xC9, 0x85, 0xEC, 0xAD, 0xC4, 0x5F, 0x91, 0x05, 0x27, 0xC7, 0xA1,
0x96, 0xF0, 0xE1, 0xE9, 0x7C, 0x87, 0x09, 0x38, 0x1D, 0x7D, 0x41, 0x93, 0x35, 0xF8, 0x16, 0x72,
0xC3, 0xBD, 0x8D, 0xE8, 0xD5, 0xAE, 0xB8, 0x59, 0x0A, 0x6C, 0xCB, 0x7B, 0x41, 0x30, 0xF7, 0x04,
0xA5, 0x7C, 0xF9, 0xCA, 0x20, 0x49, 0x9C, 0xFD, 0xE8, 0x43, 0xCF, 0x66, 0x78, 0xEA, 0x76, 0xDD,
0x91, 0x0C, 0xDE, 0x29, 0x2A, 0xE0, 0xA8, 0xCA, 0xBC, 0xAA, 0x53, 0x4C, 0x93, 0x3E, 0x7B, 0x2C,
0xF1, 0xF9, 0xE1, 0x98, 0xB2, 0x92, 0x1E, 0x19, 0x93, 0x2A, 0x74, 0x9D, 0xDB, 0x10, 0x0F, 0x16,
0xD5, 0x3D, 0xE4, 0xC4, 0x23, 0xD9, 0x2E, 0xFD, 0x79, 0x8D, 0x1E, 0x48, 0x4E, 0x46, 0x08, 0x6C,
0xFF, 0x8A, 0x49, 0xFA, 0x1E, 0xB0, 0xB6, 0x9A, 0x47, 0x1C, 0xC6, 0x30, 0x36, 0x5D, 0xFD, 0x76,
0x10, 0x07, 0x44, 0xE6, 0xC8, 0x20, 0x2A, 0x84, 0x9D, 0x70, 0x22, 0x00, 0x8B, 0x9B, 0xBD, 0x8D,
0x27, 0x49, 0xA6, 0x06, 0xDC, 0xF0, 0xA1, 0x4B, 0x50, 0xA0, 0x12, 0xCD, 0x77, 0x01, 0x4C, 0x28
};
// "cn/double"
const static uint8_t test_output_double[160] = {
0xAE, 0xFB, 0xB3, 0xF0, 0xCC, 0x88, 0x04, 0x6D, 0x11, 0x9F, 0x6C, 0x54, 0xB9, 0x6D, 0x90, 0xC9,
@@ -356,23 +370,6 @@ const static uint8_t test_output_pico_tlo[160] = {
#endif
#ifdef XMRIG_ALGO_CN_GPU
// "cn/gpu"
const static uint8_t test_output_gpu[160] = {
0xE5, 0x5C, 0xB2, 0x3E, 0x51, 0x64, 0x9A, 0x59, 0xB1, 0x27, 0xB9, 0x6B, 0x51, 0x5F, 0x2B, 0xF7,
0xBF, 0xEA, 0x19, 0x97, 0x41, 0xA0, 0x21, 0x6C, 0xF8, 0x38, 0xDE, 0xD0, 0x6E, 0xFF, 0x82, 0xDF,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
#endif
#ifdef XMRIG_ALGO_ARGON2
// "argon2/chukwa"
const static uint8_t argon2_chukwa_test_out[160] = {

View file

@@ -371,11 +371,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
{
constexpr CnAlgo<ALGO> props;
# ifdef XMRIG_ALGO_CN_GPU
constexpr bool IS_HEAVY = props.isHeavy() || ALGO == Algorithm::CN_GPU;
# else
constexpr bool IS_HEAVY = props.isHeavy();
# endif
__m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
__m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;
@@ -545,6 +541,23 @@ static inline void cryptonight_monero_tweak(uint64_t *mem_out, const uint8_t *l,
}
static inline void cryptonight_conceal_tweak(__m128i& cx, __m128& conc_var)
{
__m128 r = _mm_add_ps(_mm_cvtepi32_ps(cx), conc_var);
r = _mm_mul_ps(r, _mm_mul_ps(r, r));
r = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), r);
r = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), r);
__m128 c_old = conc_var;
conc_var = _mm_add_ps(conc_var, r);
c_old = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), c_old);
c_old = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), c_old);
__m128 nc = _mm_mul_ps(c_old, _mm_set1_ps(536870880.0f));
cx = _mm_xor_si128(cx, _mm_cvttps_epi32(nc));
}
template<Algorithm::Id ALGO, bool SOFT_AES>
inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height)
{
@@ -598,10 +611,19 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
__m128i bx0 = _mm_set_epi64x(static_cast<int64_t>(h0[3] ^ h0[7]), static_cast<int64_t>(h0[2] ^ h0[6]));
__m128i bx1 = _mm_set_epi64x(static_cast<int64_t>(h0[9] ^ h0[11]), static_cast<int64_t>(h0[8] ^ h0[10]));
__m128 conc_var;
if (ALGO == Algorithm::CN_CCX) {
conc_var = _mm_setzero_ps();
RESTORE_ROUNDING_MODE();
}
for (size_t i = 0; i < props.iterations(); i++) {
__m128i cx;
if (IS_CN_HEAVY_TUBE || !SOFT_AES) {
cx = _mm_load_si128(reinterpret_cast<const __m128i *>(&l0[idx0 & MASK]));
if (ALGO == Algorithm::CN_CCX) {
cryptonight_conceal_tweak(cx, conc_var);
}
}
const __m128i ax0 = _mm_set_epi64x(static_cast<int64_t>(ah0), static_cast<int64_t>(al0));
@@ -609,7 +631,14 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
cx = aes_round_tweak_div(cx, ax0);
}
else if (SOFT_AES) {
cx = soft_aesenc(&l0[idx0 & MASK], ax0, reinterpret_cast<const uint32_t*>(saes_table));
if (ALGO == Algorithm::CN_CCX) {
cx = _mm_load_si128(reinterpret_cast<const __m128i*>(&l0[idx0 & MASK]));
cryptonight_conceal_tweak(cx, conc_var);
cx = soft_aesenc(&cx, ax0, reinterpret_cast<const uint32_t*>(saes_table));
}
else {
cx = soft_aesenc(&l0[idx0 & MASK], ax0, reinterpret_cast<const uint32_t*>(saes_table));
}
}
else {
cx = _mm_aesenc_si128(cx, ax0);
@@ -702,73 +731,6 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
} /* namespace xmrig */
#ifdef XMRIG_ALGO_CN_GPU
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_avx(const uint8_t *spad, uint8_t *lpad);
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_ssse3(const uint8_t *spad, uint8_t *lpad);
namespace xmrig {
template<size_t MEM>
void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output)
{
constexpr size_t hash_size = 200; // 25x8 bytes
alignas(16) uint64_t hash[25];
for (uint64_t i = 0; i < MEM / 512; i++) {
memcpy(hash, input, hash_size);
hash[0] ^= i;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 160);
output += 160;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 176);
output += 176;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 176);
output += 176;
}
}
template<Algorithm::Id ALGO, bool SOFT_AES>
inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t)
{
constexpr CnAlgo<ALGO> props;
keccak(input, size, ctx[0]->state);
cn_explode_scratchpad_gpu<props.memory()>(ctx[0]->state, ctx[0]->memory);
# ifdef _MSC_VER
_control87(RC_NEAR, MCW_RC);
# else
fesetround(FE_TONEAREST);
# endif
if (xmrig::Cpu::info()->hasAVX2()) {
cn_gpu_inner_avx<props.iterations(), props.mask()>(ctx[0]->state, ctx[0]->memory);
} else {
cn_gpu_inner_ssse3<props.iterations(), props.mask()>(ctx[0]->state, ctx[0]->memory);
}
cn_implode_scratchpad<ALGO, SOFT_AES>(reinterpret_cast<const __m128i *>(ctx[0]->memory), reinterpret_cast<__m128i *>(ctx[0]->state));
keccakf(reinterpret_cast<uint64_t*>(ctx[0]->state), 24);
memcpy(output, ctx[0]->state, 32);
}
} /* namespace xmrig */
#endif
#ifdef XMRIG_FEATURE_ASM
extern "C" void cnv2_mainloop_ivybridge_asm(cryptonight_ctx **ctx);
extern "C" void cnv2_mainloop_ryzen_asm(cryptonight_ctx **ctx);
@@ -1042,6 +1004,13 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
__m128i bx10 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);
__m128i bx11 = _mm_set_epi64x(h1[9] ^ h1[11], h1[8] ^ h1[10]);
__m128 conc_var0, conc_var1;
if (ALGO == Algorithm::CN_CCX) {
conc_var0 = _mm_setzero_ps();
conc_var1 = _mm_setzero_ps();
RESTORE_ROUNDING_MODE();
}
uint64_t idx0 = al0;
uint64_t idx1 = al1;
@@ -1050,6 +1019,10 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
if (IS_CN_HEAVY_TUBE || !SOFT_AES) {
cx0 = _mm_load_si128(reinterpret_cast<const __m128i *>(&l0[idx0 & MASK]));
cx1 = _mm_load_si128(reinterpret_cast<const __m128i *>(&l1[idx1 & MASK]));
if (ALGO == Algorithm::CN_CCX) {
cryptonight_conceal_tweak(cx0, conc_var0);
cryptonight_conceal_tweak(cx1, conc_var1);
}
}
const __m128i ax0 = _mm_set_epi64x(ah0, al0);
@@ -1059,8 +1032,18 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
cx1 = aes_round_tweak_div(cx1, ax1);
}
else if (SOFT_AES) {
cx0 = soft_aesenc(&l0[idx0 & MASK], ax0, reinterpret_cast<const uint32_t*>(saes_table));
cx1 = soft_aesenc(&l1[idx1 & MASK], ax1, reinterpret_cast<const uint32_t*>(saes_table));
if (ALGO == Algorithm::CN_CCX) {
cx0 = _mm_load_si128(reinterpret_cast<const __m128i*>(&l0[idx0 & MASK]));
cx1 = _mm_load_si128(reinterpret_cast<const __m128i*>(&l1[idx1 & MASK]));
cryptonight_conceal_tweak(cx0, conc_var0);
cryptonight_conceal_tweak(cx1, conc_var1);
cx0 = soft_aesenc(&cx0, ax0, reinterpret_cast<const uint32_t*>(saes_table));
cx1 = soft_aesenc(&cx1, ax1, reinterpret_cast<const uint32_t*>(saes_table));
}
else {
cx0 = soft_aesenc(&l0[idx0 & MASK], ax0, reinterpret_cast<const uint32_t*>(saes_table));
cx1 = soft_aesenc(&l1[idx1 & MASK], ax1, reinterpret_cast<const uint32_t*>(saes_table));
}
}
else {
cx0 = _mm_aesenc_si128(cx0, ax0);
@@ -1215,9 +1198,13 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
}
#define CN_STEP1(a, b0, b1, c, l, ptr, idx) \
#define CN_STEP1(a, b0, b1, c, l, ptr, idx, conc_var) \
ptr = reinterpret_cast<__m128i*>(&l[idx & MASK]); \
c = _mm_load_si128(ptr);
c = _mm_load_si128(ptr); \
if (ALGO == Algorithm::CN_CCX) { \
cryptonight_conceal_tweak(c, conc_var); \
}
#define CN_STEP2(a, b0, b1, c, l, ptr, idx) \
@@ -1317,6 +1304,10 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
__m128i bx##n##0 = _mm_set_epi64x(h##n[3] ^ h##n[7], h##n[2] ^ h##n[6]); \
__m128i bx##n##1 = _mm_set_epi64x(h##n[9] ^ h##n[11], h##n[8] ^ h##n[10]); \
__m128i cx##n = _mm_setzero_si128(); \
__m128 conc_var##n; \
if (ALGO == Algorithm::CN_CCX) { \
conc_var##n = _mm_setzero_ps(); \
} \
VARIANT4_RANDOM_MATH_INIT(n);
@@ -1356,6 +1347,9 @@ inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t si
CONST_INIT(ctx[1], 1);
CONST_INIT(ctx[2], 2);
VARIANT2_SET_ROUNDING_MODE();
if (ALGO == Algorithm::CN_CCX) {
RESTORE_ROUNDING_MODE();
}
uint64_t idx0, idx1, idx2;
idx0 = _mm_cvtsi128_si64(ax0);
@@ -1366,9 +1360,9 @@ inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t si
uint64_t hi, lo;
__m128i *ptr0, *ptr1, *ptr2;
CN_STEP1(ax0, bx00, bx01, cx0, l0, ptr0, idx0);
CN_STEP1(ax1, bx10, bx11, cx1, l1, ptr1, idx1);
CN_STEP1(ax2, bx20, bx21, cx2, l2, ptr2, idx2);
CN_STEP1(ax0, bx00, bx01, cx0, l0, ptr0, idx0, conc_var0);
CN_STEP1(ax1, bx10, bx11, cx1, l1, ptr1, idx1, conc_var1);
CN_STEP1(ax2, bx20, bx21, cx2, l2, ptr2, idx2, conc_var2);
CN_STEP2(ax0, bx00, bx01, cx0, l0, ptr0, idx0);
CN_STEP2(ax1, bx10, bx11, cx1, l1, ptr1, idx1);
@@ -1430,6 +1424,9 @@ inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size
CONST_INIT(ctx[2], 2);
CONST_INIT(ctx[3], 3);
VARIANT2_SET_ROUNDING_MODE();
if (ALGO == Algorithm::CN_CCX) {
RESTORE_ROUNDING_MODE();
}
uint64_t idx0, idx1, idx2, idx3;
idx0 = _mm_cvtsi128_si64(ax0);
@@ -1441,10 +1438,10 @@ inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size
uint64_t hi, lo;
__m128i *ptr0, *ptr1, *ptr2, *ptr3;
CN_STEP1(ax0, bx00, bx01, cx0, l0, ptr0, idx0);
CN_STEP1(ax1, bx10, bx11, cx1, l1, ptr1, idx1);
CN_STEP1(ax2, bx20, bx21, cx2, l2, ptr2, idx2);
CN_STEP1(ax3, bx30, bx31, cx3, l3, ptr3, idx3);
CN_STEP1(ax0, bx00, bx01, cx0, l0, ptr0, idx0, conc_var0);
CN_STEP1(ax1, bx10, bx11, cx1, l1, ptr1, idx1, conc_var1);
CN_STEP1(ax2, bx20, bx21, cx2, l2, ptr2, idx2, conc_var2);
CN_STEP1(ax3, bx30, bx31, cx3, l3, ptr3, idx3, conc_var3);
CN_STEP2(ax0, bx00, bx01, cx0, l0, ptr0, idx0);
CN_STEP2(ax1, bx10, bx11, cx1, l1, ptr1, idx1);
@@ -1512,6 +1509,9 @@ inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t siz
CONST_INIT(ctx[3], 3);
CONST_INIT(ctx[4], 4);
VARIANT2_SET_ROUNDING_MODE();
if (ALGO == Algorithm::CN_CCX) {
RESTORE_ROUNDING_MODE();
}
uint64_t idx0, idx1, idx2, idx3, idx4;
idx0 = _mm_cvtsi128_si64(ax0);
@@ -1524,11 +1524,11 @@ inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t siz
uint64_t hi, lo;
__m128i *ptr0, *ptr1, *ptr2, *ptr3, *ptr4;
CN_STEP1(ax0, bx00, bx01, cx0, l0, ptr0, idx0);
CN_STEP1(ax1, bx10, bx11, cx1, l1, ptr1, idx1);
CN_STEP1(ax2, bx20, bx21, cx2, l2, ptr2, idx2);
CN_STEP1(ax3, bx30, bx31, cx3, l3, ptr3, idx3);
CN_STEP1(ax4, bx40, bx41, cx4, l4, ptr4, idx4);
CN_STEP1(ax0, bx00, bx01, cx0, l0, ptr0, idx0, conc_var0);
CN_STEP1(ax1, bx10, bx11, cx1, l1, ptr1, idx1, conc_var1);
CN_STEP1(ax2, bx20, bx21, cx2, l2, ptr2, idx2, conc_var2);
CN_STEP1(ax3, bx30, bx31, cx3, l3, ptr3, idx3, conc_var3);
CN_STEP1(ax4, bx40, bx41, cx4, l4, ptr4, idx4, conc_var4);
CN_STEP2(ax0, bx00, bx01, cx0, l0, ptr0, idx0);
CN_STEP2(ax1, bx10, bx11, cx1, l1, ptr1, idx1);

View file

@@ -1,240 +0,0 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <arm_neon.h>
#include "crypto/cn/CnAlgo.h"
inline void vandq_f32(float32x4_t &v, uint32_t v2)
{
uint32x4_t vc = vdupq_n_u32(v2);
v = (float32x4_t)vandq_u32((uint32x4_t)v, vc);
}
inline void vorq_f32(float32x4_t &v, uint32_t v2)
{
uint32x4_t vc = vdupq_n_u32(v2);
v = (float32x4_t)vorrq_u32((uint32x4_t)v, vc);
}
template <size_t v>
inline void vrot_si32(int32x4_t &r)
{
r = (int32x4_t)vextq_s8((int8x16_t)r, (int8x16_t)r, v);
}
template <>
inline void vrot_si32<0>(int32x4_t &r)
{
}
inline uint32_t vheor_s32(const int32x4_t &v)
{
int32x4_t v0 = veorq_s32(v, vrev64q_s32(v));
int32x2_t vf = veor_s32(vget_high_s32(v0), vget_low_s32(v0));
return (uint32_t)vget_lane_s32(vf, 0);
}
inline void prep_dv(int32_t *idx, int32x4_t &v, float32x4_t &n)
{
v = vld1q_s32(idx);
n = vcvtq_f32_s32(v);
}
inline void sub_round(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, const float32x4_t &rnd_c, float32x4_t &n, float32x4_t &d, float32x4_t &c)
{
float32x4_t ln1 = vaddq_f32(n1, c);
float32x4_t nn = vmulq_f32(n0, c);
nn = vmulq_f32(ln1, vmulq_f32(nn, nn));
vandq_f32(nn, 0xFEFFFFFF);
vorq_f32(nn, 0x00800000);
n = vaddq_f32(n, nn);
float32x4_t ln3 = vsubq_f32(n3, c);
float32x4_t dd = vmulq_f32(n2, c);
dd = vmulq_f32(ln3, vmulq_f32(dd, dd));
vandq_f32(dd, 0xFEFFFFFF);
vorq_f32(dd, 0x00800000);
d = vaddq_f32(d, dd);
//Constant feedback
c = vaddq_f32(c, rnd_c);
c = vaddq_f32(c, vdupq_n_f32(0.734375f));
float32x4_t r = vaddq_f32(nn, dd);
vandq_f32(r, 0x807FFFFF);
vorq_f32(r, 0x40000000);
c = vaddq_f32(c, r);
}
inline void round_compute(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, const float32x4_t &rnd_c, float32x4_t &c, float32x4_t &r)
{
float32x4_t n = vdupq_n_f32(0.0f), d = vdupq_n_f32(0.0f);
sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
sub_round(n0, n3, n2, n1, rnd_c, n, d, c);
// Make sure abs(d) > 2.0; this prevents division by zero and accidental overflow from dividing by values smaller than 1.0
vandq_f32(d, 0xFF7FFFFF);
vorq_f32(d, 0x40000000);
r = vaddq_f32(r, vdivq_f32(n, d));
}
// 112×4 = 448
template <bool add>
inline int32x4_t single_compute(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, float cnt, const float32x4_t &rnd_c, float32x4_t &sum)
{
float32x4_t c = vdupq_n_f32(cnt);
float32x4_t r = vdupq_n_f32(0.0f);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
// do a quick fmod by setting exp to 2
vandq_f32(r, 0x807FFFFF);
vorq_f32(r, 0x40000000);
if (add) {
sum = vaddq_f32(sum, r);
} else {
sum = r;
}
const float32x4_t cc2 = vdupq_n_f32(536870880.0f);
r = vmulq_f32(r, cc2); // 35
return vcvtq_s32_f32(r);
}
template<size_t rot>
inline void single_compute_wrap(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, float cnt, const float32x4_t &rnd_c, float32x4_t &sum, int32x4_t &out)
{
int32x4_t r = single_compute<rot % 2 != 0>(n0, n1, n2, n3, cnt, rnd_c, sum);
vrot_si32<rot>(r);
out = veorq_s32(out, r);
}
template<uint32_t MASK>
inline int32_t *scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<int32_t *>(lpad + (idx & MASK) + n * 16); }
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_arm(const uint8_t *spad, uint8_t *lpad)
{
uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
int32_t *idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
int32_t *idx1 = scratchpad_ptr<MASK>(lpad, s, 1);
int32_t *idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
int32_t *idx3 = scratchpad_ptr<MASK>(lpad, s, 3);
float32x4_t sum0 = vdupq_n_f32(0.0f);
for (size_t i = 0; i < ITER; i++) {
float32x4_t n0, n1, n2, n3;
int32x4_t v0, v1, v2, v3;
float32x4_t suma, sumb, sum1, sum2, sum3;
prep_dv(idx0, v0, n0);
prep_dv(idx1, v1, n1);
prep_dv(idx2, v2, n2);
prep_dv(idx3, v3, n3);
float32x4_t rc = sum0;
int32x4_t out, out2;
out = vdupq_n_s32(0);
single_compute_wrap<0>(n0, n1, n2, n3, 1.3437500f, rc, suma, out);
single_compute_wrap<1>(n0, n2, n3, n1, 1.2812500f, rc, suma, out);
single_compute_wrap<2>(n0, n3, n1, n2, 1.3593750f, rc, sumb, out);
single_compute_wrap<3>(n0, n3, n2, n1, 1.3671875f, rc, sumb, out);
sum0 = vaddq_f32(suma, sumb);
vst1q_s32(idx0, veorq_s32(v0, out));
out2 = out;
out = vdupq_n_s32(0);
single_compute_wrap<0>(n1, n0, n2, n3, 1.4296875f, rc, suma, out);
single_compute_wrap<1>(n1, n2, n3, n0, 1.3984375f, rc, suma, out);
single_compute_wrap<2>(n1, n3, n0, n2, 1.3828125f, rc, sumb, out);
single_compute_wrap<3>(n1, n3, n2, n0, 1.3046875f, rc, sumb, out);
sum1 = vaddq_f32(suma, sumb);
vst1q_s32(idx1, veorq_s32(v1, out));
out2 = veorq_s32(out2, out);
out = vdupq_n_s32(0);
single_compute_wrap<0>(n2, n1, n0, n3, 1.4140625f, rc, suma, out);
single_compute_wrap<1>(n2, n0, n3, n1, 1.2734375f, rc, suma, out);
single_compute_wrap<2>(n2, n3, n1, n0, 1.2578125f, rc, sumb, out);
single_compute_wrap<3>(n2, n3, n0, n1, 1.2890625f, rc, sumb, out);
sum2 = vaddq_f32(suma, sumb);
vst1q_s32(idx2, veorq_s32(v2, out));
out2 = veorq_s32(out2, out);
out = vdupq_n_s32(0);
single_compute_wrap<0>(n3, n1, n2, n0, 1.3203125f, rc, suma, out);
single_compute_wrap<1>(n3, n2, n0, n1, 1.3515625f, rc, suma, out);
single_compute_wrap<2>(n3, n0, n1, n2, 1.3359375f, rc, sumb, out);
single_compute_wrap<3>(n3, n0, n2, n1, 1.4609375f, rc, sumb, out);
sum3 = vaddq_f32(suma, sumb);
vst1q_s32(idx3, veorq_s32(v3, out));
out2 = veorq_s32(out2, out);
sum0 = vaddq_f32(sum0, sum1);
sum2 = vaddq_f32(sum2, sum3);
sum0 = vaddq_f32(sum0, sum2);
const float32x4_t cc1 = vdupq_n_f32(16777216.0f);
const float32x4_t cc2 = vdupq_n_f32(64.0f);
vandq_f32(sum0, 0x7fffffff); // take abs(va) by masking the float sign bit
// vs range 0 - 64
n0 = vmulq_f32(sum0, cc1);
v0 = vcvtq_s32_f32(n0);
v0 = veorq_s32(v0, out2);
uint32_t n = vheor_s32(v0);
// vs is now between 0 and 1
sum0 = vdivq_f32(sum0, cc2);
idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
idx1 = scratchpad_ptr<MASK>(lpad, n, 1);
idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
idx3 = scratchpad_ptr<MASK>(lpad, n, 3);
}
}
template void cn_gpu_inner_arm<xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().iterations(), xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().mask()>(const uint8_t* spad, uint8_t* lpad);

View file

@@ -1,211 +0,0 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2020 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "crypto/cn/CnAlgo.h"
#ifdef __GNUC__
# include <x86intrin.h>
#else
# include <intrin.h>
# define __restrict__ __restrict
#endif
#ifndef _mm256_bslli_epi128
#define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count))
#endif
#ifndef _mm256_bsrli_epi128
#define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
#endif
inline void prep_dv_avx(__m256i* idx, __m256i& v, __m256& n01)
{
v = _mm256_load_si256(idx);
n01 = _mm256_cvtepi32_ps(v);
}
inline __m256 fma_break(const __m256& x)
{
// Break the dependency chain by setting the exp to ?????01
__m256 xx = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0xFEFFFFFF)), x);
return _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x00800000)), xx);
}
// 14
inline void sub_round(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3, const __m256& rnd_c, __m256& n, __m256& d, __m256& c)
{
__m256 nn = _mm256_mul_ps(n0, c);
nn = _mm256_mul_ps(_mm256_add_ps(n1, c), _mm256_mul_ps(nn, nn));
nn = fma_break(nn);
n = _mm256_add_ps(n, nn);
__m256 dd = _mm256_mul_ps(n2, c);
dd = _mm256_mul_ps(_mm256_sub_ps(n3, c), _mm256_mul_ps(dd, dd));
dd = fma_break(dd);
d = _mm256_add_ps(d, dd);
//Constant feedback
c = _mm256_add_ps(c, rnd_c);
c = _mm256_add_ps(c, _mm256_set1_ps(0.734375f));
__m256 r = _mm256_add_ps(nn, dd);
r = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x807FFFFF)), r);
r = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), r);
c = _mm256_add_ps(c, r);
}
// 14*8 + 2 = 112
inline void round_compute(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3, const __m256& rnd_c, __m256& c, __m256& r)
{
__m256 n = _mm256_setzero_ps(), d = _mm256_setzero_ps();
sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
sub_round(n0, n3, n2, n1, rnd_c, n, d, c);
// Make sure abs(d) > 2.0; this prevents division by zero and accidental overflow from dividing by values smaller than 1.0
d = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0xFF7FFFFF)), d);
d = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), d);
r = _mm256_add_ps(r, _mm256_div_ps(n, d));
}
// 112×4 = 448
template <bool add>
inline __m256i double_compute(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
float lcnt, float hcnt, const __m256& rnd_c, __m256& sum)
{
__m256 c = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_set1_ps(lcnt)), _mm_set1_ps(hcnt), 1);
__m256 r = _mm256_setzero_ps();
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
// do a quick fmod by setting exp to 2
r = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x807FFFFF)), r);
r = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), r);
if(add)
sum = _mm256_add_ps(sum, r);
else
sum = r;
r = _mm256_mul_ps(r, _mm256_set1_ps(536870880.0f)); // 35
return _mm256_cvttps_epi32(r);
}
template <size_t rot>
inline void double_compute_wrap(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
float lcnt, float hcnt, const __m256& rnd_c, __m256& sum, __m256i& out)
{
__m256i r = double_compute<rot % 2 != 0>(n0, n1, n2, n3, lcnt, hcnt, rnd_c, sum);
if(rot != 0)
r = _mm256_or_si256(_mm256_bslli_epi128(r, 16 - rot), _mm256_bsrli_epi128(r, rot));
out = _mm256_xor_si256(out, r);
}
template<uint32_t MASK>
inline __m256i* scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<__m256i*>(lpad + (idx & MASK) + n*16); }
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_avx(const uint8_t* spad, uint8_t* lpad)
{
uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
__m256i* idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
__m256i* idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
__m256 sum0 = _mm256_setzero_ps();
for(size_t i = 0; i < ITER; i++)
{
__m256i v01, v23;
__m256 suma, sumb, sum1;
__m256 rc = sum0;
__m256 n01, n23;
prep_dv_avx(idx0, v01, n01);
prep_dv_avx(idx2, v23, n23);
__m256i out, out2;
__m256 n10, n22, n33;
n10 = _mm256_permute2f128_ps(n01, n01, 0x01);
n22 = _mm256_permute2f128_ps(n23, n23, 0x00);
n33 = _mm256_permute2f128_ps(n23, n23, 0x11);
out = _mm256_setzero_si256();
double_compute_wrap<0>(n01, n10, n22, n33, 1.3437500f, 1.4296875f, rc, suma, out);
double_compute_wrap<1>(n01, n22, n33, n10, 1.2812500f, 1.3984375f, rc, suma, out);
double_compute_wrap<2>(n01, n33, n10, n22, 1.3593750f, 1.3828125f, rc, sumb, out);
double_compute_wrap<3>(n01, n33, n22, n10, 1.3671875f, 1.3046875f, rc, sumb, out);
_mm256_store_si256(idx0, _mm256_xor_si256(v01, out));
sum0 = _mm256_add_ps(suma, sumb);
out2 = out;
__m256 n11, n02, n30;
n11 = _mm256_permute2f128_ps(n01, n01, 0x11);
n02 = _mm256_permute2f128_ps(n01, n23, 0x20);
n30 = _mm256_permute2f128_ps(n01, n23, 0x03);
out = _mm256_setzero_si256();
double_compute_wrap<0>(n23, n11, n02, n30, 1.4140625f, 1.3203125f, rc, suma, out);
double_compute_wrap<1>(n23, n02, n30, n11, 1.2734375f, 1.3515625f, rc, suma, out);
double_compute_wrap<2>(n23, n30, n11, n02, 1.2578125f, 1.3359375f, rc, sumb, out);
double_compute_wrap<3>(n23, n30, n02, n11, 1.2890625f, 1.4609375f, rc, sumb, out);
_mm256_store_si256(idx2, _mm256_xor_si256(v23, out));
sum1 = _mm256_add_ps(suma, sumb);
out2 = _mm256_xor_si256(out2, out);
out2 = _mm256_xor_si256(_mm256_permute2x128_si256(out2,out2,0x41), out2);
suma = _mm256_permute2f128_ps(sum0, sum1, 0x30);
sumb = _mm256_permute2f128_ps(sum0, sum1, 0x21);
sum0 = _mm256_add_ps(suma, sumb);
sum0 = _mm256_add_ps(sum0, _mm256_permute2f128_ps(sum0, sum0, 0x41));
// Clear the high 128 bits
__m128 sum = _mm256_castps256_ps128(sum0);
sum = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)), sum); // take abs(va) by masking the float sign bit
// vs range 0 - 64
__m128i v0 = _mm_cvttps_epi32(_mm_mul_ps(sum, _mm_set1_ps(16777216.0f)));
v0 = _mm_xor_si128(v0, _mm256_castsi256_si128(out2));
__m128i v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 2, 3));
v0 = _mm_xor_si128(v0, v1);
v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 0, 1));
v0 = _mm_xor_si128(v0, v1);
// vs is now between 0 and 1
sum = _mm_div_ps(sum, _mm_set1_ps(64.0f));
sum0 = _mm256_insertf128_ps(_mm256_castps128_ps256(sum), sum, 1);
uint32_t n = _mm_cvtsi128_si32(v0);
idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
}
}
template void cn_gpu_inner_avx<xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().iterations(), xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().mask()>(const uint8_t* spad, uint8_t* lpad);

View file

@@ -1,212 +0,0 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2020 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "crypto/cn/CnAlgo.h"
#ifdef __GNUC__
# include <x86intrin.h>
#else
# include <intrin.h>
# define __restrict__ __restrict
#endif
inline void prep_dv(__m128i* idx, __m128i& v, __m128& n)
{
v = _mm_load_si128(idx);
n = _mm_cvtepi32_ps(v);
}
inline __m128 fma_break(__m128 x)
{
// Break the dependency chain by setting the exp to ?????01
x = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0xFEFFFFFF)), x);
return _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x00800000)), x);
}
// 14
inline void sub_round(__m128 n0, __m128 n1, __m128 n2, __m128 n3, __m128 rnd_c, __m128& n, __m128& d, __m128& c)
{
n1 = _mm_add_ps(n1, c);
__m128 nn = _mm_mul_ps(n0, c);
nn = _mm_mul_ps(n1, _mm_mul_ps(nn,nn));
nn = fma_break(nn);
n = _mm_add_ps(n, nn);
n3 = _mm_sub_ps(n3, c);
__m128 dd = _mm_mul_ps(n2, c);
dd = _mm_mul_ps(n3, _mm_mul_ps(dd,dd));
dd = fma_break(dd);
d = _mm_add_ps(d, dd);
//Constant feedback
c = _mm_add_ps(c, rnd_c);
c = _mm_add_ps(c, _mm_set1_ps(0.734375f));
__m128 r = _mm_add_ps(nn, dd);
r = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), r);
r = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), r);
c = _mm_add_ps(c, r);
}
// 14*8 + 2 = 112
inline void round_compute(__m128 n0, __m128 n1, __m128 n2, __m128 n3, __m128 rnd_c, __m128& c, __m128& r)
{
__m128 n = _mm_setzero_ps(), d = _mm_setzero_ps();
sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
sub_round(n0, n3, n2, n1, rnd_c, n, d, c);
// Make sure abs(d) > 2.0; this prevents division by zero and accidental overflow from dividing by values smaller than 1.0
d = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0xFF7FFFFF)), d);
d = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), d);
r = _mm_add_ps(r, _mm_div_ps(n, d));
}
// 112×4 = 448
template<bool add>
inline __m128i single_compute(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum)
{
__m128 c = _mm_set1_ps(cnt);
__m128 r = _mm_setzero_ps();
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
// do a quick fmod by setting exp to 2
r = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), r);
r = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), r);
if(add)
sum = _mm_add_ps(sum, r);
else
sum = r;
r = _mm_mul_ps(r, _mm_set1_ps(536870880.0f)); // 35
return _mm_cvttps_epi32(r);
}
template<size_t rot>
inline void single_compute_wrap(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum, __m128i& out)
{
__m128i r = single_compute<rot % 2 != 0>(n0, n1, n2, n3, cnt, rnd_c, sum);
if(rot != 0)
r = _mm_or_si128(_mm_slli_si128(r, 16 - rot), _mm_srli_si128(r, rot));
out = _mm_xor_si128(out, r);
}
template<uint32_t MASK>
inline __m128i* scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<__m128i*>(lpad + (idx & MASK) + n*16); }
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_ssse3(const uint8_t* spad, uint8_t* lpad)
{
uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
__m128i* idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
__m128i* idx1 = scratchpad_ptr<MASK>(lpad, s, 1);
__m128i* idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
__m128i* idx3 = scratchpad_ptr<MASK>(lpad, s, 3);
__m128 sum0 = _mm_setzero_ps();
for(size_t i = 0; i < ITER; i++)
{
__m128 n0, n1, n2, n3;
__m128i v0, v1, v2, v3;
__m128 suma, sumb, sum1, sum2, sum3;
prep_dv(idx0, v0, n0);
prep_dv(idx1, v1, n1);
prep_dv(idx2, v2, n2);
prep_dv(idx3, v3, n3);
__m128 rc = sum0;
__m128i out, out2;
out = _mm_setzero_si128();
single_compute_wrap<0>(n0, n1, n2, n3, 1.3437500f, rc, suma, out);
single_compute_wrap<1>(n0, n2, n3, n1, 1.2812500f, rc, suma, out);
single_compute_wrap<2>(n0, n3, n1, n2, 1.3593750f, rc, sumb, out);
single_compute_wrap<3>(n0, n3, n2, n1, 1.3671875f, rc, sumb, out);
sum0 = _mm_add_ps(suma, sumb);
_mm_store_si128(idx0, _mm_xor_si128(v0, out));
out2 = out;
out = _mm_setzero_si128();
single_compute_wrap<0>(n1, n0, n2, n3, 1.4296875f, rc, suma, out);
single_compute_wrap<1>(n1, n2, n3, n0, 1.3984375f, rc, suma, out);
single_compute_wrap<2>(n1, n3, n0, n2, 1.3828125f, rc, sumb, out);
single_compute_wrap<3>(n1, n3, n2, n0, 1.3046875f, rc, sumb, out);
sum1 = _mm_add_ps(suma, sumb);
_mm_store_si128(idx1, _mm_xor_si128(v1, out));
out2 = _mm_xor_si128(out2, out);
out = _mm_setzero_si128();
single_compute_wrap<0>(n2, n1, n0, n3, 1.4140625f, rc, suma, out);
single_compute_wrap<1>(n2, n0, n3, n1, 1.2734375f, rc, suma, out);
single_compute_wrap<2>(n2, n3, n1, n0, 1.2578125f, rc, sumb, out);
single_compute_wrap<3>(n2, n3, n0, n1, 1.2890625f, rc, sumb, out);
sum2 = _mm_add_ps(suma, sumb);
_mm_store_si128(idx2, _mm_xor_si128(v2, out));
out2 = _mm_xor_si128(out2, out);
out = _mm_setzero_si128();
single_compute_wrap<0>(n3, n1, n2, n0, 1.3203125f, rc, suma, out);
single_compute_wrap<1>(n3, n2, n0, n1, 1.3515625f, rc, suma, out);
single_compute_wrap<2>(n3, n0, n1, n2, 1.3359375f, rc, sumb, out);
single_compute_wrap<3>(n3, n0, n2, n1, 1.4609375f, rc, sumb, out);
sum3 = _mm_add_ps(suma, sumb);
_mm_store_si128(idx3, _mm_xor_si128(v3, out));
out2 = _mm_xor_si128(out2, out);
sum0 = _mm_add_ps(sum0, sum1);
sum2 = _mm_add_ps(sum2, sum3);
sum0 = _mm_add_ps(sum0, sum2);
sum0 = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)), sum0); // take abs(va) by masking the float sign bit
// vs range 0 - 64
n0 = _mm_mul_ps(sum0, _mm_set1_ps(16777216.0f));
v0 = _mm_cvttps_epi32(n0);
v0 = _mm_xor_si128(v0, out2);
v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 2, 3));
v0 = _mm_xor_si128(v0, v1);
v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 0, 1));
v0 = _mm_xor_si128(v0, v1);
// vs is now between 0 and 1
sum0 = _mm_div_ps(sum0, _mm_set1_ps(64.0f));
uint32_t n = _mm_cvtsi128_si32(v0);
idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
idx1 = scratchpad_ptr<MASK>(lpad, n, 1);
idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
idx3 = scratchpad_ptr<MASK>(lpad, n, 3);
}
}
template void cn_gpu_inner_ssse3<xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().iterations(), xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().mask()>(const uint8_t* spad, uint8_t* lpad);

View file

@@ -0,0 +1,179 @@
/* XMRig
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2020 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <cinttypes>
#include <algorithm>
#include <thread>
#include "crypto/kawpow/KPCache.h"
#include "3rdparty/libethash/data_sizes.h"
#include "3rdparty/libethash/ethash_internal.h"
#include "3rdparty/libethash/ethash.h"
#include "base/io/log/Log.h"
#include "base/io/log/Tags.h"
#include "base/tools/Chrono.h"
#include "crypto/common/VirtualMemory.h"
namespace xmrig {
std::mutex KPCache::s_cacheMutex;
KPCache KPCache::s_cache;
KPCache::KPCache()
{
}
KPCache::~KPCache()
{
delete m_memory;
}
bool KPCache::init(uint32_t epoch)
{
if (epoch >= sizeof(cache_sizes) / sizeof(cache_sizes[0])) {
return false;
}
if (m_epoch == epoch) {
return true;
}
const uint64_t start_ms = Chrono::steadyMSecs();
const size_t size = cache_sizes[epoch];
if (!m_memory || m_memory->size() < size) {
delete m_memory;
m_memory = new VirtualMemory(size, false, false, false);
}
const ethash_h256_t seedhash = ethash_get_seedhash(epoch);
ethash_compute_cache_nodes(m_memory->raw(), size, &seedhash);
ethash_light cache;
cache.cache = m_memory->raw();
cache.cache_size = size;
cache.num_parent_nodes = cache.cache_size / sizeof(node);
calculate_fast_mod_data(cache.num_parent_nodes, cache.reciprocal, cache.increment, cache.shift);
const uint64_t cache_nodes = (size + sizeof(node) * 4 - 1) / sizeof(node);
m_DAGCache.resize(cache_nodes * (sizeof(node) / sizeof(uint32_t)));
// Init DAG cache
{
const uint64_t n = std::max(std::thread::hardware_concurrency(), 1U);
std::vector<std::thread> threads;
threads.reserve(n);
for (uint64_t i = 0; i < n; ++i) {
const uint32_t a = (cache_nodes * i) / n;
const uint32_t b = (cache_nodes * (i + 1)) / n;
threads.emplace_back([this, a, b, cache_nodes, &cache]() {
for (uint32_t j = a; j < b; ++j) {
ethash_calculate_dag_item_opt(((node*)m_DAGCache.data()) + j, j, num_dataset_parents, &cache);
}
});
}
for (auto& t : threads) {
t.join();
}
}
m_size = size;
m_epoch = epoch;
LOG_INFO("%s " YELLOW("KawPow") " light cache for epoch " WHITE_BOLD("%u") " calculated " BLACK_BOLD("(%" PRIu64 "ms)"), Tags::miner(), epoch, Chrono::steadyMSecs() - start_ms);
return true;
}
void* KPCache::data() const
{
return m_memory ? m_memory->raw() : nullptr;
}
// count leading zeros; callers must pass a != 0 (the _BitScanReverse path has no zero guard)
static inline uint32_t clz(uint32_t a)
{
#ifdef _MSC_VER
unsigned long index;
_BitScanReverse(&index, a);
return 31 - index;
#else
return __builtin_clz(a);
#endif
}
uint64_t KPCache::cache_size(uint32_t epoch)
{
if (epoch >= sizeof(cache_sizes) / sizeof(cache_sizes[0])) {
return 0;
}
return cache_sizes[epoch];
}
uint64_t KPCache::dag_size(uint32_t epoch)
{
if (epoch >= sizeof(dag_sizes) / sizeof(dag_sizes[0])) {
return 0;
}
return dag_sizes[epoch];
}
void KPCache::calculate_fast_mod_data(uint32_t divisor, uint32_t& reciprocal, uint32_t& increment, uint32_t& shift)
{
if ((divisor & (divisor - 1)) == 0) {
reciprocal = 1;
increment = 0;
shift = 31U - clz(divisor);
}
else {
shift = 63U - clz(divisor);
const uint64_t N = 1ULL << shift;
const uint64_t q = N / divisor;
const uint64_t r = N - q * divisor;
if (r * 2 < divisor)
{
reciprocal = static_cast<uint32_t>(q);
increment = 1;
}
else
{
reciprocal = static_cast<uint32_t>(q + 1);
increment = 0;
}
}
}
} // namespace xmrig
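calculate_fast_mod_data() precomputes a Granlund–Montgomery style reciprocal so the hot DAG-item loop can reduce indices modulo num_parent_nodes without a hardware division. The consuming side lives in the patched libethash (ethash_internal, not shown in this excerpt); the sketch below is a hedged reconstruction of the conventional multiply-and-shift form that this (reciprocal, increment, shift) triple supports, with the helper name fast_mod being my own placeholder:

#include <cstdint>

// Hypothetical helper (name and placement assumed, not part of this diff):
// reduces x modulo `divisor` using the triple produced by
// KPCache::calculate_fast_mod_data().
static inline uint32_t fast_mod(uint32_t x, uint32_t divisor,
                                uint32_t reciprocal, uint32_t increment, uint32_t shift)
{
    // A 64-bit multiply plus shift approximates x / divisor. This covers both
    // the power-of-two branch (reciprocal = 1, increment = 0, shift = log2(divisor))
    // and the general branch computed above.
    const uint32_t q = uint32_t(((uint64_t(x) + increment) * reciprocal) >> shift);
    return x - q * divisor;
}

// Spot check: divisor = 7 yields reciprocal = 2454267026, increment = 1, shift = 34,
// and fast_mod(20, 7, 2454267026, 1, 34) == 20 % 7 == 6.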

View file

@@ -0,0 +1,74 @@
/* XMRig
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2020 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_KP_CACHE_H
#define XMRIG_KP_CACHE_H
#include "base/tools/Object.h"
#include <mutex>
#include <vector>
namespace xmrig
{
class VirtualMemory;
class KPCache
{
public:
static constexpr size_t l1_cache_size = 16 * 1024;
static constexpr size_t l1_cache_num_items = l1_cache_size / sizeof(uint32_t);
static constexpr uint32_t num_dataset_parents = 512;
XMRIG_DISABLE_COPY_MOVE(KPCache)
KPCache();
~KPCache();
bool init(uint32_t epoch);
void* data() const;
size_t size() const { return m_size; }
uint32_t epoch() const { return m_epoch; }
const uint32_t* l1_cache() const { return m_DAGCache.data(); }
static uint64_t cache_size(uint32_t epoch);
static uint64_t dag_size(uint32_t epoch);
static void calculate_fast_mod_data(uint32_t divisor, uint32_t &reciprocal, uint32_t &increment, uint32_t& shift);
static std::mutex s_cacheMutex;
static KPCache s_cache;
private:
VirtualMemory* m_memory = nullptr;
size_t m_size = 0;
uint32_t m_epoch = 0xFFFFFFFFUL;
std::vector<uint32_t> m_DAGCache;
};
} /* namespace xmrig */
#endif /* XMRIG_KP_CACHE_H */
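The header exposes a process-wide singleton guarded by the public s_cacheMutex; callers are expected to lock, (re)initialize for the current epoch, and then read. A minimal usage sketch, assuming the caller already knows the epoch (the locking discipline is inferred from the public static mutex, not shown in this diff):

#include <mutex>
#include "crypto/kawpow/KPCache.h"

// Illustrative only: prepare the shared light cache for `epoch` and query its size.
bool prepare_cache(uint32_t epoch, size_t &size_out)
{
    std::lock_guard<std::mutex> lock(xmrig::KPCache::s_cacheMutex);
    if (!xmrig::KPCache::s_cache.init(epoch)) {  // no-op if already on this epoch
        return false;                            // epoch outside the size tables
    }
    size_out = xmrig::KPCache::s_cache.size();
    return true;
}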

View file

@@ -0,0 +1,353 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 tevador <tevador@gmail.com>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "crypto/kawpow/KPHash.h"
#include "crypto/kawpow/KPCache.h"
#include "3rdparty/libethash/ethash.h"
#include "3rdparty/libethash/ethash_internal.h"
#include "3rdparty/libethash/data_sizes.h"
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include <cstring>   // memcpy
#include <utility>   // std::swap
namespace xmrig {
static const uint32_t ravencoin_kawpow[15] = {
0x00000072, //R
0x00000041, //A
0x00000056, //V
0x00000045, //E
0x0000004E, //N
0x00000043, //C
0x0000004F, //O
0x00000049, //I
0x0000004E, //N
0x0000004B, //K
0x00000041, //A
0x00000057, //W
0x00000050, //P
0x0000004F, //O
0x00000057, //W
};
static const uint32_t fnv_prime = 0x01000193;
static const uint32_t fnv_offset_basis = 0x811c9dc5;
static inline uint32_t fnv1a(uint32_t u, uint32_t v)
{
return (u ^ v) * fnv_prime;
}
// George Marsaglia's KISS99 generator, used by ProgPoW/KawPow for program generation
static inline uint32_t kiss99(uint32_t& z, uint32_t& w, uint32_t& jsr, uint32_t& jcong)
{
z = 36969 * (z & 0xffff) + (z >> 16);
w = 18000 * (w & 0xffff) + (w >> 16);
jcong = 69069 * jcong + 1234567;
jsr ^= (jsr << 17);
jsr ^= (jsr >> 13);
jsr ^= (jsr << 5);
return (((z << 16) + w) ^ jcong) + jsr;
}
static inline uint32_t rotl(uint32_t n, uint32_t c)
{
#ifdef _MSC_VER
return _rotl(n, c);
#else
c &= 31;
uint32_t neg_c = (uint32_t)(-(int32_t)c);
return (n << c) | (n >> (neg_c & 31));
#endif
}
static inline uint32_t rotr(uint32_t n, uint32_t c)
{
#ifdef _MSC_VER
return _rotr(n, c);
#else
c &= 31;
uint32_t neg_c = (uint32_t)(-(int32_t)c);
return (n >> c) | (n << (neg_c & 31));
#endif
}
static inline void random_merge(uint32_t& a, uint32_t b, uint32_t selector)
{
const uint32_t x = (selector >> 16) % 31 + 1;
switch (selector % 4)
{
case 0:
a = (a * 33) + b;
break;
case 1:
a = (a ^ b) * 33;
break;
case 2:
a = rotl(a, x) ^ b;
break;
case 3:
a = rotr(a, x) ^ b;
break;
default:
#ifdef _MSC_VER
__assume(false);
#else
__builtin_unreachable();
#endif
break;
}
}
static inline uint32_t clz(uint32_t a)
{
#ifdef _MSC_VER
unsigned long index;
_BitScanReverse(&index, a);
return a ? (31 - index) : 32;
#else
return a ? (uint32_t)__builtin_clz(a) : 32;
#endif
}
static inline uint32_t popcount(uint32_t a)
{
#ifdef _MSC_VER
return __popcnt(a);
#else
return __builtin_popcount(a);
#endif
}
static inline uint32_t random_math(uint32_t a, uint32_t b, uint32_t selector)
{
switch (selector % 11)
{
case 0:
return a + b;
case 1:
return a * b;
case 2:
return (uint64_t(a) * b) >> 32;
case 3:
return (a < b) ? a : b;
case 4:
return rotl(a, b);
case 5:
return rotr(a, b);
case 6:
return a & b;
case 7:
return a | b;
case 8:
return a ^ b;
case 9:
return clz(a) + clz(b);
case 10:
return popcount(a) + popcount(b);
default:
#ifdef _MSC_VER
__assume(false);
#else
__builtin_unreachable();
#endif
break;
}
}
void KPHash::calculate(const KPCache& light_cache, uint32_t block_height, const uint8_t (&header_hash)[32], uint64_t nonce, uint32_t (&output)[8], uint32_t (&mix_hash)[8])
{
uint32_t keccak_state[25];
uint32_t mix[LANES][REGS];
memcpy(keccak_state, header_hash, sizeof(header_hash));
memcpy(keccak_state + 8, &nonce, sizeof(nonce));
memcpy(keccak_state + 10, ravencoin_kawpow, sizeof(ravencoin_kawpow));
ethash_keccakf800(keccak_state);
uint32_t z = fnv1a(fnv_offset_basis, keccak_state[0]);
uint32_t w = fnv1a(z, keccak_state[1]);
uint32_t jsr, jcong;
for (uint32_t l = 0; l < LANES; ++l) {
uint32_t z1 = z;
uint32_t w1 = w;
jsr = fnv1a(w, l);
jcong = fnv1a(jsr, l);
for (uint32_t r = 0; r < REGS; ++r) {
mix[l][r] = kiss99(z1, w1, jsr, jcong);
}
}
const uint32_t prog_number = block_height / PERIOD_LENGTH;
uint32_t dst_seq[REGS];
uint32_t src_seq[REGS];
z = fnv1a(fnv_offset_basis, prog_number);
w = fnv1a(z, 0);
jsr = fnv1a(w, prog_number);
jcong = fnv1a(jsr, 0);
for (uint32_t i = 0; i < REGS; ++i)
{
dst_seq[i] = i;
src_seq[i] = i;
}
for (uint32_t i = REGS; i > 1; --i)
{
std::swap(dst_seq[i - 1], dst_seq[kiss99(z, w, jsr, jcong) % i]);
std::swap(src_seq[i - 1], src_seq[kiss99(z, w, jsr, jcong) % i]);
}
const uint32_t epoch = light_cache.epoch();
const uint32_t num_items = static_cast<uint32_t>(dag_sizes[epoch] / ETHASH_MIX_BYTES / 2);
constexpr size_t num_words_per_lane = 256 / (sizeof(uint32_t) * LANES);
constexpr int max_operations = (CNT_CACHE > CNT_MATH) ? CNT_CACHE : CNT_MATH;
ethash_light cache;
cache.cache = light_cache.data();
cache.cache_size = light_cache.size();
cache.block_number = block_height;
cache.num_parent_nodes = cache.cache_size / sizeof(node);
KPCache::calculate_fast_mod_data(cache.num_parent_nodes, cache.reciprocal, cache.increment, cache.shift);
uint32_t z0 = z;
uint32_t w0 = w;
uint32_t jsr0 = jsr;
uint32_t jcong0 = jcong;
for (uint32_t r = 0; r < ETHASH_ACCESSES; ++r) {
uint32_t item_index = (mix[r % LANES][0] % num_items) * 4;
node item[4];
ethash_calculate_dag_item_opt(item + 0, item_index + 0, KPCache::num_dataset_parents, &cache);
ethash_calculate_dag_item_opt(item + 1, item_index + 1, KPCache::num_dataset_parents, &cache);
ethash_calculate_dag_item_opt(item + 2, item_index + 2, KPCache::num_dataset_parents, &cache);
ethash_calculate_dag_item_opt(item + 3, item_index + 3, KPCache::num_dataset_parents, &cache);
uint32_t dst_counter = 0;
uint32_t src_counter = 0;
z = z0;
w = w0;
jsr = jsr0;
jcong = jcong0;
for (uint32_t i = 0; i < max_operations; ++i) {
if (i < CNT_CACHE) {
const uint32_t src = src_seq[(src_counter++) % REGS];
const uint32_t dst = dst_seq[(dst_counter++) % REGS];
const uint32_t sel = kiss99(z, w, jsr, jcong);
for (uint32_t j = 0; j < LANES; ++j) {
random_merge(mix[j][dst], light_cache.l1_cache()[mix[j][src] % KPCache::l1_cache_num_items], sel);
}
}
if (i < CNT_MATH)
{
const uint32_t src_rnd = kiss99(z, w, jsr, jcong) % (REGS * (REGS - 1));
const uint32_t src1 = src_rnd % REGS;
uint32_t src2 = src_rnd / REGS;
if (src2 >= src1) {
++src2;
}
const uint32_t sel1 = kiss99(z, w, jsr, jcong);
const uint32_t dst = dst_seq[(dst_counter++) % REGS];
const uint32_t sel2 = kiss99(z, w, jsr, jcong);
for (size_t l = 0; l < LANES; ++l)
{
const uint32_t data = random_math(mix[l][src1], mix[l][src2], sel1);
random_merge(mix[l][dst], data, sel2);
}
}
}
uint32_t dsts[num_words_per_lane];
uint32_t sels[num_words_per_lane];
for (uint32_t i = 0; i < num_words_per_lane; ++i) {
dsts[i] = (i == 0) ? 0 : dst_seq[(dst_counter++) % REGS];
sels[i] = kiss99(z, w, jsr, jcong);
}
for (uint32_t l = 0; l < LANES; ++l) {
const uint32_t offset = ((l ^ r) % LANES) * num_words_per_lane;
for (size_t i = 0; i < num_words_per_lane; ++i) {
random_merge(mix[l][dsts[i]], ((uint32_t*)item)[offset + i], sels[i]);
}
}
}
uint32_t lane_hash[LANES];
for (uint32_t l = 0; l < LANES; ++l)
{
lane_hash[l] = fnv_offset_basis;
for (uint32_t i = 0; i < REGS; ++i) {
lane_hash[l] = fnv1a(lane_hash[l], mix[l][i]);
}
}
constexpr uint32_t num_words = 8;
for (uint32_t i = 0; i < num_words; ++i) {
mix_hash[i] = fnv_offset_basis;
}
for (uint32_t l = 0; l < LANES; ++l)
mix_hash[l % num_words] = fnv1a(mix_hash[l % num_words], lane_hash[l]);
memcpy(keccak_state + 8, mix_hash, sizeof(mix_hash));
memcpy(keccak_state + 16, ravencoin_kawpow, sizeof(uint32_t) * 9);
ethash_keccakf800(keccak_state);
memcpy(output, keccak_state, sizeof(output));
}
} // namespace xmrig
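Note that the random program (the kiss99 seeds z/w/jsr/jcong and the shuffled src_seq/dst_seq) depends only on prog_number = block_height / PERIOD_LENGTH, so every height inside one 3-block period executes an identical instruction mix; this is what makes per-period program caching or JIT compilation worthwhile for miners. An illustrative check, not part of the commit:

#include <cassert>
#include "crypto/kawpow/KPHash.h"

// Two heights in the same period share prog_number, hence the same program.
static_assert(xmrig::KPHash::PERIOD_LENGTH == 3, "period length taken from KPHash.h");

void period_example()
{
    const uint32_t h1 = 1200000, h2 = 1200002;
    assert(h1 / xmrig::KPHash::PERIOD_LENGTH == h2 / xmrig::KPHash::PERIOD_LENGTH);
}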

View file

@@ -0,0 +1,58 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 tevador <tevador@gmail.com>
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_KP_HASH_H
#define XMRIG_KP_HASH_H
#include <stdint.h>
namespace xmrig
{
class KPCache;
class KPHash
{
public:
static constexpr uint32_t EPOCH_LENGTH = 7500;
static constexpr uint32_t PERIOD_LENGTH = 3;
static constexpr int CNT_CACHE = 11;
static constexpr int CNT_MATH = 18;
static constexpr uint32_t REGS = 32;
static constexpr uint32_t LANES = 16;
static void calculate(const KPCache& light_cache, uint32_t block_height, const uint8_t (&header_hash)[32], uint64_t nonce, uint32_t (&output)[8], uint32_t (&mix_hash)[8]);
};
} /* namespace xmrig */
#endif /* XMRIG_KP_HASH_H */
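Putting the two classes together, one KawPow evaluation looks roughly like the sketch below. The epoch derivation from EPOCH_LENGTH is an assumption based on the constants in this header; the diff itself does not show the call site.

#include <mutex>
#include "crypto/kawpow/KPCache.h"
#include "crypto/kawpow/KPHash.h"

// Hedged sketch of a caller (not part of this commit).
void kawpow_hash(uint32_t block_height, const uint8_t (&header_hash)[32],
                 uint64_t nonce, uint32_t (&output)[8], uint32_t (&mix_hash)[8])
{
    const uint32_t epoch = block_height / xmrig::KPHash::EPOCH_LENGTH; // assumed mapping
    std::lock_guard<std::mutex> lock(xmrig::KPCache::s_cacheMutex);
    if (xmrig::KPCache::s_cache.init(epoch)) {
        xmrig::KPHash::calculate(xmrig::KPCache::s_cache, block_height,
                                 header_hash, nonce, output, mix_hash);
    }
}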

View file

@@ -7,8 +7,8 @@
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 tevador <tevador@gmail.com>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2020 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@
#include "backend/common/Tags.h"
#include "backend/cpu/CpuConfig.h"
#include "base/io/log/Log.h"
#include "base/io/log/Tags.h"
#include "crypto/rx/RxConfig.h"
#include "crypto/rx/RxQueue.h"
@@ -41,7 +42,6 @@ class RxPrivate;
static bool osInitialized = false;
static bool msrInitialized = false;
static const char *tag = BLUE_BG(WHITE_BOLD_S " rx ") " ";
static RxPrivate *d_ptr = nullptr;
@@ -59,7 +59,7 @@ public:
const char *xmrig::rx_tag()
{
return tag;
return Tags::randomx();
}

View file

@@ -50,7 +50,7 @@
namespace xmrig {
static const char *tag = YELLOW_BG_BOLD(WHITE_BOLD_S " msr ") " ";
static const char *tag = YELLOW_BG_BOLD(WHITE_BOLD_S " msr ") " ";
static MsrItems savedState;

View file

@@ -49,7 +49,7 @@ namespace xmrig {
static bool reuseDriver = false;
static const char *tag = YELLOW_BG_BOLD(WHITE_BOLD_S " msr ") " ";
static const char *tag = YELLOW_BG_BOLD(WHITE_BOLD_S " msr ") " ";
static MsrItems savedState;