Implemented new-style algorithm definitions (except ARM), removed the Algo and Variant enums.

XMRig committed 2019-06-13 22:08:52 +07:00
parent d7f42d54ad
commit 1f0e3e501c
30 changed files with 1223 additions and 1359 deletions

src/crypto/cn/CnAlgo.h (new file, 207 additions)

@@ -0,0 +1,207 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CN_ALGO_H
#define XMRIG_CN_ALGO_H
#include <stddef.h>
#include <stdint.h>
#include "crypto/common/Algorithm.h"
namespace xmrig
{
template<Algorithm::Id ALGO = Algorithm::INVALID>
class CnAlgo
{
public:
constexpr inline CnAlgo()
{
static_assert(ALGO != Algorithm::INVALID && m_memory[ALGO] > 0, "invalid CRYPTONIGHT algorithm");
static_assert(sizeof(m_memory) / sizeof(m_memory[0]) == Algorithm::MAX, "memory table size mismatch");
static_assert(sizeof(m_iterations) / sizeof(m_iterations[0]) == Algorithm::MAX, "iterations table size mismatch");
static_assert(sizeof(m_base) / sizeof(m_base[0]) == Algorithm::MAX, "base table size mismatch");
}
constexpr inline Algorithm::Id base() const { return m_base[ALGO]; } // CN_0/CN_1/CN_2 family this variant derives from
constexpr inline bool isHeavy() const { return memory() == CN_MEMORY * 2; }
constexpr inline bool isR() const { return ALGO == Algorithm::CN_R || ALGO == Algorithm::CN_WOW; }
constexpr inline size_t memory() const { return m_memory[ALGO]; }
constexpr inline uint32_t iterations() const { return m_iterations[ALGO]; }
constexpr inline uint32_t mask() const { return ((memory() - 1) / 16) * 16; } // highest 16-byte-aligned scratchpad offset, e.g. 0x1FFFF0 for 2 MB
inline static size_t memory(Algorithm::Id algo)
{
switch (Algorithm::family(algo)) {
case Algorithm::CN:
return CN_MEMORY;
case Algorithm::CN_LITE:
return CN_MEMORY / 2;
case Algorithm::CN_HEAVY:
return CN_MEMORY * 2;
case Algorithm::CN_PICO:
return CN_MEMORY / 8;
default:
break;
}
return 0;
}
inline static uint32_t mask(Algorithm::Id algo)
{
# ifdef XMRIG_ALGO_CN_GPU
if (algo == Algorithm::CN_GPU) {
return 0x1FFFC0;
}
# endif
# ifdef XMRIG_ALGO_CN_PICO
if (algo == Algorithm::CN_PICO_0) {
return 0x1FFF0;
}
# endif
return ((memory(algo) - 1) / 16) * 16;
}
private:
constexpr const static size_t CN_MEMORY = 0x200000;
constexpr const static uint32_t CN_ITER = 0x80000;
constexpr const static size_t m_memory[] = {
CN_MEMORY, // CN_0
CN_MEMORY, // CN_1
CN_MEMORY, // CN_2
CN_MEMORY, // CN_R
CN_MEMORY, // CN_WOW
CN_MEMORY, // CN_FAST
CN_MEMORY, // CN_HALF
CN_MEMORY, // CN_XAO
CN_MEMORY, // CN_RTO
CN_MEMORY, // CN_RWZ
CN_MEMORY, // CN_ZLS
CN_MEMORY, // CN_DOUBLE
# ifdef XMRIG_ALGO_CN_GPU
CN_MEMORY, // CN_GPU
# endif
# ifdef XMRIG_ALGO_CN_LITE
CN_MEMORY / 2, // CN_LITE_0
CN_MEMORY / 2, // CN_LITE_1
# endif
# ifdef XMRIG_ALGO_CN_HEAVY
CN_MEMORY * 2, // CN_HEAVY_0
CN_MEMORY * 2, // CN_HEAVY_TUBE
CN_MEMORY * 2, // CN_HEAVY_XHV
# endif
# ifdef XMRIG_ALGO_CN_PICO
CN_MEMORY / 8, // CN_PICO_0
# endif
};
constexpr const static uint32_t m_iterations[] = {
CN_ITER, // CN_0
CN_ITER, // CN_1
CN_ITER, // CN_2
CN_ITER, // CN_R
CN_ITER, // CN_WOW
CN_ITER / 2, // CN_FAST
CN_ITER / 2, // CN_HALF
CN_ITER * 2, // CN_XAO
CN_ITER, // CN_RTO
0x60000, // CN_RWZ
0x60000, // CN_ZLS
CN_ITER * 2, // CN_DOUBLE
# ifdef XMRIG_ALGO_CN_GPU
0xC000, // CN_GPU
# endif
# ifdef XMRIG_ALGO_CN_LITE
CN_ITER / 2, // CN_LITE_0
CN_ITER / 2, // CN_LITE_1
# endif
# ifdef XMRIG_ALGO_CN_HEAVY
CN_ITER / 2, // CN_HEAVY_0
CN_ITER / 2, // CN_HEAVY_TUBE
CN_ITER / 2, // CN_HEAVY_XHV
# endif
# ifdef XMRIG_ALGO_CN_PICO
CN_ITER / 8, // CN_PICO_0
# endif
};
constexpr const static Algorithm::Id m_base[] = {
Algorithm::CN_0, // CN_0
Algorithm::CN_1, // CN_1
Algorithm::CN_2, // CN_2
Algorithm::CN_2, // CN_R
Algorithm::CN_2, // CN_WOW
Algorithm::CN_1, // CN_FAST
Algorithm::CN_2, // CN_HALF
Algorithm::CN_0, // CN_XAO
Algorithm::CN_1, // CN_RTO
Algorithm::CN_2, // CN_RWZ
Algorithm::CN_2, // CN_ZLS
Algorithm::CN_2, // CN_DOUBLE
# ifdef XMRIG_ALGO_CN_GPU
Algorithm::CN_GPU, // CN_GPU
# endif
# ifdef XMRIG_ALGO_CN_LITE
Algorithm::CN_0, // CN_LITE_0
Algorithm::CN_1, // CN_LITE_1
# endif
# ifdef XMRIG_ALGO_CN_HEAVY
Algorithm::CN_0, // CN_HEAVY_0
Algorithm::CN_1, // CN_HEAVY_TUBE
Algorithm::CN_0, // CN_HEAVY_XHV
# endif
# ifdef XMRIG_ALGO_CN_PICO
Algorithm::CN_2, // CN_PICO_0
# endif
};
};
#ifdef XMRIG_ALGO_CN_GPU
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_GPU>::mask() const { return 0x1FFFC0; }
#endif
#ifdef XMRIG_ALGO_CN_PICO
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_0>::mask() const { return 0x1FFF0; }
#endif
} /* namespace xmrig */
#endif /* XMRIG_CN_ALGO_H */
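
For reference, a minimal usage sketch of the new class (not part of this commit; assumes the Algorithm ids defined above, and an XMRIG_ALGO_CN_LITE build for the last line):

#include "crypto/cn/CnAlgo.h"

using namespace xmrig;

// Compile-time queries: usable in static_assert and as template arguments.
constexpr CnAlgo<Algorithm::CN_HALF> props;
static_assert(props.memory() == 0x200000, "cn/half keeps the full 2 MB scratchpad");
static_assert(props.iterations() == 0x40000, "cn/half runs half of the CN_2 iterations");
static_assert(props.base() == Algorithm::CN_2, "cn/half derives from CN_2");

// Runtime queries for code that only holds an Algorithm::Id.
const size_t scratchpad = CnAlgo<>::memory(Algorithm::CN_LITE_0); // 1 MB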

src/crypto/cn/CnHash.cpp (new file, 269 additions)

@@ -0,0 +1,269 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include "common/cpu/Cpu.h"
#include "crypto/cn/CnHash.h"
#include "crypto/common/VirtualMemory.h"
#if defined(XMRIG_ARM)
# include "crypto/cn/CryptoNight_arm.h"
#else
# include "crypto/cn/CryptoNight_x86.h"
#endif
#define ADD_FN(algo) \
m_map[algo][AV_SINGLE][ASM_NONE] = cryptonight_single_hash<algo, false>; \
m_map[algo][AV_SINGLE_SOFT][ASM_NONE] = cryptonight_single_hash<algo, true>; \
m_map[algo][AV_DOUBLE][ASM_NONE] = cryptonight_double_hash<algo, false>; \
m_map[algo][AV_DOUBLE_SOFT][ASM_NONE] = cryptonight_double_hash<algo, true>; \
m_map[algo][AV_TRIPLE][ASM_NONE] = cryptonight_triple_hash<algo, false>; \
m_map[algo][AV_TRIPLE_SOFT][ASM_NONE] = cryptonight_triple_hash<algo, true>; \
m_map[algo][AV_QUAD][ASM_NONE] = cryptonight_quad_hash<algo, false>; \
m_map[algo][AV_QUAD_SOFT][ASM_NONE] = cryptonight_quad_hash<algo, true>; \
m_map[algo][AV_PENTA][ASM_NONE] = cryptonight_penta_hash<algo, false>; \
m_map[algo][AV_PENTA_SOFT][ASM_NONE] = cryptonight_penta_hash<algo, true>;
#ifdef XMRIG_FEATURE_ASM
# define ADD_FN_ASM(algo) \
m_map[algo][AV_SINGLE][ASM_INTEL] = cryptonight_single_hash_asm<algo, ASM_INTEL>; \
m_map[algo][AV_SINGLE][ASM_RYZEN] = cryptonight_single_hash_asm<algo, ASM_RYZEN>; \
m_map[algo][AV_SINGLE][ASM_BULLDOZER] = cryptonight_single_hash_asm<algo, ASM_BULLDOZER>; \
m_map[algo][AV_DOUBLE][ASM_INTEL] = cryptonight_double_hash_asm<algo, ASM_INTEL>; \
m_map[algo][AV_DOUBLE][ASM_RYZEN] = cryptonight_double_hash_asm<algo, ASM_RYZEN>; \
m_map[algo][AV_DOUBLE][ASM_BULLDOZER] = cryptonight_double_hash_asm<algo, ASM_BULLDOZER>;
extern "C" void cnv2_mainloop_ivybridge_asm(cryptonight_ctx **ctx);
extern "C" void cnv2_mainloop_ryzen_asm(cryptonight_ctx **ctx);
extern "C" void cnv2_mainloop_bulldozer_asm(cryptonight_ctx **ctx);
extern "C" void cnv2_double_mainloop_sandybridge_asm(cryptonight_ctx **ctx);
namespace xmrig {
cn_mainloop_fun cn_half_mainloop_ivybridge_asm = nullptr;
cn_mainloop_fun cn_half_mainloop_ryzen_asm = nullptr;
cn_mainloop_fun cn_half_mainloop_bulldozer_asm = nullptr;
cn_mainloop_fun cn_half_double_mainloop_sandybridge_asm = nullptr;
cn_mainloop_fun cn_trtl_mainloop_ivybridge_asm = nullptr;
cn_mainloop_fun cn_trtl_mainloop_ryzen_asm = nullptr;
cn_mainloop_fun cn_trtl_mainloop_bulldozer_asm = nullptr;
cn_mainloop_fun cn_trtl_double_mainloop_sandybridge_asm = nullptr;
cn_mainloop_fun cn_zls_mainloop_ivybridge_asm = nullptr;
cn_mainloop_fun cn_zls_mainloop_ryzen_asm = nullptr;
cn_mainloop_fun cn_zls_mainloop_bulldozer_asm = nullptr;
cn_mainloop_fun cn_zls_double_mainloop_sandybridge_asm = nullptr;
cn_mainloop_fun cn_double_mainloop_ivybridge_asm = nullptr;
cn_mainloop_fun cn_double_mainloop_ryzen_asm = nullptr;
cn_mainloop_fun cn_double_mainloop_bulldozer_asm = nullptr;
cn_mainloop_fun cn_double_double_mainloop_sandybridge_asm = nullptr;
template<typename T, typename U>
static void patchCode(T dst, U src, const uint32_t iterations, const uint32_t mask = CnAlgo<Algorithm::CN_HALF>().mask())
{
const uint8_t* p = reinterpret_cast<const uint8_t*>(src);
// Workaround for Visual Studio placing trampoline in debug builds.
# if defined(_MSC_VER)
if (p[0] == 0xE9) {
p += *(int32_t*)(p + 1) + 5;
}
# endif
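// Determine the code size: each assembly template ends with a 0xDEADC0DE marker.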
size_t size = 0;
while (*(uint32_t*)(p + size) != 0xDEADC0DE) {
++size;
}
size += sizeof(uint32_t);
memcpy((void*) dst, (const void*) src, size);
uint8_t* patched_data = reinterpret_cast<uint8_t*>(dst);
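// The cnv2 templates are compiled with CN_2 constants; rewrite every literal match below with the target algorithm's iterations and mask.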
for (size_t i = 0; i + sizeof(uint32_t) <= size; ++i) {
switch (*(uint32_t*)(patched_data + i)) {
case CnAlgo<Algorithm::CN_2>().iterations():
*(uint32_t*)(patched_data + i) = iterations;
break;
case CnAlgo<Algorithm::CN_2>().mask():
*(uint32_t*)(patched_data + i) = mask;
break;
}
}
}
static void patchAsmVariants()
{
const int allocation_size = 65536;
uint8_t *base = static_cast<uint8_t *>(VirtualMemory::allocateExecutableMemory(allocation_size));
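// Each patched variant gets its own 4 KB slot inside the single executable allocation.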
cn_half_mainloop_ivybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x0000);
cn_half_mainloop_ryzen_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x1000);
cn_half_mainloop_bulldozer_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x2000);
cn_half_double_mainloop_sandybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x3000);
# ifdef XMRIG_ALGO_CN_PICO
cn_trtl_mainloop_ivybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x4000);
cn_trtl_mainloop_ryzen_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x5000);
cn_trtl_mainloop_bulldozer_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x6000);
cn_trtl_double_mainloop_sandybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x7000);
# endif
cn_zls_mainloop_ivybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x8000);
cn_zls_mainloop_ryzen_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x9000);
cn_zls_mainloop_bulldozer_asm = reinterpret_cast<cn_mainloop_fun> (base + 0xA000);
cn_zls_double_mainloop_sandybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0xB000);
cn_double_mainloop_ivybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0xC000);
cn_double_mainloop_ryzen_asm = reinterpret_cast<cn_mainloop_fun> (base + 0xD000);
cn_double_mainloop_bulldozer_asm = reinterpret_cast<cn_mainloop_fun> (base + 0xE000);
cn_double_double_mainloop_sandybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0xF000);
{
constexpr uint32_t ITER = CnAlgo<Algorithm::CN_HALF>().iterations();
patchCode(cn_half_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, ITER);
patchCode(cn_half_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, ITER);
patchCode(cn_half_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, ITER);
patchCode(cn_half_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER);
}
# ifdef XMRIG_ALGO_CN_PICO
{
constexpr uint32_t ITER = CnAlgo<Algorithm::CN_PICO_0>().iterations();
constexpr uint32_t MASK = CnAlgo<Algorithm::CN_PICO_0>().mask();
patchCode(cn_trtl_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, ITER, MASK);
patchCode(cn_trtl_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, ITER, MASK);
patchCode(cn_trtl_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, ITER, MASK);
patchCode(cn_trtl_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER, MASK);
}
# endif
{
constexpr uint32_t ITER = CnAlgo<Algorithm::CN_ZLS>().iterations();
patchCode(cn_zls_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, ITER);
patchCode(cn_zls_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, ITER);
patchCode(cn_zls_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, ITER);
patchCode(cn_zls_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER);
}
{
constexpr uint32_t ITER = CnAlgo<Algorithm::CN_DOUBLE>().iterations();
patchCode(cn_double_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, ITER);
patchCode(cn_double_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, ITER);
patchCode(cn_double_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, ITER);
patchCode(cn_double_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER);
}
VirtualMemory::protectExecutableMemory(base, allocation_size);
VirtualMemory::flushInstructionCache(base, allocation_size);
}
} // namespace xmrig
#else
# define ADD_FN_ASM(algo)
#endif
xmrig::CnHash::CnHash()
{
ADD_FN(Algorithm::CN_0);
ADD_FN(Algorithm::CN_1);
ADD_FN(Algorithm::CN_2);
ADD_FN(Algorithm::CN_R);
ADD_FN(Algorithm::CN_WOW);
ADD_FN(Algorithm::CN_FAST);
ADD_FN(Algorithm::CN_HALF);
ADD_FN(Algorithm::CN_XAO);
ADD_FN(Algorithm::CN_RTO);
ADD_FN(Algorithm::CN_RWZ);
ADD_FN(Algorithm::CN_ZLS);
ADD_FN(Algorithm::CN_DOUBLE);
ADD_FN_ASM(Algorithm::CN_2);
ADD_FN_ASM(Algorithm::CN_HALF);
ADD_FN_ASM(Algorithm::CN_R);
ADD_FN_ASM(Algorithm::CN_WOW);
ADD_FN_ASM(Algorithm::CN_RWZ);
ADD_FN_ASM(Algorithm::CN_ZLS);
ADD_FN_ASM(Algorithm::CN_DOUBLE);
# ifdef XMRIG_ALGO_CN_GPU
m_map[Algorithm::CN_GPU][AV_SINGLE][ASM_NONE] = cryptonight_single_hash_gpu<Algorithm::CN_GPU, false>;
m_map[Algorithm::CN_GPU][AV_SINGLE_SOFT][ASM_NONE] = cryptonight_single_hash_gpu<Algorithm::CN_GPU, true>;
# endif
# ifdef XMRIG_ALGO_CN_LITE
ADD_FN(Algorithm::CN_LITE_0);
ADD_FN(Algorithm::CN_LITE_1);
# endif
# ifdef XMRIG_ALGO_CN_HEAVY
ADD_FN(Algorithm::CN_HEAVY_0);
ADD_FN(Algorithm::CN_HEAVY_TUBE);
ADD_FN(Algorithm::CN_HEAVY_XHV);
# endif
# ifdef XMRIG_ALGO_CN_PICO
ADD_FN(Algorithm::CN_PICO_0);
ADD_FN_ASM(Algorithm::CN_PICO_0);
# endif
# ifdef XMRIG_FEATURE_ASM
patchAsmVariants();
# endif
}
xmrig::cn_hash_fun xmrig::CnHash::fn(const Algorithm &algorithm, AlgoVariant av, Assembly assembly) const
{
if (!algorithm.isValid()) {
return nullptr;
}
# ifdef XMRIG_FEATURE_ASM
cn_hash_fun fun = m_map[algorithm][av][assembly == ASM_AUTO ? Cpu::info()->assembly() : assembly];
if (fun) {
return fun;
}
# endif
return m_map[algorithm][av][ASM_NONE];
}
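
A sketch of how callers resolve and invoke a hash function through the new table (not part of this commit; scratchpad/context allocation is omitted, and Algorithm is assumed to be implicitly constructible from Algorithm::Id):

xmrig::CnHash hash;

// AV_SINGLE + ASM_AUTO: one hash per call, assembly picked for the detected CPU,
// falling back to the ASM_NONE row when no asm kernel is registered for the algorithm.
const xmrig::cn_hash_fun fn = hash.fn(xmrig::Algorithm::CN_HALF, xmrig::AV_SINGLE, xmrig::ASM_AUTO);

uint8_t input[76] = {}; // typical hashing blob
uint8_t output[32];
if (fn) {
    fn(input, sizeof(input), output, ctx, 0); // ctx: caller-allocated cryptonight_ctx**
}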

src/crypto/cn/CnHash.h (new file, 63 additions)

@@ -0,0 +1,63 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CN_HASH_H
#define XMRIG_CN_HASH_H
#include <stddef.h>
#include <stdint.h>
#include "common/xmrig.h"
#include "crypto/cn/CnAlgo.h"
struct cryptonight_ctx;
namespace xmrig
{
typedef void (*cn_hash_fun)(const uint8_t *input, size_t size, uint8_t *output, cryptonight_ctx **ctx, uint64_t height);
typedef void (*cn_mainloop_fun)(cryptonight_ctx **ctx);
class CnHash
{
public:
CnHash();
cn_hash_fun fn(const Algorithm &algorithm, AlgoVariant av, Assembly assembly) const;
private:
cn_hash_fun m_map[Algorithm::MAX][AV_MAX][ASM_MAX] = {};
};
} /* namespace xmrig */
#endif /* XMRIG_CN_HASH_H */

@@ -42,10 +42,10 @@ typedef void(*cn_mainloop_fun_ms_abi)(cryptonight_ctx**) ABI_ATTRIBUTE;
struct cryptonight_r_data {
- int variant;
+ int algo;
uint64_t height;
- bool match(const int v, const uint64_t h) const { return (v == variant) && (h == height); }
+ bool match(const int a, const uint64_t h) const { return (a == algo) && (h == height); }
};
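
This struct keys the cached CryptonightR generated code by (algo, height); a hypothetical guard (the generated_code_data field name is an assumption, it is not shown in this hunk):

// Regenerate the random-math program only when the algorithm or height changed.
if (!ctx[0]->generated_code_data.match(xmrig::Algorithm::CN_R, height)) {
    // ... re-emit code for this height, then remember the key:
    ctx[0]->generated_code_data = { xmrig::Algorithm::CN_R, height };
}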

@@ -1,251 +0,0 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef XMRIG_CRYPTONIGHT_CONSTANTS_H
#define XMRIG_CRYPTONIGHT_CONSTANTS_H
#include <stddef.h>
#include <stdint.h>
#include "common/xmrig.h"
namespace xmrig
{
constexpr const size_t CRYPTONIGHT_MEMORY = 2 * 1024 * 1024;
constexpr const uint32_t CRYPTONIGHT_MASK = 0x1FFFF0;
constexpr const uint32_t CRYPTONIGHT_ITER = 0x80000;
constexpr const uint32_t CRYPTONIGHT_HALF_ITER = 0x40000;
constexpr const uint32_t CRYPTONIGHT_XAO_ITER = 0x100000;
constexpr const uint32_t CRYPTONIGHT_DOUBLE_ITER = 0x100000;
constexpr const uint32_t CRYPTONIGHT_WALTZ_ITER = 0x60000;
constexpr const uint32_t CRYPTONIGHT_ZLS_ITER = 0x60000;
constexpr const uint32_t CRYPTONIGHT_GPU_ITER = 0xC000;
constexpr const uint32_t CRYPTONIGHT_GPU_MASK = 0x1FFFC0;
constexpr const size_t CRYPTONIGHT_LITE_MEMORY = 1 * 1024 * 1024;
constexpr const uint32_t CRYPTONIGHT_LITE_MASK = 0xFFFF0;
constexpr const uint32_t CRYPTONIGHT_LITE_ITER = 0x40000;
constexpr const size_t CRYPTONIGHT_HEAVY_MEMORY = 4 * 1024 * 1024;
constexpr const uint32_t CRYPTONIGHT_HEAVY_MASK = 0x3FFFF0;
constexpr const uint32_t CRYPTONIGHT_HEAVY_ITER = 0x40000;
constexpr const size_t CRYPTONIGHT_PICO_MEMORY = 256 * 1024;
constexpr const uint32_t CRYPTONIGHT_PICO_MASK = 0x1FFF0;
constexpr const uint32_t CRYPTONIGHT_PICO_ITER = 0x40000;
constexpr const uint32_t CRYPTONIGHT_TRTL_ITER = 0x10000;
template<Algo ALGO> inline constexpr size_t cn_select_memory() { return 0; }
template<> inline constexpr size_t cn_select_memory<CRYPTONIGHT>() { return CRYPTONIGHT_MEMORY; }
template<> inline constexpr size_t cn_select_memory<CRYPTONIGHT_LITE>() { return CRYPTONIGHT_LITE_MEMORY; }
template<> inline constexpr size_t cn_select_memory<CRYPTONIGHT_HEAVY>() { return CRYPTONIGHT_HEAVY_MEMORY; }
template<> inline constexpr size_t cn_select_memory<CRYPTONIGHT_PICO>() { return CRYPTONIGHT_PICO_MEMORY; }
inline size_t cn_select_memory(Algo algorithm)
{
switch(algorithm)
{
case CRYPTONIGHT:
return CRYPTONIGHT_MEMORY;
case CRYPTONIGHT_LITE:
return CRYPTONIGHT_LITE_MEMORY;
case CRYPTONIGHT_HEAVY:
return CRYPTONIGHT_HEAVY_MEMORY;
case CRYPTONIGHT_PICO:
return CRYPTONIGHT_PICO_MEMORY;
default:
break;
}
return 0;
}
template<Algo ALGO> inline constexpr uint32_t cn_select_mask() { return 0; }
template<> inline constexpr uint32_t cn_select_mask<CRYPTONIGHT>() { return CRYPTONIGHT_MASK; }
template<> inline constexpr uint32_t cn_select_mask<CRYPTONIGHT_LITE>() { return CRYPTONIGHT_LITE_MASK; }
template<> inline constexpr uint32_t cn_select_mask<CRYPTONIGHT_HEAVY>() { return CRYPTONIGHT_HEAVY_MASK; }
template<> inline constexpr uint32_t cn_select_mask<CRYPTONIGHT_PICO>() { return CRYPTONIGHT_PICO_MASK; }
inline uint32_t cn_select_mask(Algo algorithm)
{
switch(algorithm)
{
case CRYPTONIGHT:
return CRYPTONIGHT_MASK;
case CRYPTONIGHT_LITE:
return CRYPTONIGHT_LITE_MASK;
case CRYPTONIGHT_HEAVY:
return CRYPTONIGHT_HEAVY_MASK;
case CRYPTONIGHT_PICO:
return CRYPTONIGHT_PICO_MASK;
default:
break;
}
return 0;
}
template<Algo ALGO, Variant variant> inline constexpr uint32_t cn_select_iter() { return 0; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_0>() { return CRYPTONIGHT_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_1>() { return CRYPTONIGHT_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_2>() { return CRYPTONIGHT_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_WOW>() { return CRYPTONIGHT_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_4>() { return CRYPTONIGHT_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_XTL>() { return CRYPTONIGHT_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_HALF>() { return CRYPTONIGHT_HALF_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_MSR>() { return CRYPTONIGHT_HALF_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_XAO>() { return CRYPTONIGHT_XAO_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_RTO>() { return CRYPTONIGHT_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_GPU>() { return CRYPTONIGHT_GPU_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_RWZ>() { return CRYPTONIGHT_WALTZ_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_ZLS>() { return CRYPTONIGHT_ZLS_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_DOUBLE>() { return CRYPTONIGHT_DOUBLE_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_LITE, VARIANT_0>() { return CRYPTONIGHT_LITE_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_LITE, VARIANT_1>() { return CRYPTONIGHT_LITE_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_HEAVY, VARIANT_0>() { return CRYPTONIGHT_HEAVY_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_HEAVY, VARIANT_XHV>() { return CRYPTONIGHT_HEAVY_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_HEAVY, VARIANT_TUBE>() { return CRYPTONIGHT_HEAVY_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_PICO, VARIANT_TRTL>() { return CRYPTONIGHT_TRTL_ITER; }
inline uint32_t cn_select_iter(Algo algorithm, Variant variant)
{
switch (variant) {
case VARIANT_MSR:
case VARIANT_HALF:
return CRYPTONIGHT_HALF_ITER;
case VARIANT_GPU:
return CRYPTONIGHT_GPU_ITER;
case VARIANT_RTO:
case VARIANT_DOUBLE:
return CRYPTONIGHT_XAO_ITER;
case VARIANT_TRTL:
return CRYPTONIGHT_TRTL_ITER;
case VARIANT_RWZ:
case VARIANT_ZLS:
return CRYPTONIGHT_WALTZ_ITER;
default:
break;
}
switch(algorithm)
{
case CRYPTONIGHT:
return CRYPTONIGHT_ITER;
case CRYPTONIGHT_LITE:
return CRYPTONIGHT_LITE_ITER;
case CRYPTONIGHT_HEAVY:
return CRYPTONIGHT_HEAVY_ITER;
case CRYPTONIGHT_PICO:
return CRYPTONIGHT_TRTL_ITER;
default:
break;
}
return 0;
}
template<Variant variant> inline constexpr Variant cn_base_variant() { return VARIANT_0; }
template<> inline constexpr Variant cn_base_variant<VARIANT_0>() { return VARIANT_0; }
template<> inline constexpr Variant cn_base_variant<VARIANT_1>() { return VARIANT_1; }
template<> inline constexpr Variant cn_base_variant<VARIANT_TUBE>() { return VARIANT_1; }
template<> inline constexpr Variant cn_base_variant<VARIANT_XTL>() { return VARIANT_1; }
template<> inline constexpr Variant cn_base_variant<VARIANT_MSR>() { return VARIANT_1; }
template<> inline constexpr Variant cn_base_variant<VARIANT_XHV>() { return VARIANT_0; }
template<> inline constexpr Variant cn_base_variant<VARIANT_XAO>() { return VARIANT_0; }
template<> inline constexpr Variant cn_base_variant<VARIANT_RTO>() { return VARIANT_1; }
template<> inline constexpr Variant cn_base_variant<VARIANT_2>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_HALF>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_TRTL>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_GPU>() { return VARIANT_GPU; }
template<> inline constexpr Variant cn_base_variant<VARIANT_WOW>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_4>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_RWZ>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_ZLS>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_DOUBLE>() { return VARIANT_2; }
inline Variant cn_base_variant(Variant variant)
{
switch (variant) {
case VARIANT_0:
case VARIANT_XHV:
case VARIANT_XAO:
return VARIANT_0;
case VARIANT_1:
case VARIANT_TUBE:
case VARIANT_XTL:
case VARIANT_MSR:
case VARIANT_RTO:
return VARIANT_1;
case VARIANT_GPU:
return VARIANT_GPU;
default:
break;
}
return VARIANT_2;
}
template<Variant variant> inline constexpr bool cn_is_cryptonight_r() { return false; }
template<> inline constexpr bool cn_is_cryptonight_r<VARIANT_WOW>() { return true; }
template<> inline constexpr bool cn_is_cryptonight_r<VARIANT_4>() { return true; }
} /* namespace xmrig */
#endif /* XMRIG_CRYPTONIGHT_CONSTANTS_H */

@@ -33,21 +33,21 @@
#ifndef XMRIG_ARM
# define VARIANT1_INIT(part) \
uint64_t tweak1_2_##part = 0; \
- if (BASE == xmrig::VARIANT_1) { \
+ if (BASE == Algorithm::CN_1) { \
tweak1_2_##part = (*reinterpret_cast<const uint64_t*>(input + 35 + part * size) ^ \
*(reinterpret_cast<const uint64_t*>(ctx[part]->state) + 24)); \
}
#else
# define VARIANT1_INIT(part) \
uint64_t tweak1_2_##part = 0; \
- if (BASE == xmrig::VARIANT_1) { \
+ if (BASE == Algorithm::CN_1) { \
memcpy(&tweak1_2_##part, input + 35 + part * size, sizeof tweak1_2_##part); \
tweak1_2_##part ^= *(reinterpret_cast<const uint64_t*>(ctx[part]->state) + 24); \
}
#endif
#define VARIANT1_1(p) \
- if (BASE == xmrig::VARIANT_1) { \
+ if (BASE == Algorithm::CN_1) { \
const uint8_t tmp = reinterpret_cast<const uint8_t*>(p)[11]; \
static const uint32_t table = 0x75310; \
const uint8_t index = (((tmp >> 3) & 6) | (tmp & 1)) << 1; \
@@ -55,20 +55,20 @@
}
#define VARIANT1_2(p, part) \
- if (BASE == xmrig::VARIANT_1) { \
+ if (BASE == Algorithm::CN_1) { \
(p) ^= tweak1_2_##part; \
}
#ifndef XMRIG_ARM
# define VARIANT2_INIT(part) \
- __m128i division_result_xmm_##part = _mm_cvtsi64_si128(h##part[12]); \
- __m128i sqrt_result_xmm_##part = _mm_cvtsi64_si128(h##part[13]);
+ __m128i division_result_xmm_##part = _mm_cvtsi64_si128(static_cast<int64_t>(h##part[12])); \
+ __m128i sqrt_result_xmm_##part = _mm_cvtsi64_si128(static_cast<int64_t>(h##part[13]));
#ifdef _MSC_VER
- # define VARIANT2_SET_ROUNDING_MODE() if (BASE == xmrig::VARIANT_2) { _control87(RC_DOWN, MCW_RC); }
+ # define VARIANT2_SET_ROUNDING_MODE() if (BASE == Algorithm::CN_2) { _control87(RC_DOWN, MCW_RC); }
#else
- # define VARIANT2_SET_ROUNDING_MODE() if (BASE == xmrig::VARIANT_2) { fesetround(FE_DOWNWARD); }
+ # define VARIANT2_SET_ROUNDING_MODE() if (BASE == Algorithm::CN_2) { fesetround(FE_DOWNWARD); }
#endif
# define VARIANT2_INTEGER_MATH(part, cl, cx) \
@@ -91,7 +91,7 @@
_mm_store_si128((__m128i *)((base_ptr) + ((offset) ^ 0x10)), _mm_add_epi64(chunk3, _b1)); \
_mm_store_si128((__m128i *)((base_ptr) + ((offset) ^ 0x20)), _mm_add_epi64(chunk1, _b)); \
_mm_store_si128((__m128i *)((base_ptr) + ((offset) ^ 0x30)), _mm_add_epi64(chunk2, _a)); \
- if (VARIANT == xmrig::VARIANT_4) { \
+ if (ALGO == Algorithm::CN_R) { \
_c = _mm_xor_si128(_mm_xor_si128(_c, chunk3), _mm_xor_si128(chunk1, chunk2)); \
} \
} while (0)
@@ -141,7 +141,7 @@
vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x10)), vaddq_u64(chunk3, vreinterpretq_u64_u8(_b1))); \
vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x20)), vaddq_u64(chunk1, vreinterpretq_u64_u8(_b))); \
vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x30)), vaddq_u64(chunk2, vreinterpretq_u64_u8(_a))); \
- if (VARIANT == xmrig::VARIANT_4) { \
+ if (ALGO == Algorithm::CN_R) { \
_c = veorq_u64(veorq_u64(_c, chunk3), veorq_u64(chunk1, chunk2)); \
} \
} while (0)
@@ -184,17 +184,17 @@
#define VARIANT4_RANDOM_MATH_INIT(part) \
uint32_t r##part[9]; \
struct V4_Instruction code##part[256]; \
- if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { \
- r##part[0] = (uint32_t)(h##part[12]); \
- r##part[1] = (uint32_t)(h##part[12] >> 32); \
- r##part[2] = (uint32_t)(h##part[13]); \
- r##part[3] = (uint32_t)(h##part[13] >> 32); \
+ if (props.isR()) { \
+ r##part[0] = static_cast<uint32_t>(h##part[12]); \
+ r##part[1] = static_cast<uint32_t>(h##part[12] >> 32); \
+ r##part[2] = static_cast<uint32_t>(h##part[13]); \
+ r##part[3] = static_cast<uint32_t>(h##part[13] >> 32); \
} \
- v4_random_math_init<VARIANT>(code##part, height);
+ v4_random_math_init<ALGO>(code##part, height);
#define VARIANT4_RANDOM_MATH(part, al, ah, cl, bx0, bx1) \
- if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { \
- cl ^= (r##part[0] + r##part[1]) | ((uint64_t)(r##part[2] + r##part[3]) << 32); \
+ if (props.isR()) { \
+ cl ^= (r##part[0] + r##part[1]) | (static_cast<uint64_t>(r##part[2] + r##part[3]) << 32); \
r##part[4] = static_cast<uint32_t>(al); \
r##part[5] = static_cast<uint32_t>(ah); \
r##part[6] = static_cast<uint32_t>(_mm_cvtsi128_si32(bx0)); \

@@ -156,21 +156,6 @@ const static uint8_t test_output_v2[160] = {
};
// "cn/xtl" Stellite (XTL)
const static uint8_t test_output_xtl[160] = {
0x8F, 0xE5, 0xF0, 0x5F, 0x02, 0x2A, 0x61, 0x7D, 0xE5, 0x3F, 0x79, 0x36, 0x4B, 0x25, 0xCB, 0xC3,
0xC0, 0x8E, 0x0E, 0x1F, 0xE3, 0xBE, 0x48, 0x57, 0x07, 0x03, 0xFE, 0xE1, 0xEC, 0x0E, 0xB0, 0xB1,
0x21, 0x26, 0xFF, 0x98, 0xE6, 0x86, 0x08, 0x5B, 0xC9, 0x96, 0x44, 0xA3, 0xB8, 0x4E, 0x28, 0x90,
0x76, 0xED, 0xAD, 0xB9, 0xAA, 0xAC, 0x01, 0x94, 0x1D, 0xBE, 0x3E, 0xEA, 0xAD, 0xEE, 0xB2, 0xCF,
0xB0, 0x43, 0x4B, 0x88, 0xFC, 0xB2, 0xF3, 0x82, 0x9D, 0xD7, 0xDF, 0x51, 0x97, 0x2C, 0x5A, 0xE3,
0xC7, 0x16, 0x0B, 0xC8, 0x7C, 0xB7, 0x2F, 0x1C, 0x55, 0x33, 0xCA, 0xE1, 0xEE, 0x08, 0xA4, 0x86,
0x60, 0xED, 0x6E, 0x9D, 0x2D, 0x05, 0x0D, 0x7D, 0x02, 0x49, 0x23, 0x39, 0x7C, 0xC3, 0x6D, 0x3D,
0x05, 0x51, 0x28, 0xF1, 0x9B, 0x3C, 0xDF, 0xC4, 0xEA, 0x8A, 0xA6, 0x6A, 0x3C, 0x8B, 0xE2, 0xAF,
0x47, 0x00, 0xFC, 0x36, 0xED, 0x50, 0xBB, 0xD2, 0x2E, 0x63, 0x4B, 0x93, 0x11, 0x0C, 0xA7, 0xBA,
0x32, 0x6E, 0x47, 0x4D, 0xCE, 0xCC, 0x82, 0x54, 0x1D, 0x06, 0xF8, 0x06, 0x86, 0xBD, 0x22, 0x48
};
// "cn/half"
const static uint8_t test_output_half[160] = {
0x5D, 0x4F, 0xBC, 0x35, 0x60, 0x97, 0xEA, 0x64, 0x40, 0xB0, 0x88, 0x8E, 0xDE, 0xB6, 0x35, 0xDD,

File diff suppressed because it is too large.

@@ -22,7 +22,9 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "crypto/cn/CryptoNight_constants.h"
#include "crypto/cn/CnAlgo.h"
#ifdef __GNUC__
# include <x86intrin.h>
@@ -206,4 +208,4 @@ void cn_gpu_inner_avx(const uint8_t* spad, uint8_t* lpad)
}
}
- template void cn_gpu_inner_avx<xmrig::CRYPTONIGHT_GPU_ITER, xmrig::CRYPTONIGHT_GPU_MASK>(const uint8_t* spad, uint8_t* lpad);
+ template void cn_gpu_inner_avx<xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().iterations(), xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().mask()>(const uint8_t* spad, uint8_t* lpad);

@@ -22,7 +22,9 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "crypto/cn/CryptoNight_constants.h"
#include "crypto/cn/CnAlgo.h"
#ifdef __GNUC__
# include <x86intrin.h>
@@ -207,4 +209,4 @@ void cn_gpu_inner_ssse3(const uint8_t* spad, uint8_t* lpad)
}
}
- template void cn_gpu_inner_ssse3<xmrig::CRYPTONIGHT_GPU_ITER, xmrig::CRYPTONIGHT_GPU_MASK>(const uint8_t* spad, uint8_t* lpad);
+ template void cn_gpu_inner_ssse3<xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().iterations(), xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().mask()>(const uint8_t* spad, uint8_t* lpad);

@@ -1,6 +1,13 @@
#ifndef VARIANT4_RANDOM_MATH_H
#define VARIANT4_RANDOM_MATH_H
+ #include <string.h>
+ #include "crypto/common/Algorithm.h"
extern "C"
{
#include "crypto/cn/c_blake256.h"
@@ -182,7 +189,7 @@ static FORCEINLINE void check_data(size_t* data_index, const size_t bytes_needed
// Generates as many random math operations as possible with given latency and ALU restrictions
// "code" array must have space for NUM_INSTRUCTIONS_MAX+1 instructions
- template<xmrig::Variant VARIANT>
+ template<xmrig::Algorithm::Id ALGO>
static int v4_random_math_init(struct V4_Instruction* code, const uint64_t height)
{
// MUL is 3 cycles, 3-way addition and rotations are 2 cycles, SUB/XOR are 1 cycle
@@ -204,8 +211,7 @@ static int v4_random_math_init(struct V4_Instruction* code, const uint64_t height)
memset(data, 0, sizeof(data));
uint64_t tmp = SWAP64LE(height);
memcpy(data, &tmp, sizeof(uint64_t));
- if (VARIANT == xmrig::VARIANT_4)
- {
+ if (ALGO == xmrig::Algorithm::CN_R) {
data[20] = -38;
}
@@ -249,7 +255,7 @@ static int v4_random_math_init(struct V4_Instruction* code, const uint64_t height)
code_size = 0;
int total_iterations = 0;
- r8_used = (VARIANT == xmrig::VARIANT_WOW);
+ r8_used = (ALGO == xmrig::Algorithm::CN_WOW);
// Generate random code to achieve minimal required latency for our abstract CPU
// Try to get this latency for all 4 registers
@@ -291,10 +297,9 @@ static int v4_random_math_init(struct V4_Instruction* code, const uint64_t height)
int b = src_index;
// Don't do ADD/SUB/XOR with the same register
- if (((opcode == ADD) || (opcode == SUB) || (opcode == XOR)) && (a == b))
- {
+ if (((opcode == ADD) || (opcode == SUB) || (opcode == XOR)) && (a == b)) {
// a is always < 4, so we don't need to check bounds here
- b = (VARIANT == xmrig::VARIANT_WOW) ? (a + 4) : 8;
+ b = (ALGO == xmrig::Algorithm::CN_WOW) ? (a + 4) : 8;
src_index = b;
}