Added support for Uplexa (cn/upx2 algorithm)

SChernykh 2021-04-17 14:53:42 +02:00
parent 6cb398bb42
commit da7f5826cb
23 changed files with 5867 additions and 5720 deletions


@@ -96,6 +96,11 @@ public:
return CN_ITER / 8;
# endif
# ifdef XMRIG_ALGO_CN_FEMTO
case Algorithm::CN_UPX2:
return CN_ITER / 32;
# endif
default:
break;
}
@@ -111,6 +116,12 @@ public:
}
# endif
# ifdef XMRIG_ALGO_CN_FEMTO
if (algo == Algorithm::CN_UPX2) {
return 0x1FFF0;
}
# endif
return ((memory(algo) - 1) / 16) * 16;
}
@@ -149,6 +160,9 @@ public:
# ifdef XMRIG_ALGO_CN_PICO
case Algorithm::CN_PICO_0:
case Algorithm::CN_PICO_TLO:
# endif
# ifdef XMRIG_ALGO_CN_FEMTO
case Algorithm::CN_UPX2:
# endif
return Algorithm::CN_2;
@@ -176,6 +190,7 @@ template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_FAST>::base() con
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_RTO>::base() const { return Algorithm::CN_1; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_LITE_1>::base() const { return Algorithm::CN_1; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_HEAVY_TUBE>::base() const { return Algorithm::CN_1; }
template<> constexpr inline Algorithm::Id CnAlgo<Algorithm::CN_UPX2>::base() const { return Algorithm::CN_2; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_FAST>::iterations() const { return CN_ITER / 2; }
@@ -192,6 +207,7 @@ template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_ZLS>::iterations() con
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_0>::iterations() const { return CN_ITER / 8; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_TLO>::iterations() const { return CN_ITER / 8; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_CCX>::iterations() const { return CN_ITER / 2; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_UPX2>::iterations() const { return CN_ITER / 32; }
template<> constexpr inline size_t CnAlgo<Algorithm::CN_LITE_0>::memory() const { return CN_MEMORY / 2; }
@@ -201,9 +217,11 @@ template<> constexpr inline size_t CnAlgo<Algorithm::CN_HEAVY_TUBE>::memory() co
template<> constexpr inline size_t CnAlgo<Algorithm::CN_HEAVY_XHV>::memory() const { return CN_MEMORY * 2; }
template<> constexpr inline size_t CnAlgo<Algorithm::CN_PICO_0>::memory() const { return CN_MEMORY / 8; }
template<> constexpr inline size_t CnAlgo<Algorithm::CN_PICO_TLO>::memory() const { return CN_MEMORY / 8; }
template<> constexpr inline size_t CnAlgo<Algorithm::CN_UPX2>::memory() const { return CN_MEMORY / 16; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_0>::mask() const { return 0x1FFF0; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_UPX2>::mask() const { return 0x1FFF0; }
} /* namespace xmrig */
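
Taken together, the specializations above pin down the cn/upx2 profile: a 16x smaller scratchpad and 32x fewer iterations than the CN_2 base, plus a hard-coded scratchpad mask. A minimal sketch of how the numbers relate, assuming xmrig's usual base constants of CN_MEMORY = 2 MiB and CN_ITER = 0x80000 (those values are not part of this diff, so treat them as assumptions):

#include <cstdint>
#include <cstdio>

// Assumed base constants (not shown in this diff): 2 MiB scratchpad, 0x80000 iterations.
constexpr size_t   CN_MEMORY = 0x200000;
constexpr uint32_t CN_ITER   = 0x80000;

// cn/upx2 values implied by the specializations added in this commit.
constexpr size_t   UPX2_MEMORY = CN_MEMORY / 16;                 // 128 KiB
constexpr uint32_t UPX2_ITER   = CN_ITER / 32;                   // 16384 (0x4000)
constexpr uint32_t UPX2_MASK   = ((UPX2_MEMORY - 1) / 16) * 16;  // generic mask formula

// The hard-coded mask() specialization matches what the generic formula yields.
static_assert(UPX2_MASK == 0x1FFF0, "mask must match the value hard-coded in CnAlgo.h");

int main()
{
    std::printf("cn/upx2: memory=%zu bytes, iterations=%u, mask=0x%X\n",
                UPX2_MEMORY, UPX2_ITER, UPX2_MASK);
}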


@@ -99,8 +99,11 @@ cn_mainloop_fun cn_double_mainloop_ryzen_asm = nullptr;
cn_mainloop_fun cn_double_mainloop_bulldozer_asm = nullptr;
cn_mainloop_fun cn_double_double_mainloop_sandybridge_asm = nullptr;
cn_mainloop_fun cn_upx2_mainloop_asm = nullptr;
cn_mainloop_fun cn_upx2_double_mainloop_asm = nullptr;
template<typename T, typename U>
template<Algorithm::Id SOURCE_ALGO = Algorithm::CN_2, typename T, typename U>
static void patchCode(T dst, U src, const uint32_t iterations, const uint32_t mask = CnAlgo<Algorithm::CN_HALF>().mask())
{
auto p = reinterpret_cast<const uint8_t*>(src);
@@ -124,11 +127,11 @@ static void patchCode(T dst, U src, const uint32_t iterations, const uint32_t ma
auto patched_data = reinterpret_cast<uint8_t*>(dst);
for (size_t i = 0; i + sizeof(uint32_t) <= size; ++i) {
switch (*(uint32_t*)(patched_data + i)) {
case CnAlgo<Algorithm::CN_2>().iterations():
case CnAlgo<SOURCE_ALGO>().iterations():
*(uint32_t*)(patched_data + i) = iterations;
break;
case CnAlgo<Algorithm::CN_2>().mask():
case CnAlgo<SOURCE_ALGO>().mask():
*(uint32_t*)(patched_data + i) = mask;
break;
}
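
For context, the assembly variants are not hand-written per algorithm: patchCode() copies an existing compiled main loop and rewrites the 32-bit literal constants (iteration count and scratchpad mask) embedded in the machine code. The new SOURCE_ALGO template parameter tells it which algorithm's constants to search for, instead of always assuming the CN_2 values. A stripped-down sketch of that scan-and-replace step (illustrative names, unaligned-safe reads via memcpy):

#include <cstdint>
#include <cstring>

// Sketch only: walk the copied machine code and rewrite any 32-bit immediate
// equal to the source algorithm's iteration count or mask. src_iter/src_mask
// stand in for CnAlgo<SOURCE_ALGO>().iterations() and .mask().
static void patch_immediates(uint8_t *code, size_t size,
                             uint32_t src_iter, uint32_t dst_iter,
                             uint32_t src_mask, uint32_t dst_mask)
{
    for (size_t i = 0; i + sizeof(uint32_t) <= size; ++i) {
        uint32_t value;
        std::memcpy(&value, code + i, sizeof(value));          // may be unaligned
        if (value == src_iter) {
            std::memcpy(code + i, &dst_iter, sizeof(dst_iter));
        }
        else if (value == src_mask) {
            std::memcpy(code + i, &dst_mask, sizeof(dst_mask));
        }
    }
}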
@@ -138,7 +141,7 @@ static void patchCode(T dst, U src, const uint32_t iterations, const uint32_t ma
static void patchAsmVariants()
{
const int allocation_size = 81920;
const int allocation_size = 131072;
auto base = static_cast<uint8_t *>(VirtualMemory::allocateExecutableMemory(allocation_size, false));
cn_half_mainloop_ivybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x0000);
@@ -170,6 +173,11 @@ static void patchAsmVariants()
cn_tlo_double_mainloop_sandybridge_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x13000);
# endif
# ifdef XMRIG_ALGO_CN_FEMTO
cn_upx2_mainloop_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x14000);
cn_upx2_double_mainloop_asm = reinterpret_cast<cn_mainloop_fun> (base + 0x15000);
# endif
{
constexpr uint32_t ITER = CnAlgo<Algorithm::CN_HALF>().iterations();
@@ -219,6 +227,16 @@ static void patchAsmVariants()
patchCode(cn_double_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER);
}
# ifdef XMRIG_ALGO_CN_FEMTO
{
constexpr uint32_t ITER = CnAlgo<Algorithm::CN_UPX2>().iterations();
constexpr uint32_t MASK = CnAlgo<Algorithm::CN_UPX2>().mask();
patchCode<Algorithm::CN_RWZ>(cn_upx2_mainloop_asm, cnv2_rwz_mainloop_asm, ITER, MASK);
patchCode<Algorithm::CN_RWZ>(cn_upx2_double_mainloop_asm, cnv2_rwz_double_mainloop_asm, ITER, MASK);
}
#endif
VirtualMemory::protectRX(base, allocation_size);
VirtualMemory::flushInstructionCache(base, allocation_size);
}
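
The interesting detail in the block above is the choice of source template: the cn/upx2 loops are generated from the compiled cnv2_rwz main loops, with patchCode instantiated as SOURCE_ALGO = CN_RWZ. Two things follow. First, the immediates embedded in the RWZ code are RWZ's own iteration count and mask, so searching for the CN_2 defaults would find nothing; that is exactly what the new template parameter fixes. Second, RWZ is the right donor because cn/upx2 uses the same reversed VARIANT2 shuffle order as cn/rwz, as the shuffle changes later in this commit show. A hedged sketch of the substitution (the RWZ values are what I would expect from CnAlgo.h, not shown in this diff):

#include <cstdint>

// Assumed cn/rwz constants (not part of this diff): 3/4 of the base iterations
// and the full 2 MiB scratchpad mask. These are the immediates the patcher has
// to find inside the copied cnv2_rwz main loop.
constexpr uint32_t RWZ_ITER = 0x60000;
constexpr uint32_t RWZ_MASK = 0x1FFFF0;

// cn/upx2 replacement values from this commit.
constexpr uint32_t UPX2_ITER = 0x80000 / 32;   // 0x4000
constexpr uint32_t UPX2_MASK = 0x1FFF0;

// patchCode<Algorithm::CN_RWZ>(dst, cnv2_rwz_mainloop_asm, UPX2_ITER, UPX2_MASK)
// then amounts to: replace every RWZ_ITER immediate with UPX2_ITER and every
// RWZ_MASK immediate with UPX2_MASK in the copied code, before protectRX()
// flips the buffer to read-execute and the instruction cache is flushed.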
@@ -272,6 +290,17 @@ xmrig::CnHash::CnHash()
ADD_FN(Algorithm::CN_CCX);
# ifdef XMRIG_ALGO_CN_FEMTO
ADD_FN(Algorithm::CN_UPX2);
ADD_FN_ASM(Algorithm::CN_UPX2);
# ifdef _MSC_VER
// This is somehow faster on Ryzen
m_map[Algorithm::CN_UPX2][AV_DOUBLE][Assembly::RYZEN] = cryptonight_double_hash<Algorithm::CN_UPX2, false>;
# endif
# endif
# ifdef XMRIG_ALGO_ARGON2
m_map[Algorithm::AR2_CHUKWA][AV_SINGLE][Assembly::NONE] = argon2::single_hash<Algorithm::AR2_CHUKWA>;
m_map[Algorithm::AR2_CHUKWA][AV_SINGLE_SOFT][Assembly::NONE] = argon2::single_hash<Algorithm::AR2_CHUKWA>;
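
In the cn/upx2 registration above, ADD_FN wires up the C++/intrinsics implementations and ADD_FN_ASM the patched assembly ones; the MSVC-only line then overrides a single slot so the Ryzen assembly flavour of the two-way (double) hash falls back to the intrinsics template, which the author found faster there. A self-contained illustration of that override pattern (stub types, not xmrig's real table):

#include <array>
#include <cstdio>

using hash_fn = void (*)();

static void asm_double_hash()        { std::puts("asm double hash"); }
static void intrinsics_double_hash() { std::puts("intrinsics double hash"); }

enum Assembly { NONE, INTEL, RYZEN, BULLDOZER, ASM_MAX };

int main()
{
    // One row of a dispatch table: double-hash entries indexed by assembly flavour.
    std::array<hash_fn, ASM_MAX> double_hash{};
    double_hash.fill(asm_double_hash);            // what the ASM registration does
    double_hash[RYZEN] = intrinsics_double_hash;  // the per-CPU override in this commit

    double_hash[RYZEN]();   // "intrinsics double hash"
    double_hash[INTEL]();   // "asm double hash"
}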


@@ -395,7 +395,7 @@ static inline void cryptonight_monero_tweak(const uint8_t* l, uint64_t idx, __m1
uint64_t* mem_out = (uint64_t*)&l[idx];
if (props.base() == Algorithm::CN_2) {
VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (ALGO == Algorithm::CN_RWZ ? 1 : 0));
VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
_mm_store_si128((__m128i *)mem_out, _mm_xor_si128(bx0, cx));
} else {
__m128i tmp = _mm_xor_si128(bx0, cx);
@@ -528,7 +528,7 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
if (ALGO == Algorithm::CN_R) {
VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx0, bx1, cx, 0);
} else {
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0));
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
}
}
@@ -704,7 +704,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
if (ALGO == Algorithm::CN_R) {
VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx00, bx01, cx0, 0);
} else {
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0));
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
}
}
@@ -764,7 +764,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
if (ALGO == Algorithm::CN_R) {
VARIANT2_SHUFFLE(l1, idx1 & MASK, ax1, bx10, bx11, cx1, 0);
} else {
VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0));
VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
}
}
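
The (ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2) test is repeated verbatim at every VARIANT2_SHUFFLE / VARIANT2_SHUFFLE2 call site, both here and again in the x86 path further down. Purely as an editorial sketch (not part of this commit), the property could be named once in a small constexpr helper so a future variant with the reversed shuffle only needs one change:

// Editorial sketch only, not part of this commit: a helper next to the other
// Algorithm/CnAlgo utilities that names the "reversed shuffle" property once.
constexpr int reversed_shuffle(Algorithm::Id algo)
{
    return (algo == Algorithm::CN_RWZ || algo == Algorithm::CN_UPX2) ? 1 : 0;
}

// Call sites would then read:
//   VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, reversed_shuffle(ALGO));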


@@ -370,6 +370,23 @@ const static uint8_t test_output_pico_tlo[160] = {
#endif
#ifdef XMRIG_ALGO_CN_FEMTO
// "cn/upx2"
const static uint8_t test_output_femto_upx2[160] = {
0xAA, 0xBB, 0xB8, 0xED, 0x14, 0xA8, 0x35, 0xFA, 0x22, 0xCF, 0xB1, 0xB5, 0xDE, 0xA8, 0x72, 0xB0,
0xA1, 0xD6, 0xCB, 0xD8, 0x46, 0xF4, 0x39, 0x1C, 0x0F, 0x01, 0xF3, 0x87, 0x5E, 0x3A, 0x37, 0x61,
0x38, 0x59, 0x15, 0x72, 0xF8, 0x20, 0xD4, 0xDE, 0x25, 0x3C, 0xF5, 0x5A, 0x21, 0x92, 0xB6, 0x22,
0xB0, 0x28, 0x9E, 0x2E, 0x5C, 0x36, 0x16, 0xE6, 0x1E, 0x78, 0x7A, 0x8F, 0xE4, 0x62, 0xEC, 0x5A,
0xFD, 0x58, 0xCC, 0x6F, 0x3C, 0xD3, 0x8A, 0x0B, 0x5B, 0x6C, 0x83, 0x4E, 0x9B, 0xD4, 0xC2, 0x5A,
0x43, 0x2C, 0x48, 0x98, 0xF3, 0x16, 0xCA, 0x87, 0xE9, 0x5F, 0x44, 0x93, 0x53, 0x48, 0x00, 0xA3,
0xE8, 0xE4, 0xB6, 0x9D, 0x5A, 0x3B, 0x49, 0x2C, 0x21, 0xE9, 0x4B, 0x02, 0xFC, 0x87, 0x8D, 0x75,
0x66, 0x05, 0xAF, 0xA3, 0x9D, 0xC9, 0xD8, 0x88, 0x2D, 0x67, 0x31, 0x21, 0x4C, 0x4D, 0x88, 0x7D,
0x86, 0x9E, 0x4D, 0x74, 0xF4, 0x4C, 0x57, 0x27, 0xCF, 0xEF, 0x86, 0x01, 0xB0, 0x52, 0x18, 0xF3,
0xAD, 0xE4, 0x52, 0x5E, 0xB0, 0x4A, 0x97, 0xB4, 0x96, 0x18, 0xB6, 0x9C, 0x93, 0x0E, 0x49, 0xBB,
};
#endif
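
The new reference vector is 160 bytes, i.e. five consecutive 32-byte digests laid out the same way as the other test_output_* arrays, presumably one per fixed self-test case; the self-test hashes each input with cn/upx2 and compares the result against its slice. A rough sketch of that comparison (illustrative names, not the real harness):

#include <cstdint>
#include <cstring>

// Illustrative only: check one computed 32-byte hash against the matching
// slice of a 160-byte reference vector such as test_output_femto_upx2.
static bool matches_reference(const uint8_t *computed,          // 32-byte digest
                              const uint8_t (&reference)[160],  // expected digests
                              size_t case_index)                // 0..4
{
    return std::memcmp(computed, reference + case_index * 32, 32) == 0;
}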
#ifdef XMRIG_ALGO_ARGON2
// "argon2/chukwa"
const static uint8_t argon2_chukwa_test_out[160] = {


@@ -563,7 +563,7 @@ static inline void cryptonight_monero_tweak(uint64_t *mem_out, const uint8_t *l,
constexpr CnAlgo<ALGO> props;
if (props.base() == Algorithm::CN_2) {
VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (ALGO == Algorithm::CN_RWZ ? 1 : 0));
VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
_mm_store_si128(reinterpret_cast<__m128i *>(mem_out), _mm_xor_si128(bx0, cx));
} else {
__m128i tmp = _mm_xor_si128(bx0, cx);
@@ -715,7 +715,7 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
if (ALGO == Algorithm::CN_R) {
VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx0, bx1, cx, 0);
} else {
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0));
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
}
}
@@ -822,6 +822,8 @@ extern cn_mainloop_fun cn_double_mainloop_ryzen_asm;
extern cn_mainloop_fun cn_double_mainloop_bulldozer_asm;
extern cn_mainloop_fun cn_double_double_mainloop_sandybridge_asm;
extern cn_mainloop_fun cn_upx2_mainloop_asm;
extern cn_mainloop_fun cn_upx2_double_mainloop_asm;
} // namespace xmrig
@@ -934,6 +936,11 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_
cn_double_mainloop_bulldozer_asm(ctx);
}
}
# ifdef XMRIG_ALGO_CN_FEMTO
else if (ALGO == Algorithm::CN_UPX2) {
cn_upx2_mainloop_asm(ctx);
}
# endif
else if (props.isR()) {
ctx[0]->generated_code(ctx);
}
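
ALGO is a template parameter here, so the growing else-if chain has no runtime cost: for each instantiation only one branch can ever run and the optimizer drops the rest as dead code. A minimal self-contained illustration of the pattern (stub enum, not xmrig's types):

#include <cstdio>

enum class Algo { CN_2, CN_RWZ, CN_UPX2 };

// ALGO is a compile-time constant, so each instantiation effectively keeps
// one branch; the other is unreachable and removed by the optimizer.
template<Algo ALGO>
void run_mainloop()
{
    if (ALGO == Algo::CN_UPX2) {
        std::puts("cn_upx2 mainloop (patched at startup)");
    }
    else {
        std::puts("pre-built mainloop");
    }
}

int main()
{
    run_mainloop<Algo::CN_UPX2>();  // prints the upx2 line only
    run_mainloop<Algo::CN_RWZ>();   // prints the generic line only
}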
@@ -976,6 +983,11 @@ inline void cryptonight_double_hash_asm(const uint8_t *__restrict__ input, size_
else if (ALGO == Algorithm::CN_PICO_TLO) {
cn_tlo_double_mainloop_sandybridge_asm(ctx);
}
# endif
# ifdef XMRIG_ALGO_CN_FEMTO
else if (ALGO == Algorithm::CN_UPX2) {
cn_upx2_double_mainloop_asm(ctx);
}
# endif
else if (ALGO == Algorithm::CN_RWZ) {
cnv2_rwz_double_mainloop_asm(ctx);
@@ -1134,7 +1146,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
if (ALGO == Algorithm::CN_R) {
VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx00, bx01, cx0, 0);
} else {
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0));
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
}
}
@@ -1192,7 +1204,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
if (ALGO == Algorithm::CN_R) {
VARIANT2_SHUFFLE(l1, idx1 & MASK, ax1, bx10, bx11, cx1, 0);
} else {
VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0));
VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
}
}
@@ -1302,7 +1314,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
if (ALGO == Algorithm::CN_R) { \
VARIANT2_SHUFFLE(l, idx & MASK, a, b0, b1, c, 0); \
} else { \
VARIANT2_SHUFFLE2(l, idx & MASK, a, b0, b1, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0)); \
VARIANT2_SHUFFLE2(l, idx & MASK, a, b0, b1, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0)); \
} \
} \
if (ALGO == Algorithm::CN_R) { \