Merge xmrig v6.11.0 into master

commit fd8ed2c6a6
MoneroOcean, 2021-04-06 15:17:08 +00:00
41 changed files with 760 additions and 275 deletions

View file

@@ -190,8 +190,8 @@
         r##part[1] = static_cast<uint32_t>(h##part[12] >> 32); \
         r##part[2] = static_cast<uint32_t>(h##part[13]); \
         r##part[3] = static_cast<uint32_t>(h##part[13] >> 32); \
-    } \
-    v4_random_math_init<ALGO>(code##part, height);
+        v4_random_math_init<ALGO>(code##part, height); \
+    }
 
 #define VARIANT4_RANDOM_MATH(part, al, ah, cl, bx0, bx1) \
     if (props.isR()) { \
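
The whole fix in the hunk above is where the line-continuation backslash sits. In the old text, "} \" kept the macro going, and the v4_random_math_init<ALGO>() call, having no trailing backslash, was the macro's final line, so it ran even when props.isR() was false. The new text pulls the call inside the if block and ends the macro on "}". A minimal sketch of the pitfall, with hypothetical names rather than xmrig's:

    // The line WITHOUT a trailing backslash is the last line of the macro.
    #define MATH_INIT_OLD          \
        if (is_r_variant) {        \
            /* set up registers */ \
        }                          \
        init_code(code);           // runs even when !is_r_variant

    #define MATH_INIT_NEW          \
        if (is_r_variant) {        \
            /* set up registers */ \
            init_code(code);       \
        }                          // now guarded by the variant check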

View file

@@ -743,8 +743,18 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
 #   ifdef XMRIG_ALGO_CN_HEAVY
     if (props.isHeavy()) {
         int64_t n = ((int64_t*)&l0[interleaved_index<interleave>(idx0 & MASK)])[0];
-        int32_t d = ((int32_t*)&l0[interleaved_index<interleave>(idx0 & MASK)])[2];
-        int64_t q = n / (d | 0x5);
+        int64_t d = ((int32_t*)&l0[interleaved_index<interleave>(idx0 & MASK)])[2];
+
+        int64_t d5;
+
+#       if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 8))
+        d5 = d | 5;
+#       else
+        // Workaround for stupid GCC which converts to 32 bit before doing "| 5" and then converts back to 64 bit
+        asm("mov %1, %0\n\tor $5, %0" : "=r"(d5) : "r"(d));
+#       endif
+
+        int64_t q = n / d5;
         ((int64_t*)&l0[interleaved_index<interleave>(idx0 & MASK)])[0] = n ^ q;
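
Stripped of the compiler workaround, this hunk changes one thing: the scratchpad word is widened to 64 bits before "| 5" is applied. The value is the same either way, since sign extension commutes with OR-ing in the low bits here; the inline asm only pins the OR to a 64-bit instruction on GCC releases that would otherwise bounce through 32 bits. A self-contained sketch of the arithmetic, as an illustrative helper rather than xmrig's code:

    #include <cstdint>

    // Sign-extend the 32-bit scratchpad word, force the divisor odd and
    // non-zero with "| 5" (so the signed division cannot trap on zero),
    // divide in 64 bits, and mix the quotient back in with XOR.
    static int64_t cn_heavy_step(int64_t n, int32_t d32)
    {
        const int64_t d5 = static_cast<int64_t>(d32) | 5;
        const int64_t q  = n / d5;
        return n ^ q;   // the diff stores this value back into the scratchpad
    }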

View file

@@ -343,7 +343,7 @@ typedef union ALIGN_STRUCT(16) SIMDVec {
 // Older gcc does not define vld1q_u8_x4 type
 #if defined(__GNUC__) && !defined(__clang__) && \
-    ((__GNUC__ == 10 && (__GNUC_MINOR__ <= 1)) || \
+    ((__GNUC__ == 10 && (__GNUC_MINOR__ <= 2)) || \
     (__GNUC__ == 9 && (__GNUC_MINOR__ <= 3)) || \
     (__GNUC__ == 8 && (__GNUC_MINOR__ <= 4)) || __GNUC__ <= 7)
 FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
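
The widened range now also catches GCC 10.2, whose arm_neon.h still lacks the vld1q_u8_x4 intrinsic, so the guard enables sse2neon's polyfill on that compiler too. The fallback amounts to four consecutive 16-byte NEON loads; a sketch of that shape (the in-tree body may differ in details):

    #include <arm_neon.h>

    // Emulates vld1q_u8_x4: load 64 bytes as four uint8x16_t vectors.
    static inline uint8x16x4_t vld1q_u8_x4_fallback(const uint8_t *p)
    {
        uint8x16x4_t r;
        r.val[0] = vld1q_u8(p);
        r.val[1] = vld1q_u8(p + 16);
        r.val[2] = vld1q_u8(p + 32);
        r.val[3] = vld1q_u8(p + 48);
        return r;
    }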

View file

@@ -103,7 +103,7 @@ namespace randomx {
 #endif
 #endif
-#if defined(_M_X64) || defined(__x86_64__)
+#if defined(XMRIG_FEATURE_ASM) && (defined(_M_X64) || defined(__x86_64__))
 #define RANDOMX_HAVE_COMPILER 1
 class JitCompilerX86;
 using JitCompiler = JitCompilerX86;
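
This hunk and the three that follow tighten the same gate: being on x86-64 is no longer enough to select the x86 JIT, the build must also enable XMRIG_FEATURE_ASM, and anything else falls through to the portable paths. The overall selection looks roughly like the sketch below; JitCompilerX86 and JitCompilerA64 come from the surrounding headers, while the JitCompilerFallback branch is upstream RandomX's name for the no-JIT (interpreter) case, assumed here rather than shown in the diff:

    namespace randomx {

    class JitCompilerX86;
    class JitCompilerA64;
    class JitCompilerFallback;

    #if defined(XMRIG_FEATURE_ASM) && (defined(_M_X64) || defined(__x86_64__))
    using JitCompiler = JitCompilerX86;       // x86-64 machine-code generator
    #elif defined(__aarch64__)
    using JitCompiler = JitCompilerA64;       // AArch64 machine-code generator
    #else
    using JitCompiler = JitCompilerFallback;  // no JIT: programs run interpreted
    #endif

    } // namespace randomx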

View file

@@ -28,7 +28,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #pragma once
-#if defined(_M_X64) || defined(__x86_64__)
+#if defined(XMRIG_FEATURE_ASM) && (defined(_M_X64) || defined(__x86_64__))
 #include "crypto/randomx/jit_compiler_x86.hpp"
 #elif defined(__aarch64__)
 #include "crypto/randomx/jit_compiler_a64.hpp"

View file

@@ -174,7 +174,7 @@ RandomX_ConfigurationBase::RandomX_ConfigurationBase()
     fillAes4Rx4_Key[6] = rx_set_int_vec_i128(0xf63befa7, 0x2ba9660a, 0xf765a38b, 0xf273c9e7);
     fillAes4Rx4_Key[7] = rx_set_int_vec_i128(0xc0b0762d, 0x0c06d1fd, 0x915839de, 0x7a7cd609);
-#   if defined(_M_X64) || defined(__x86_64__)
+#   if defined(XMRIG_FEATURE_ASM) && (defined(_M_X64) || defined(__x86_64__))
     // Workaround for Visual Studio placing trampoline in debug builds.
     auto addr = [](void (*func)()) {
         const uint8_t* p = reinterpret_cast<const uint8_t*>(func);
@@ -239,10 +239,9 @@ void RandomX_ConfigurationBase::Apply()
     ScratchpadL3Mask_Calculated = (((ScratchpadL3_Size / sizeof(uint64_t)) - 1) * 8);
     ScratchpadL3Mask64_Calculated = ((ScratchpadL3_Size / sizeof(uint64_t)) / 8 - 1) * 64;
     CacheLineAlignMask_Calculated = (DatasetBaseSize - 1) & ~(RANDOMX_DATASET_ITEM_SIZE - 1);
-#if defined(_M_X64) || defined(__x86_64__)
+#if defined(XMRIG_FEATURE_ASM) && (defined(_M_X64) || defined(__x86_64__))
     *(uint32_t*)(codeShhPrefetchTweaked + 3) = ArgonMemory * 16 - 1;
     const uint32_t DatasetBaseMask = DatasetBaseSize - RANDOMX_DATASET_ITEM_SIZE;
     *(uint32_t*)(codeReadDatasetRyzenTweaked + 9) = DatasetBaseMask;
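
The stores after the gate are worth a second look: codeShhPrefetchTweaked and codeReadDatasetRyzenTweaked are prebuilt machine-code templates, and Apply() rewrites the 32-bit immediate embedded at a fixed byte offset (the "+ 3" and "+ 9") so the emitted code matches the configured cache and dataset sizes. A sketch of the idea; the template bytes and offset below are hypothetical, not xmrig's:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Hypothetical 5-byte template for "and eax, imm32" (opcode 0x25); its
    // 32-bit immediate starts at byte offset 1. Real offsets depend on the
    // instruction encoding inside each template.
    static uint8_t codeTemplate[] = { 0x25, 0x00, 0x00, 0x00, 0x00 };

    static void patch_imm32(uint8_t *code, std::size_t offset, uint32_t value)
    {
        // memcpy avoids the unaligned-store hazard of the raw pointer cast.
        std::memcpy(code + offset, &value, sizeof(value));
    }

    // Usage mirroring the diff: patch_imm32(codeTemplate, 1, mask);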