KawPow WIP

parent 07025dc41b
commit 22b937cc1c

88 changed files with 11004 additions and 8383 deletions
@@ -89,11 +89,6 @@ public:
        case Algorithm::CN_DOUBLE:
            return CN_ITER * 2;

# ifdef XMRIG_ALGO_CN_GPU
        case Algorithm::CN_GPU:
            return 0xC000;
# endif

# ifdef XMRIG_ALGO_CN_PICO
        case Algorithm::CN_PICO_0:
        case Algorithm::CN_PICO_TLO:

@@ -109,12 +104,6 @@ public:

    inline static uint32_t mask(Algorithm::Id algo)
    {
# ifdef XMRIG_ALGO_CN_GPU
        if (algo == Algorithm::CN_GPU) {
            return 0x1FFFC0;
        }
# endif

# ifdef XMRIG_ALGO_CN_PICO
        if (algo == Algorithm::CN_PICO_0) {
            return 0x1FFF0;

@@ -161,11 +150,6 @@ public:
# endif
            return Algorithm::CN_2;

# ifdef XMRIG_ALGO_CN_GPU
        case Algorithm::CN_GPU:
            return Algorithm::CN_GPU;
# endif

        default:
            break;
        }

@@ -202,7 +186,6 @@ template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_XAO>::iterations() con
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_DOUBLE>::iterations() const { return CN_ITER * 2; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_RWZ>::iterations() const { return 0x60000; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_ZLS>::iterations() const { return 0x60000; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_GPU>::iterations() const { return 0xC000; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_0>::iterations() const { return CN_ITER / 8; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_TLO>::iterations() const { return CN_ITER / 8; }


@@ -216,7 +199,6 @@ template<> constexpr inline size_t CnAlgo<Algorithm::CN_PICO_0>::memory() const
template<> constexpr inline size_t CnAlgo<Algorithm::CN_PICO_TLO>::memory() const { return CN_MEMORY / 8; }


template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_GPU>::mask() const { return 0x1FFFC0; }
template<> constexpr inline uint32_t CnAlgo<Algorithm::CN_PICO_0>::mask() const { return 0x1FFF0; }

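Note: the CnAlgo hunks above follow the header's usual pattern of per-algorithm template specializations of constexpr accessors (iterations(), mask(), memory()). A minimal, self-contained sketch of that idiom, using a hypothetical ToyAlgo/ToyCnAlgo pair rather than the real header:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for xmrig::Algorithm::Id, only to illustrate the idiom.
enum class ToyAlgo { CN_2, CN_GPU, CN_PICO_0 };

constexpr uint32_t CN_ITER = 0x80000;

template<ToyAlgo ALGO>
struct ToyCnAlgo
{
    // Generic defaults, overridden per algorithm below.
    constexpr uint32_t iterations() const { return CN_ITER; }
    constexpr uint32_t mask() const       { return 0x1FFFF0; }
};

// Per-algorithm specializations, mirroring the CnAlgo<...>::iterations()/mask() style above.
template<> constexpr uint32_t ToyCnAlgo<ToyAlgo::CN_GPU>::iterations() const    { return 0xC000; }
template<> constexpr uint32_t ToyCnAlgo<ToyAlgo::CN_GPU>::mask() const          { return 0x1FFFC0; }
template<> constexpr uint32_t ToyCnAlgo<ToyAlgo::CN_PICO_0>::iterations() const { return CN_ITER / 8; }
template<> constexpr uint32_t ToyCnAlgo<ToyAlgo::CN_PICO_0>::mask() const       { return 0x1FFF0; }

int main()
{
    constexpr ToyCnAlgo<ToyAlgo::CN_GPU> gpu{};
    static_assert(gpu.iterations() == 0xC000, "cn/gpu iteration count");
    std::printf("cn/gpu: iterations=0x%X mask=0x%X\n",
                (unsigned)gpu.iterations(), (unsigned)gpu.mask());
}

Because the accessors are constexpr, the values can also be used as non-type template arguments, which is how the cn_gpu_inner_* instantiations at the bottom of this diff are produced.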
@@ -252,11 +252,6 @@ xmrig::CnHash::CnHash()
    ADD_FN_ASM(Algorithm::CN_ZLS);
    ADD_FN_ASM(Algorithm::CN_DOUBLE);

# ifdef XMRIG_ALGO_CN_GPU
    m_map[Algorithm::CN_GPU][AV_SINGLE][Assembly::NONE] = cryptonight_single_hash_gpu<Algorithm::CN_GPU, false>;
    m_map[Algorithm::CN_GPU][AV_SINGLE_SOFT][Assembly::NONE] = cryptonight_single_hash_gpu<Algorithm::CN_GPU, true>;
# endif

# ifdef XMRIG_ALGO_CN_LITE
    ADD_FN(Algorithm::CN_LITE_0);
    ADD_FN(Algorithm::CN_LITE_1);

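Note: the CnHash hunk removes the two cn/gpu entries from the function-pointer table. A minimal sketch of that dispatch-table pattern with hypothetical names (not the real m_map layout, which also carries an Assembly dimension):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Simplified, hypothetical version of the CnHash dispatch table: the real code
// indexes m_map by algorithm, AV_* variant and Assembly; a 2-D array is enough here.
using hash_fn = void (*)(const uint8_t *input, size_t size, uint8_t *output);

static void hash_single(const uint8_t *, size_t, uint8_t *out)      { std::memset(out, 0x11, 32); }
static void hash_single_soft(const uint8_t *, size_t, uint8_t *out) { std::memset(out, 0x22, 32); }

enum Algo    { ALGO_CN_GPU, ALGO_MAX };
enum Variant { AV_SINGLE, AV_SINGLE_SOFT, AV_MAX };

static hash_fn map[ALGO_MAX][AV_MAX] = {};

int main()
{
    // Registration step, analogous to the m_map[...] assignments in CnHash::CnHash().
    map[ALGO_CN_GPU][AV_SINGLE]      = hash_single;
    map[ALGO_CN_GPU][AV_SINGLE_SOFT] = hash_single_soft;

    uint8_t out[32];
    const uint8_t in[1] = { 0 };
    hash_fn fn = map[ALGO_CN_GPU][AV_SINGLE];   // lookup, analogous to CnHash::fn()
    if (fn) {
        fn(in, sizeof(in), out);
        std::printf("first output byte: 0x%02X\n", out[0]);
    }
}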
@@ -294,11 +294,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
{
    constexpr CnAlgo<ALGO> props;

# ifdef XMRIG_ALGO_CN_GPU
    constexpr bool IS_HEAVY = props.isHeavy() || ALGO == Algorithm::CN_GPU;
# else
    constexpr bool IS_HEAVY = props.isHeavy();
# endif

    __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
    __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;

@@ -580,66 +576,6 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
}


} /* namespace xmrig */


#ifdef XMRIG_ALGO_CN_GPU
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_arm(const uint8_t *spad, uint8_t *lpad);


namespace xmrig {


template<size_t MEM>
void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output)
{
    constexpr size_t hash_size = 200; // 25x8 bytes
    alignas(16) uint64_t hash[25];

    for (uint64_t i = 0; i < MEM / 512; i++) {
        memcpy(hash, input, hash_size);
        hash[0] ^= i;

        xmrig::keccakf(hash, 24);
        memcpy(output, hash, 160);
        output += 160;

        xmrig::keccakf(hash, 24);
        memcpy(output, hash, 176);
        output += 176;

        xmrig::keccakf(hash, 24);
        memcpy(output, hash, 176);
        output += 176;
    }
}


template<xmrig::Algorithm::Id ALGO, bool SOFT_AES>
inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height)
{
    constexpr CnAlgo<ALGO> props;

    keccak(input, size, ctx[0]->state);
    cn_explode_scratchpad_gpu<props.memory()>(ctx[0]->state, ctx[0]->memory);

    fesetround(FE_TONEAREST);

    cn_gpu_inner_arm<props.iterations(), props.mask()>(ctx[0]->state, ctx[0]->memory);

    cn_implode_scratchpad<ALGO, SOFT_AES>(reinterpret_cast<const __m128i *>(ctx[0]->memory), reinterpret_cast<__m128i *>(ctx[0]->state));
    keccakf(reinterpret_cast<uint64_t*>(ctx[0]->state), 24);
    memcpy(output, ctx[0]->state, 32);
}

} /* namespace xmrig */
#endif


namespace xmrig {


template<Algorithm::Id ALGO, bool SOFT_AES>
inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx, uint64_t height)
{

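Note: in the removed cn_explode_scratchpad_gpu above, each loop iteration runs keccakf three times and emits 160 + 176 + 176 = 512 bytes from the 200-byte Keccak state, so the loop bound is MEM / 512. A small sketch that checks this accounting, assuming the standard 2 MB CryptoNight scratchpad for MEM:

#include <cstddef>
#include <cstdio>

int main()
{
    // Assumption for this sketch: the cn/gpu scratchpad is the usual 2 MB
    // CryptoNight memory; in the real code the value comes from props.memory().
    const size_t MEM = 2 * 1024 * 1024;

    // Each iteration of cn_explode_scratchpad_gpu copies 160 + 176 + 176 bytes
    // out of the 200-byte (25 x 8) Keccak state, permuting it before each copy.
    const size_t bytes_per_iter = 160 + 176 + 176;   // = 512
    const size_t iters          = MEM / 512;         // loop bound used above
    const size_t keccak_calls   = 3 * iters;

    std::printf("bytes/iter = %zu, iterations = %zu, keccakf calls = %zu, total = %zu bytes\n",
                bytes_per_iter, iters, keccak_calls, bytes_per_iter * iters);
    return bytes_per_iter * iters == MEM ? 0 : 1;    // exits 0 when the scratchpad is exactly filled
}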
@@ -356,23 +356,6 @@ const static uint8_t test_output_pico_tlo[160] = {
#endif


#ifdef XMRIG_ALGO_CN_GPU
// "cn/gpu"
const static uint8_t test_output_gpu[160] = {
    0xE5, 0x5C, 0xB2, 0x3E, 0x51, 0x64, 0x9A, 0x59, 0xB1, 0x27, 0xB9, 0x6B, 0x51, 0x5F, 0x2B, 0xF7,
    0xBF, 0xEA, 0x19, 0x97, 0x41, 0xA0, 0x21, 0x6C, 0xF8, 0x38, 0xDE, 0xD0, 0x6E, 0xFF, 0x82, 0xDF,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
#endif


#ifdef XMRIG_ALGO_ARGON2
// "argon2/chukwa"
const static uint8_t argon2_chukwa_test_out[160] = {

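Note: the 160-byte reference arrays in this test header hold 32-byte result slots back to back; the zero padding in test_output_gpu suggests only the first (single-hash) slot was ever populated for cn/gpu. A small sketch that scans such an array and reports which slots are non-zero (the table below is a shortened stand-in, not the real vector):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns true when the 32-byte slot at index `slot` contains any non-zero byte.
static bool slot_is_nonzero(const uint8_t *table, size_t slot)
{
    for (size_t i = 0; i < 32; ++i) {
        if (table[slot * 32 + i] != 0) {
            return true;
        }
    }
    return false;
}

int main()
{
    // Shortened stand-in for test_output_gpu: only the first slot carries data.
    uint8_t table[160] = { 0xE5, 0x5C, 0xB2, 0x3E };

    for (size_t slot = 0; slot < 160 / 32; ++slot) {
        std::printf("slot %zu: %s\n", slot, slot_is_nonzero(table, slot) ? "populated" : "empty");
    }
}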
@@ -371,11 +371,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
{
    constexpr CnAlgo<ALGO> props;

# ifdef XMRIG_ALGO_CN_GPU
    constexpr bool IS_HEAVY = props.isHeavy() || ALGO == Algorithm::CN_GPU;
# else
    constexpr bool IS_HEAVY = props.isHeavy();
# endif

    __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
    __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;

@@ -702,73 +698,6 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
} /* namespace xmrig */


#ifdef XMRIG_ALGO_CN_GPU
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_avx(const uint8_t *spad, uint8_t *lpad);


template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_ssse3(const uint8_t *spad, uint8_t *lpad);


namespace xmrig {


template<size_t MEM>
void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output)
{
    constexpr size_t hash_size = 200; // 25x8 bytes
    alignas(16) uint64_t hash[25];

    for (uint64_t i = 0; i < MEM / 512; i++) {
        memcpy(hash, input, hash_size);
        hash[0] ^= i;

        xmrig::keccakf(hash, 24);
        memcpy(output, hash, 160);
        output += 160;

        xmrig::keccakf(hash, 24);
        memcpy(output, hash, 176);
        output += 176;

        xmrig::keccakf(hash, 24);
        memcpy(output, hash, 176);
        output += 176;
    }
}


template<Algorithm::Id ALGO, bool SOFT_AES>
inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t)
{
    constexpr CnAlgo<ALGO> props;

    keccak(input, size, ctx[0]->state);
    cn_explode_scratchpad_gpu<props.memory()>(ctx[0]->state, ctx[0]->memory);

# ifdef _MSC_VER
    _control87(RC_NEAR, MCW_RC);
# else
    fesetround(FE_TONEAREST);
# endif

    if (xmrig::Cpu::info()->hasAVX2()) {
        cn_gpu_inner_avx<props.iterations(), props.mask()>(ctx[0]->state, ctx[0]->memory);
    } else {
        cn_gpu_inner_ssse3<props.iterations(), props.mask()>(ctx[0]->state, ctx[0]->memory);
    }

    cn_implode_scratchpad<ALGO, SOFT_AES>(reinterpret_cast<const __m128i *>(ctx[0]->memory), reinterpret_cast<__m128i *>(ctx[0]->state));
    keccakf(reinterpret_cast<uint64_t*>(ctx[0]->state), 24);
    memcpy(output, ctx[0]->state, 32);
}


} /* namespace xmrig */
#endif


#ifdef XMRIG_FEATURE_ASM
extern "C" void cnv2_mainloop_ivybridge_asm(cryptonight_ctx **ctx);
extern "C" void cnv2_mainloop_ryzen_asm(cryptonight_ctx **ctx);

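Note: the removed x86 wrapper picks the AVX2 or SSSE3 kernel at run time via Cpu::info()->hasAVX2(). A minimal sketch of the same dispatch idea using GCC/Clang's __builtin_cpu_supports instead of xmrig's CPU abstraction; kernel_avx2/kernel_ssse3 are stand-ins, not the real cn_gpu_inner_* kernels:

#include <cstdio>

static void kernel_avx2()  { std::puts("running AVX2 kernel"); }
static void kernel_ssse3() { std::puts("running SSSE3 kernel"); }

int main()
{
#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
    // Runtime feature detection; the faster kernel is used only when the CPU supports it.
    if (__builtin_cpu_supports("avx2")) {
        kernel_avx2();
    } else {
        kernel_ssse3();
    }
#else
    kernel_ssse3();   // fallback path on non-x86 targets or other compilers
#endif
}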
@@ -1,240 +0,0 @@
/* XMRig
 * Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2012-2014 pooler <pooler@litecoinpool.org>
 * Copyright 2014 Lucas Jones <https://github.com/lucasjones>
 * Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
 * Copyright 2016 Jay D Dee <jayddee246@gmail.com>
 * Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
 * Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
 * Copyright 2016-2019 XMRig <support@xmrig.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */


#include <arm_neon.h>


#include "crypto/cn/CnAlgo.h"


inline void vandq_f32(float32x4_t &v, uint32_t v2)
{
    uint32x4_t vc = vdupq_n_u32(v2);
    v = (float32x4_t)vandq_u32((uint32x4_t)v, vc);
}


inline void vorq_f32(float32x4_t &v, uint32_t v2)
{
    uint32x4_t vc = vdupq_n_u32(v2);
    v = (float32x4_t)vorrq_u32((uint32x4_t)v, vc);
}


template <size_t v>
inline void vrot_si32(int32x4_t &r)
{
    r = (int32x4_t)vextq_s8((int8x16_t)r, (int8x16_t)r, v);
}

template <>
inline void vrot_si32<0>(int32x4_t &r)
{
}


inline uint32_t vheor_s32(const int32x4_t &v)
{
    int32x4_t v0 = veorq_s32(v, vrev64q_s32(v));
    int32x2_t vf = veor_s32(vget_high_s32(v0), vget_low_s32(v0));
    return (uint32_t)vget_lane_s32(vf, 0);
}


inline void prep_dv(int32_t *idx, int32x4_t &v, float32x4_t &n)
{
    v = vld1q_s32(idx);
    n = vcvtq_f32_s32(v);
}


inline void sub_round(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, const float32x4_t &rnd_c, float32x4_t &n, float32x4_t &d, float32x4_t &c)
{
    float32x4_t ln1 = vaddq_f32(n1, c);
    float32x4_t nn = vmulq_f32(n0, c);
    nn = vmulq_f32(ln1, vmulq_f32(nn, nn));
    vandq_f32(nn, 0xFEFFFFFF);
    vorq_f32(nn, 0x00800000);
    n = vaddq_f32(n, nn);

    float32x4_t ln3 = vsubq_f32(n3, c);
    float32x4_t dd = vmulq_f32(n2, c);
    dd = vmulq_f32(ln3, vmulq_f32(dd, dd));
    vandq_f32(dd, 0xFEFFFFFF);
    vorq_f32(dd, 0x00800000);
    d = vaddq_f32(d, dd);

    //Constant feedback
    c = vaddq_f32(c, rnd_c);
    c = vaddq_f32(c, vdupq_n_f32(0.734375f));
    float32x4_t r = vaddq_f32(nn, dd);
    vandq_f32(r, 0x807FFFFF);
    vorq_f32(r, 0x40000000);
    c = vaddq_f32(c, r);
}


inline void round_compute(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, const float32x4_t &rnd_c, float32x4_t &c, float32x4_t &r)
{
    float32x4_t n = vdupq_n_f32(0.0f), d = vdupq_n_f32(0.0f);

    sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
    sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
    sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
    sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
    sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
    sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
    sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
    sub_round(n0, n3, n2, n1, rnd_c, n, d, c);

    // Make sure abs(d) > 2.0 - this prevents division by zero and accidental overflows by division by < 1.0
    vandq_f32(d, 0xFF7FFFFF);
    vorq_f32(d, 0x40000000);
    r = vaddq_f32(r, vdivq_f32(n, d));
}


// 112×4 = 448
template <bool add>
inline int32x4_t single_compute(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, float cnt, const float32x4_t &rnd_c, float32x4_t &sum)
{
    float32x4_t c = vdupq_n_f32(cnt);
    float32x4_t r = vdupq_n_f32(0.0f);

    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);

    // do a quick fmod by setting exp to 2
    vandq_f32(r, 0x807FFFFF);
    vorq_f32(r, 0x40000000);

    if (add) {
        sum = vaddq_f32(sum, r);
    } else {
        sum = r;
    }

    const float32x4_t cc2 = vdupq_n_f32(536870880.0f);
    r = vmulq_f32(r, cc2); // 35
    return vcvtq_s32_f32(r);
}


template<size_t rot>
inline void single_compute_wrap(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, float cnt, const float32x4_t &rnd_c, float32x4_t &sum, int32x4_t &out)
{
    int32x4_t r = single_compute<rot % 2 != 0>(n0, n1, n2, n3, cnt, rnd_c, sum);
    vrot_si32<rot>(r);
    out = veorq_s32(out, r);
}


template<uint32_t MASK>
inline int32_t *scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<int32_t *>(lpad + (idx & MASK) + n * 16); }


template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_arm(const uint8_t *spad, uint8_t *lpad)
{
    uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
    int32_t *idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
    int32_t *idx1 = scratchpad_ptr<MASK>(lpad, s, 1);
    int32_t *idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
    int32_t *idx3 = scratchpad_ptr<MASK>(lpad, s, 3);
    float32x4_t sum0 = vdupq_n_f32(0.0f);

    for (size_t i = 0; i < ITER; i++) {
        float32x4_t n0, n1, n2, n3;
        int32x4_t v0, v1, v2, v3;
        float32x4_t suma, sumb, sum1, sum2, sum3;

        prep_dv(idx0, v0, n0);
        prep_dv(idx1, v1, n1);
        prep_dv(idx2, v2, n2);
        prep_dv(idx3, v3, n3);
        float32x4_t rc = sum0;

        int32x4_t out, out2;
        out = vdupq_n_s32(0);
        single_compute_wrap<0>(n0, n1, n2, n3, 1.3437500f, rc, suma, out);
        single_compute_wrap<1>(n0, n2, n3, n1, 1.2812500f, rc, suma, out);
        single_compute_wrap<2>(n0, n3, n1, n2, 1.3593750f, rc, sumb, out);
        single_compute_wrap<3>(n0, n3, n2, n1, 1.3671875f, rc, sumb, out);
        sum0 = vaddq_f32(suma, sumb);
        vst1q_s32(idx0, veorq_s32(v0, out));
        out2 = out;

        out = vdupq_n_s32(0);
        single_compute_wrap<0>(n1, n0, n2, n3, 1.4296875f, rc, suma, out);
        single_compute_wrap<1>(n1, n2, n3, n0, 1.3984375f, rc, suma, out);
        single_compute_wrap<2>(n1, n3, n0, n2, 1.3828125f, rc, sumb, out);
        single_compute_wrap<3>(n1, n3, n2, n0, 1.3046875f, rc, sumb, out);
        sum1 = vaddq_f32(suma, sumb);
        vst1q_s32(idx1, veorq_s32(v1, out));
        out2 = veorq_s32(out2, out);

        out = vdupq_n_s32(0);
        single_compute_wrap<0>(n2, n1, n0, n3, 1.4140625f, rc, suma, out);
        single_compute_wrap<1>(n2, n0, n3, n1, 1.2734375f, rc, suma, out);
        single_compute_wrap<2>(n2, n3, n1, n0, 1.2578125f, rc, sumb, out);
        single_compute_wrap<3>(n2, n3, n0, n1, 1.2890625f, rc, sumb, out);
        sum2 = vaddq_f32(suma, sumb);
        vst1q_s32(idx2, veorq_s32(v2, out));
        out2 = veorq_s32(out2, out);

        out = vdupq_n_s32(0);
        single_compute_wrap<0>(n3, n1, n2, n0, 1.3203125f, rc, suma, out);
        single_compute_wrap<1>(n3, n2, n0, n1, 1.3515625f, rc, suma, out);
        single_compute_wrap<2>(n3, n0, n1, n2, 1.3359375f, rc, sumb, out);
        single_compute_wrap<3>(n3, n0, n2, n1, 1.4609375f, rc, sumb, out);
        sum3 = vaddq_f32(suma, sumb);
        vst1q_s32(idx3, veorq_s32(v3, out));
        out2 = veorq_s32(out2, out);

        sum0 = vaddq_f32(sum0, sum1);
        sum2 = vaddq_f32(sum2, sum3);
        sum0 = vaddq_f32(sum0, sum2);

        const float32x4_t cc1 = vdupq_n_f32(16777216.0f);
        const float32x4_t cc2 = vdupq_n_f32(64.0f);
        vandq_f32(sum0, 0x7fffffff); // take abs(va) by masking the float sign bit
        // vs range 0 - 64
        n0 = vmulq_f32(sum0, cc1);
        v0 = vcvtq_s32_f32(n0);
        v0 = veorq_s32(v0, out2);
        uint32_t n = vheor_s32(v0);

        // vs is now between 0 and 1
        sum0 = vdivq_f32(sum0, cc2);
        idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
        idx1 = scratchpad_ptr<MASK>(lpad, n, 1);
        idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
        idx3 = scratchpad_ptr<MASK>(lpad, n, 3);
    }
}

template void cn_gpu_inner_arm<xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().iterations(), xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().mask()>(const uint8_t* spad, uint8_t* lpad);

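Note: the vandq_f32/vorq_f32 pairs in the deleted ARM kernel (masks 0x807FFFFF / 0x40000000) implement the "quick fmod by setting exp to 2" trick: keep sign and mantissa, force the IEEE-754 exponent field to 128 so the magnitude lands in [2, 4). A scalar illustration:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Keep sign + mantissa, overwrite the exponent with 128 -> |result| is in [2, 4).
static float clamp_exp_to_2(float x)
{
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits &= 0x807FFFFFu;   // sign and mantissa only
    bits |= 0x40000000u;   // exponent = 128
    std::memcpy(&x, &bits, sizeof(x));
    return x;
}

int main()
{
    const float samples[] = { 0.15625f, 3.9f, 123456.0f, -7.25f };
    for (float v : samples) {
        const float c = clamp_exp_to_2(v);
        std::printf("%12.4f -> %8.4f (|result| in [2,4): %s)\n",
                    v, c, (std::fabs(c) >= 2.0f && std::fabs(c) < 4.0f) ? "yes" : "no");
    }
}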
@@ -1,211 +0,0 @@
/* XMRig
 * Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2012-2014 pooler <pooler@litecoinpool.org>
 * Copyright 2014 Lucas Jones <https://github.com/lucasjones>
 * Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
 * Copyright 2016 Jay D Dee <jayddee246@gmail.com>
 * Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
 * Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
 * Copyright 2016-2020 XMRig <support@xmrig.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */


#include "crypto/cn/CnAlgo.h"


#ifdef __GNUC__
# include <x86intrin.h>
#else
# include <intrin.h>
# define __restrict__ __restrict
#endif
#ifndef _mm256_bslli_epi128
#define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count))
#endif
#ifndef _mm256_bsrli_epi128
#define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
#endif

inline void prep_dv_avx(__m256i* idx, __m256i& v, __m256& n01)
{
    v = _mm256_load_si256(idx);
    n01 = _mm256_cvtepi32_ps(v);
}

inline __m256 fma_break(const __m256& x)
{
    // Break the dependency chain by setitng the exp to ?????01
    __m256 xx = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0xFEFFFFFF)), x);
    return _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x00800000)), xx);
}

// 14
inline void sub_round(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3, const __m256& rnd_c, __m256& n, __m256& d, __m256& c)
{
    __m256 nn = _mm256_mul_ps(n0, c);
    nn = _mm256_mul_ps(_mm256_add_ps(n1, c), _mm256_mul_ps(nn, nn));
    nn = fma_break(nn);
    n = _mm256_add_ps(n, nn);

    __m256 dd = _mm256_mul_ps(n2, c);
    dd = _mm256_mul_ps(_mm256_sub_ps(n3, c), _mm256_mul_ps(dd, dd));
    dd = fma_break(dd);
    d = _mm256_add_ps(d, dd);

    //Constant feedback
    c = _mm256_add_ps(c, rnd_c);
    c = _mm256_add_ps(c, _mm256_set1_ps(0.734375f));
    __m256 r = _mm256_add_ps(nn, dd);
    r = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x807FFFFF)), r);
    r = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), r);
    c = _mm256_add_ps(c, r);
}

// 14*8 + 2 = 112
inline void round_compute(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3, const __m256& rnd_c, __m256& c, __m256& r)
{
    __m256 n = _mm256_setzero_ps(), d = _mm256_setzero_ps();

    sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
    sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
    sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
    sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
    sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
    sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
    sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
    sub_round(n0, n3, n2, n1, rnd_c, n, d, c);

    // Make sure abs(d) > 2.0 - this prevents division by zero and accidental overflows by division by < 1.0
    d = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0xFF7FFFFF)), d);
    d = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), d);
    r = _mm256_add_ps(r, _mm256_div_ps(n, d));
}

// 112×4 = 448
template <bool add>
inline __m256i double_compute(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
    float lcnt, float hcnt, const __m256& rnd_c, __m256& sum)
{
    __m256 c = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_set1_ps(lcnt)), _mm_set1_ps(hcnt), 1);
    __m256 r = _mm256_setzero_ps();

    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);

    // do a quick fmod by setting exp to 2
    r = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x807FFFFF)), r);
    r = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), r);

    if(add)
        sum = _mm256_add_ps(sum, r);
    else
        sum = r;

    r = _mm256_mul_ps(r, _mm256_set1_ps(536870880.0f)); // 35
    return _mm256_cvttps_epi32(r);
}

template <size_t rot>
inline void double_compute_wrap(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
    float lcnt, float hcnt, const __m256& rnd_c, __m256& sum, __m256i& out)
{
    __m256i r = double_compute<rot % 2 != 0>(n0, n1, n2, n3, lcnt, hcnt, rnd_c, sum);
    if(rot != 0)
        r = _mm256_or_si256(_mm256_bslli_epi128(r, 16 - rot), _mm256_bsrli_epi128(r, rot));

    out = _mm256_xor_si256(out, r);
}

template<uint32_t MASK>
inline __m256i* scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<__m256i*>(lpad + (idx & MASK) + n*16); }

template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_avx(const uint8_t* spad, uint8_t* lpad)
{
    uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
    __m256i* idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
    __m256i* idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
    __m256 sum0 = _mm256_setzero_ps();

    for(size_t i = 0; i < ITER; i++)
    {
        __m256i v01, v23;
        __m256 suma, sumb, sum1;
        __m256 rc = sum0;

        __m256 n01, n23;
        prep_dv_avx(idx0, v01, n01);
        prep_dv_avx(idx2, v23, n23);

        __m256i out, out2;
        __m256 n10, n22, n33;
        n10 = _mm256_permute2f128_ps(n01, n01, 0x01);
        n22 = _mm256_permute2f128_ps(n23, n23, 0x00);
        n33 = _mm256_permute2f128_ps(n23, n23, 0x11);

        out = _mm256_setzero_si256();
        double_compute_wrap<0>(n01, n10, n22, n33, 1.3437500f, 1.4296875f, rc, suma, out);
        double_compute_wrap<1>(n01, n22, n33, n10, 1.2812500f, 1.3984375f, rc, suma, out);
        double_compute_wrap<2>(n01, n33, n10, n22, 1.3593750f, 1.3828125f, rc, sumb, out);
        double_compute_wrap<3>(n01, n33, n22, n10, 1.3671875f, 1.3046875f, rc, sumb, out);
        _mm256_store_si256(idx0, _mm256_xor_si256(v01, out));
        sum0 = _mm256_add_ps(suma, sumb);
        out2 = out;

        __m256 n11, n02, n30;
        n11 = _mm256_permute2f128_ps(n01, n01, 0x11);
        n02 = _mm256_permute2f128_ps(n01, n23, 0x20);
        n30 = _mm256_permute2f128_ps(n01, n23, 0x03);

        out = _mm256_setzero_si256();
        double_compute_wrap<0>(n23, n11, n02, n30, 1.4140625f, 1.3203125f, rc, suma, out);
        double_compute_wrap<1>(n23, n02, n30, n11, 1.2734375f, 1.3515625f, rc, suma, out);
        double_compute_wrap<2>(n23, n30, n11, n02, 1.2578125f, 1.3359375f, rc, sumb, out);
        double_compute_wrap<3>(n23, n30, n02, n11, 1.2890625f, 1.4609375f, rc, sumb, out);
        _mm256_store_si256(idx2, _mm256_xor_si256(v23, out));
        sum1 = _mm256_add_ps(suma, sumb);

        out2 = _mm256_xor_si256(out2, out);
        out2 = _mm256_xor_si256(_mm256_permute2x128_si256(out2,out2,0x41), out2);
        suma = _mm256_permute2f128_ps(sum0, sum1, 0x30);
        sumb = _mm256_permute2f128_ps(sum0, sum1, 0x21);
        sum0 = _mm256_add_ps(suma, sumb);
        sum0 = _mm256_add_ps(sum0, _mm256_permute2f128_ps(sum0, sum0, 0x41));

        // Clear the high 128 bits
        __m128 sum = _mm256_castps256_ps128(sum0);

        sum = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)), sum); // take abs(va) by masking the float sign bit
        // vs range 0 - 64
        __m128i v0 = _mm_cvttps_epi32(_mm_mul_ps(sum, _mm_set1_ps(16777216.0f)));
        v0 = _mm_xor_si128(v0, _mm256_castsi256_si128(out2));
        __m128i v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 2, 3));
        v0 = _mm_xor_si128(v0, v1);
        v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 0, 1));
        v0 = _mm_xor_si128(v0, v1);

        // vs is now between 0 and 1
        sum = _mm_div_ps(sum, _mm_set1_ps(64.0f));
        sum0 = _mm256_insertf128_ps(_mm256_castps128_ps256(sum), sum, 1);
        uint32_t n = _mm_cvtsi128_si32(v0);
        idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
        idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
    }
}

template void cn_gpu_inner_avx<xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().iterations(), xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().mask()>(const uint8_t* spad, uint8_t* lpad);

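Note: scratchpad_ptr<MASK> in these kernels computes (idx & MASK) + n * 16; with the cn/gpu mask 0x1FFFC0 that is a 64-byte-aligned base inside the 2 MB scratchpad plus one of four 16-byte lanes. A scalar sketch of the offset arithmetic:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Same arithmetic as scratchpad_ptr<MASK>(), returning an offset instead of a pointer.
static size_t scratchpad_offset(uint32_t idx, size_t n)
{
    const uint32_t MASK = 0x1FFFC0;   // cn/gpu mask: bits 6..20, so 64-byte granularity below 2 MiB
    return (idx & MASK) + n * 16;
}

int main()
{
    const uint32_t idx = 0xDEADBEEF;  // arbitrary example index
    for (size_t n = 0; n < 4; ++n) {
        const size_t off = scratchpad_offset(idx, n);
        std::printf("lane %zu: offset 0x%zX (64-byte aligned base: %s, < 2 MiB: %s)\n",
                    n, off,
                    ((off - n * 16) % 64 == 0) ? "yes" : "no",
                    (off < (2u << 20)) ? "yes" : "no");
    }
}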
@@ -1,212 +0,0 @@
/* XMRig
 * Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2012-2014 pooler <pooler@litecoinpool.org>
 * Copyright 2014 Lucas Jones <https://github.com/lucasjones>
 * Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
 * Copyright 2016 Jay D Dee <jayddee246@gmail.com>
 * Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
 * Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
 * Copyright 2016-2020 XMRig <support@xmrig.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */


#include "crypto/cn/CnAlgo.h"


#ifdef __GNUC__
# include <x86intrin.h>
#else
# include <intrin.h>
# define __restrict__ __restrict
#endif

inline void prep_dv(__m128i* idx, __m128i& v, __m128& n)
{
    v = _mm_load_si128(idx);
    n = _mm_cvtepi32_ps(v);
}

inline __m128 fma_break(__m128 x)
{
    // Break the dependency chain by setitng the exp to ?????01
    x = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0xFEFFFFFF)), x);
    return _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x00800000)), x);
}

// 14
inline void sub_round(__m128 n0, __m128 n1, __m128 n2, __m128 n3, __m128 rnd_c, __m128& n, __m128& d, __m128& c)
{
    n1 = _mm_add_ps(n1, c);
    __m128 nn = _mm_mul_ps(n0, c);
    nn = _mm_mul_ps(n1, _mm_mul_ps(nn,nn));
    nn = fma_break(nn);
    n = _mm_add_ps(n, nn);

    n3 = _mm_sub_ps(n3, c);
    __m128 dd = _mm_mul_ps(n2, c);
    dd = _mm_mul_ps(n3, _mm_mul_ps(dd,dd));
    dd = fma_break(dd);
    d = _mm_add_ps(d, dd);

    //Constant feedback
    c = _mm_add_ps(c, rnd_c);
    c = _mm_add_ps(c, _mm_set1_ps(0.734375f));
    __m128 r = _mm_add_ps(nn, dd);
    r = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), r);
    r = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), r);
    c = _mm_add_ps(c, r);
}

// 14*8 + 2 = 112
inline void round_compute(__m128 n0, __m128 n1, __m128 n2, __m128 n3, __m128 rnd_c, __m128& c, __m128& r)
{
    __m128 n = _mm_setzero_ps(), d = _mm_setzero_ps();

    sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
    sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
    sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
    sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
    sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
    sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
    sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
    sub_round(n0, n3, n2, n1, rnd_c, n, d, c);

    // Make sure abs(d) > 2.0 - this prevents division by zero and accidental overflows by division by < 1.0
    d = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0xFF7FFFFF)), d);
    d = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), d);
    r =_mm_add_ps(r, _mm_div_ps(n,d));
}

// 112×4 = 448
template<bool add>
inline __m128i single_compute(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum)
{
    __m128 c = _mm_set1_ps(cnt);
    __m128 r = _mm_setzero_ps();

    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);
    round_compute(n0, n1, n2, n3, rnd_c, c, r);

    // do a quick fmod by setting exp to 2
    r = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), r);
    r = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), r);

    if(add)
        sum = _mm_add_ps(sum, r);
    else
        sum = r;

    r = _mm_mul_ps(r, _mm_set1_ps(536870880.0f)); // 35
    return _mm_cvttps_epi32(r);
}

template<size_t rot>
inline void single_compute_wrap(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum, __m128i& out)
{
    __m128i r = single_compute<rot % 2 != 0>(n0, n1, n2, n3, cnt, rnd_c, sum);
    if(rot != 0)
        r = _mm_or_si128(_mm_slli_si128(r, 16 - rot), _mm_srli_si128(r, rot));
    out = _mm_xor_si128(out, r);
}

template<uint32_t MASK>
inline __m128i* scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<__m128i*>(lpad + (idx & MASK) + n*16); }

template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_ssse3(const uint8_t* spad, uint8_t* lpad)
{
    uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
    __m128i* idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
    __m128i* idx1 = scratchpad_ptr<MASK>(lpad, s, 1);
    __m128i* idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
    __m128i* idx3 = scratchpad_ptr<MASK>(lpad, s, 3);
    __m128 sum0 = _mm_setzero_ps();

    for(size_t i = 0; i < ITER; i++)
    {
        __m128 n0, n1, n2, n3;
        __m128i v0, v1, v2, v3;
        __m128 suma, sumb, sum1, sum2, sum3;

        prep_dv(idx0, v0, n0);
        prep_dv(idx1, v1, n1);
        prep_dv(idx2, v2, n2);
        prep_dv(idx3, v3, n3);
        __m128 rc = sum0;

        __m128i out, out2;
        out = _mm_setzero_si128();
        single_compute_wrap<0>(n0, n1, n2, n3, 1.3437500f, rc, suma, out);
        single_compute_wrap<1>(n0, n2, n3, n1, 1.2812500f, rc, suma, out);
        single_compute_wrap<2>(n0, n3, n1, n2, 1.3593750f, rc, sumb, out);
        single_compute_wrap<3>(n0, n3, n2, n1, 1.3671875f, rc, sumb, out);
        sum0 = _mm_add_ps(suma, sumb);
        _mm_store_si128(idx0, _mm_xor_si128(v0, out));
        out2 = out;

        out = _mm_setzero_si128();
        single_compute_wrap<0>(n1, n0, n2, n3, 1.4296875f, rc, suma, out);
        single_compute_wrap<1>(n1, n2, n3, n0, 1.3984375f, rc, suma, out);
        single_compute_wrap<2>(n1, n3, n0, n2, 1.3828125f, rc, sumb, out);
        single_compute_wrap<3>(n1, n3, n2, n0, 1.3046875f, rc, sumb, out);
        sum1 = _mm_add_ps(suma, sumb);
        _mm_store_si128(idx1, _mm_xor_si128(v1, out));
        out2 = _mm_xor_si128(out2, out);

        out = _mm_setzero_si128();
        single_compute_wrap<0>(n2, n1, n0, n3, 1.4140625f, rc, suma, out);
        single_compute_wrap<1>(n2, n0, n3, n1, 1.2734375f, rc, suma, out);
        single_compute_wrap<2>(n2, n3, n1, n0, 1.2578125f, rc, sumb, out);
        single_compute_wrap<3>(n2, n3, n0, n1, 1.2890625f, rc, sumb, out);
        sum2 = _mm_add_ps(suma, sumb);
        _mm_store_si128(idx2, _mm_xor_si128(v2, out));
        out2 = _mm_xor_si128(out2, out);

        out = _mm_setzero_si128();
        single_compute_wrap<0>(n3, n1, n2, n0, 1.3203125f, rc, suma, out);
        single_compute_wrap<1>(n3, n2, n0, n1, 1.3515625f, rc, suma, out);
        single_compute_wrap<2>(n3, n0, n1, n2, 1.3359375f, rc, sumb, out);
        single_compute_wrap<3>(n3, n0, n2, n1, 1.4609375f, rc, sumb, out);
        sum3 = _mm_add_ps(suma, sumb);
        _mm_store_si128(idx3, _mm_xor_si128(v3, out));
        out2 = _mm_xor_si128(out2, out);
        sum0 = _mm_add_ps(sum0, sum1);
        sum2 = _mm_add_ps(sum2, sum3);
        sum0 = _mm_add_ps(sum0, sum2);

        sum0 = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)), sum0); // take abs(va) by masking the float sign bit
        // vs range 0 - 64
        n0 = _mm_mul_ps(sum0, _mm_set1_ps(16777216.0f));
        v0 = _mm_cvttps_epi32(n0);
        v0 = _mm_xor_si128(v0, out2);
        v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 2, 3));
        v0 = _mm_xor_si128(v0, v1);
        v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 0, 1));
        v0 = _mm_xor_si128(v0, v1);

        // vs is now between 0 and 1
        sum0 = _mm_div_ps(sum0, _mm_set1_ps(64.0f));
        uint32_t n = _mm_cvtsi128_si32(v0);
        idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
        idx1 = scratchpad_ptr<MASK>(lpad, n, 1);
        idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
        idx3 = scratchpad_ptr<MASK>(lpad, n, 3);
    }
}

template void cn_gpu_inner_ssse3<xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().iterations(), xmrig::CnAlgo<xmrig::Algorithm::CN_GPU>().mask()>(const uint8_t* spad, uint8_t* lpad);

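Note: the shuffle/xor folding at the end of cn_gpu_inner_ssse3 (and vheor_s32 in the ARM version) reduces the four 32-bit lanes of the vector to a single value by XOR, which then seeds the next scratchpad index. Scalar equivalent:

#include <cstdint>
#include <cstdio>

// XOR of all four 32-bit lanes, matching the two-step shuffle/xor fold above.
static uint32_t horizontal_xor(const uint32_t lane[4])
{
    return lane[0] ^ lane[1] ^ lane[2] ^ lane[3];
}

int main()
{
    const uint32_t lanes[4] = { 0x11111111, 0x22222222, 0x44444444, 0x88888888 };
    std::printf("folded index seed: 0x%08X\n", horizontal_xor(lanes));
}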