Added generic Argon2 implementation (conflicts with RandomX).

XMRig 2019-08-16 00:31:29 +07:00
parent df58821655
commit 0c25424a3e
53 changed files with 5140 additions and 126 deletions

@@ -0,0 +1,41 @@
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include "impl-select.h"
#include "cpu-flags.h"
#include "argon2-sse2.h"
#include "argon2-ssse3.h"
#include "argon2-xop.h"
#include "argon2-avx2.h"
#include "argon2-avx512f.h"
/* NOTE: there is no portable intrinsic for 64-bit rotate, but any
* sane compiler should be able to compile this into a ROR instruction: */
/* Outer parentheses keep the OR bound when the macro is used inside a
 * larger expression. */
#define rotr64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))
#include "argon2-template-64.h"
void fill_segment_default(const argon2_instance_t *instance,
argon2_position_t position)
{
fill_segment_64(instance, position);
}
void argon2_get_impl_list(argon2_impl_list *list)
{
static const argon2_impl IMPLS[] = {
{ "x86_64", NULL, fill_segment_default },
{ "SSE2", check_sse2, fill_segment_sse2 },
{ "SSSE3", check_ssse3, fill_segment_ssse3 },
{ "XOP", check_xop, fill_segment_xop },
{ "AVX2", check_avx2, fill_segment_avx2 },
{ "AVX-512F", check_avx512f, fill_segment_avx512f },
};
cpu_flags_get();
list->count = sizeof(IMPLS) / sizeof(IMPLS[0]);
list->entries = IMPLS;
}
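/* Illustrative sketch only -- the real selection logic lives in
 * impl-select.c, which is not part of this hunk. The field names (name,
 * check, fill_segment) are assumed from the initializer order above, and
 * check() is assumed to return nonzero when the CPU supports the entry.
 * Entries appear to be ordered from generic to fastest, so the last one
 * whose check passes wins. */
static const argon2_impl *select_impl_sketch(const argon2_impl_list *list)
{
    const argon2_impl *best = &list->entries[0]; /* portable fallback */
    size_t i;

    for (i = 1; i < list->count; i++) {
        if (list->entries[i].check != NULL && list->entries[i].check()) {
            best = &list->entries[i];
        }
    }
    return best;
}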

@@ -0,0 +1,343 @@
#include "argon2-avx2.h"
#ifdef HAVE_AVX2
#include <string.h>
#include <x86intrin.h>
#include "cpu-flags.h"
#define r16 (_mm256_setr_epi8( \
2, 3, 4, 5, 6, 7, 0, 1, \
10, 11, 12, 13, 14, 15, 8, 9, \
18, 19, 20, 21, 22, 23, 16, 17, \
26, 27, 28, 29, 30, 31, 24, 25))
#define r24 (_mm256_setr_epi8( \
3, 4, 5, 6, 7, 0, 1, 2, \
11, 12, 13, 14, 15, 8, 9, 10, \
19, 20, 21, 22, 23, 16, 17, 18, \
27, 28, 29, 30, 31, 24, 25, 26))
#define ror64_16(x) _mm256_shuffle_epi8((x), r16)
#define ror64_24(x) _mm256_shuffle_epi8((x), r24)
#define ror64_32(x) _mm256_shuffle_epi32((x), _MM_SHUFFLE(2, 3, 0, 1))
#define ror64_63(x) \
_mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))
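/* ror64_16/ror64_24 are byte shuffles, ror64_32 is a dword shuffle, and
 * rotate-right-by-63 equals rotate-left-by-1: a shift plus a cheap x + x. */
/* f() is the BlaMka primitive from the Argon2 spec:
 * f(x, y) = x + y + 2 * lo32(x) * lo32(y);
 * _mm256_mul_epu32 multiplies only the low 32 bits of each 64-bit lane. */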
static __m256i f(__m256i x, __m256i y)
{
__m256i z = _mm256_mul_epu32(x, y);
return _mm256_add_epi64(_mm256_add_epi64(x, y), _mm256_add_epi64(z, z));
}
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm256_xor_si256(D0, A0); \
D1 = _mm256_xor_si256(D1, A1); \
\
D0 = ror64_32(D0); \
D1 = ror64_32(D1); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm256_xor_si256(B0, C0); \
B1 = _mm256_xor_si256(B1, C1); \
\
B0 = ror64_24(B0); \
B1 = ror64_24(B1); \
} while ((void)0, 0)
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm256_xor_si256(D0, A0); \
D1 = _mm256_xor_si256(D1, A1); \
\
D0 = ror64_16(D0); \
D1 = ror64_16(D1); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm256_xor_si256(B0, C0); \
B1 = _mm256_xor_si256(B1, C1); \
\
B0 = ror64_63(B0); \
B1 = ror64_63(B1); \
} while ((void)0, 0)
#define DIAGONALIZE1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
B0 = _mm256_permute4x64_epi64(B0, _MM_SHUFFLE(0, 3, 2, 1)); \
B1 = _mm256_permute4x64_epi64(B1, _MM_SHUFFLE(0, 3, 2, 1)); \
\
C0 = _mm256_permute4x64_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
C1 = _mm256_permute4x64_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
\
D0 = _mm256_permute4x64_epi64(D0, _MM_SHUFFLE(2, 1, 0, 3)); \
D1 = _mm256_permute4x64_epi64(D1, _MM_SHUFFLE(2, 1, 0, 3)); \
} while ((void)0, 0)
#define UNDIAGONALIZE1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
B0 = _mm256_permute4x64_epi64(B0, _MM_SHUFFLE(2, 1, 0, 3)); \
B1 = _mm256_permute4x64_epi64(B1, _MM_SHUFFLE(2, 1, 0, 3)); \
\
C0 = _mm256_permute4x64_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
C1 = _mm256_permute4x64_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
\
D0 = _mm256_permute4x64_epi64(D0, _MM_SHUFFLE(0, 3, 2, 1)); \
D1 = _mm256_permute4x64_epi64(D1, _MM_SHUFFLE(0, 3, 2, 1)); \
} while ((void)0, 0)
#define DIAGONALIZE2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m256i tmp1, tmp2; \
tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
B1 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
B0 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
\
tmp1 = C0; \
C0 = C1; \
C1 = tmp1; \
\
tmp1 = _mm256_blend_epi32(D0, D1, 0xCC); \
tmp2 = _mm256_blend_epi32(D0, D1, 0x33); \
D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
} while ((void)0, 0)
#define UNDIAGONALIZE2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m256i tmp1, tmp2; \
tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
B0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
B1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
\
tmp1 = C0; \
C0 = C1; \
C1 = tmp1; \
\
tmp1 = _mm256_blend_epi32(D0, D1, 0xCC); \
tmp2 = _mm256_blend_epi32(D0, D1, 0x33); \
D1 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
D0 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
} while ((void)0, 0)
#define BLAKE2_ROUND1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE1(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
UNDIAGONALIZE1(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
#define BLAKE2_ROUND2(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
UNDIAGONALIZE2(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
enum {
ARGON2_HWORDS_IN_BLOCK = ARGON2_OWORDS_IN_BLOCK / 2,
};
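/* Argon2 compression function G: with tmp = prev XOR ref, the output is
 * P(tmp) XOR tmp, additionally XOR'ed with the old next_block when
 * with_xor is set (second and later passes of Argon2 v1.3). P applies
 * BLAKE2b rounds over the 1 KiB block twice: row-wise, then column-wise. */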
static void fill_block(__m256i *s, const block *ref_block, block *next_block,
int with_xor)
{
__m256i block_XY[ARGON2_HWORDS_IN_BLOCK];
unsigned int i;
if (with_xor) {
for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
s[i] = _mm256_xor_si256(
s[i], _mm256_loadu_si256((const __m256i *)ref_block->v + i));
block_XY[i] = _mm256_xor_si256(
s[i], _mm256_loadu_si256((const __m256i *)next_block->v + i));
}
} else {
for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
block_XY[i] = s[i] = _mm256_xor_si256(
s[i], _mm256_loadu_si256((const __m256i *)ref_block->v + i));
}
}
for (i = 0; i < 4; ++i) {
BLAKE2_ROUND1(
s[8 * i + 0], s[8 * i + 1], s[8 * i + 2], s[8 * i + 3],
s[8 * i + 4], s[8 * i + 5], s[8 * i + 6], s[8 * i + 7]);
}
for (i = 0; i < 4; ++i) {
BLAKE2_ROUND2(
s[4 * 0 + i], s[4 * 1 + i], s[4 * 2 + i], s[4 * 3 + i],
s[4 * 4 + i], s[4 * 5 + i], s[4 * 6 + i], s[4 * 7 + i]);
}
for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
s[i] = _mm256_xor_si256(s[i], block_XY[i]);
_mm256_storeu_si256((__m256i *)next_block->v + i, s[i]);
}
}
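/* Argon2i address generation: address_block = G(0, G(0, input_block)),
 * where input_block->v[6] is the counter bumped once per fresh block of
 * ARGON2_ADDRESSES_IN_BLOCK addresses. */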
static void next_addresses(block *address_block, block *input_block)
{
/*Temporary zero-initialized blocks*/
__m256i zero_block[ARGON2_HWORDS_IN_BLOCK];
__m256i zero2_block[ARGON2_HWORDS_IN_BLOCK];
memset(zero_block, 0, sizeof(zero_block));
memset(zero2_block, 0, sizeof(zero2_block));
/*Increasing index counter*/
input_block->v[6]++;
/*First iteration of G*/
fill_block(zero_block, input_block, address_block, 0);
/*Second iteration of G*/
fill_block(zero2_block, address_block, address_block, 0);
}
void fill_segment_avx2(const argon2_instance_t *instance,
argon2_position_t position)
{
block *ref_block = NULL, *curr_block = NULL;
block address_block, input_block;
uint64_t pseudo_rand, ref_index, ref_lane;
uint32_t prev_offset, curr_offset;
uint32_t starting_index, i;
__m256i state[ARGON2_HWORDS_IN_BLOCK];
int data_independent_addressing;
if (instance == NULL) {
return;
}
data_independent_addressing = (instance->type == Argon2_i) ||
(instance->type == Argon2_id && (position.pass == 0) &&
(position.slice < ARGON2_SYNC_POINTS / 2));
if (data_independent_addressing) {
init_block_value(&input_block, 0);
input_block.v[0] = position.pass;
input_block.v[1] = position.lane;
input_block.v[2] = position.slice;
input_block.v[3] = instance->memory_blocks;
input_block.v[4] = instance->passes;
input_block.v[5] = instance->type;
}
starting_index = 0;
if ((0 == position.pass) && (0 == position.slice)) {
starting_index = 2; /* we have already generated the first two blocks */
/* Don't forget to generate the first block of addresses: */
if (data_independent_addressing) {
next_addresses(&address_block, &input_block);
}
}
/* Offset of the current block */
curr_offset = position.lane * instance->lane_length +
position.slice * instance->segment_length + starting_index;
if (0 == curr_offset % instance->lane_length) {
/* Last block in this lane */
prev_offset = curr_offset + instance->lane_length - 1;
} else {
/* Previous block */
prev_offset = curr_offset - 1;
}
memcpy(state, ((instance->memory + prev_offset)->v), ARGON2_BLOCK_SIZE);
for (i = starting_index; i < instance->segment_length;
++i, ++curr_offset, ++prev_offset) {
/*1.1 Rotating prev_offset if needed */
if (curr_offset % instance->lane_length == 1) {
prev_offset = curr_offset - 1;
}
/* 1.2 Computing the index of the reference block */
/* 1.2.1 Taking pseudo-random value from the previous block */
if (data_independent_addressing) {
if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) {
next_addresses(&address_block, &input_block);
}
pseudo_rand = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK];
} else {
pseudo_rand = instance->memory[prev_offset].v[0];
}
/* 1.2.2 Computing the lane of the reference block */
ref_lane = ((pseudo_rand >> 32)) % instance->lanes;
if ((position.pass == 0) && (position.slice == 0)) {
/* Cannot reference other lanes yet */
ref_lane = position.lane;
}
/* 1.2.3 Computing the number of possible reference blocks within the
* lane.
*/
position.index = i;
ref_index = index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
ref_lane == position.lane);
/* 2 Creating a new block */
ref_block =
instance->memory + instance->lane_length * ref_lane + ref_index;
curr_block = instance->memory + curr_offset;
/* version 1.2.1 and earlier: overwrite, not XOR */
if (0 == position.pass || ARGON2_VERSION_10 == instance->version) {
fill_block(state, ref_block, curr_block, 0);
} else {
fill_block(state, ref_block, curr_block, 1);
}
}
}
int check_avx2(void)
{
return cpu_flags_have_avx2();
}
#else
void fill_segment_avx2(const argon2_instance_t *instance,
argon2_position_t position)
{
}
int check_avx2(void)
{
return 0;
}
#endif

@@ -0,0 +1,11 @@
#ifndef ARGON2_AVX2_H
#define ARGON2_AVX2_H
#include "core.h"
void fill_segment_avx2(const argon2_instance_t *instance,
argon2_position_t position);
int check_avx2(void);
#endif // ARGON2_AVX2_H

@@ -0,0 +1,328 @@
#include "argon2-avx512f.h"
#ifdef HAVE_AVX512F
#include <stdint.h>
#include <string.h>
#include <x86intrin.h>
#include "cpu-flags.h"
#define ror64(x, n) _mm512_ror_epi64((x), (n))
static __m512i f(__m512i x, __m512i y)
{
__m512i z = _mm512_mul_epu32(x, y);
return _mm512_add_epi64(_mm512_add_epi64(x, y), _mm512_add_epi64(z, z));
}
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm512_xor_si512(D0, A0); \
D1 = _mm512_xor_si512(D1, A1); \
\
D0 = ror64(D0, 32); \
D1 = ror64(D1, 32); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm512_xor_si512(B0, C0); \
B1 = _mm512_xor_si512(B1, C1); \
\
B0 = ror64(B0, 24); \
B1 = ror64(B1, 24); \
} while ((void)0, 0)
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm512_xor_si512(D0, A0); \
D1 = _mm512_xor_si512(D1, A1); \
\
D0 = ror64(D0, 16); \
D1 = ror64(D1, 16); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm512_xor_si512(B0, C0); \
B1 = _mm512_xor_si512(B1, C1); \
\
B0 = ror64(B0, 63); \
B1 = ror64(B1, 63); \
} while ((void)0, 0)
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
B0 = _mm512_permutex_epi64(B0, _MM_SHUFFLE(0, 3, 2, 1)); \
B1 = _mm512_permutex_epi64(B1, _MM_SHUFFLE(0, 3, 2, 1)); \
\
C0 = _mm512_permutex_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
C1 = _mm512_permutex_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
\
D0 = _mm512_permutex_epi64(D0, _MM_SHUFFLE(2, 1, 0, 3)); \
D1 = _mm512_permutex_epi64(D1, _MM_SHUFFLE(2, 1, 0, 3)); \
} while ((void)0, 0)
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
B0 = _mm512_permutex_epi64(B0, _MM_SHUFFLE(2, 1, 0, 3)); \
B1 = _mm512_permutex_epi64(B1, _MM_SHUFFLE(2, 1, 0, 3)); \
\
C0 = _mm512_permutex_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
C1 = _mm512_permutex_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
\
D0 = _mm512_permutex_epi64(D0, _MM_SHUFFLE(0, 3, 2, 1)); \
D1 = _mm512_permutex_epi64(D1, _MM_SHUFFLE(0, 3, 2, 1)); \
} while ((void)0, 0)
#define BLAKE2_ROUND(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
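/* SWAP_HALVES gathers the low 256-bit halves of a register pair into
 * the first register and the high halves into the second (via
 * _mm512_shuffle_i64x2); SWAP_QUARTERS additionally reorders the 64-bit
 * lanes. Together they regroup the block so the BLAKE2_ROUND above can
 * be reused unchanged for both the row-wise and column-wise passes. */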
#define SWAP_HALVES(A0, A1) \
do { \
__m512i t0, t1; \
t0 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(1, 0, 1, 0)); \
t1 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(3, 2, 3, 2)); \
A0 = t0; \
A1 = t1; \
} while ((void)0, 0)
#define SWAP_QUARTERS(A0, A1) \
do { \
SWAP_HALVES(A0, A1); \
A0 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A0); \
A1 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A1); \
} while ((void)0, 0)
#define UNSWAP_QUARTERS(A0, A1) \
do { \
A0 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A0); \
A1 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A1); \
SWAP_HALVES(A0, A1); \
} while ((void)0, 0)
#define BLAKE2_ROUND1(A0, C0, B0, D0, A1, C1, B1, D1) \
do { \
SWAP_HALVES(A0, B0); \
SWAP_HALVES(C0, D0); \
SWAP_HALVES(A1, B1); \
SWAP_HALVES(C1, D1); \
BLAKE2_ROUND(A0, B0, C0, D0, A1, B1, C1, D1); \
SWAP_HALVES(A0, B0); \
SWAP_HALVES(C0, D0); \
SWAP_HALVES(A1, B1); \
SWAP_HALVES(C1, D1); \
} while ((void)0, 0)
#define BLAKE2_ROUND2(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
SWAP_QUARTERS(A0, A1); \
SWAP_QUARTERS(B0, B1); \
SWAP_QUARTERS(C0, C1); \
SWAP_QUARTERS(D0, D1); \
BLAKE2_ROUND(A0, B0, C0, D0, A1, B1, C1, D1); \
UNSWAP_QUARTERS(A0, A1); \
UNSWAP_QUARTERS(B0, B1); \
UNSWAP_QUARTERS(C0, C1); \
UNSWAP_QUARTERS(D0, D1); \
} while ((void)0, 0)
enum {
ARGON2_VECS_IN_BLOCK = ARGON2_OWORDS_IN_BLOCK / 4,
};
static void fill_block(__m512i *s, const block *ref_block, block *next_block,
int with_xor)
{
__m512i block_XY[ARGON2_VECS_IN_BLOCK];
unsigned int i;
if (with_xor) {
for (i = 0; i < ARGON2_VECS_IN_BLOCK; i++) {
s[i] = _mm512_xor_si512(
s[i], _mm512_loadu_si512((const __m512i *)ref_block->v + i));
block_XY[i] = _mm512_xor_si512(
s[i], _mm512_loadu_si512((const __m512i *)next_block->v + i));
}
} else {
for (i = 0; i < ARGON2_VECS_IN_BLOCK; i++) {
block_XY[i] = s[i] = _mm512_xor_si512(
s[i], _mm512_loadu_si512((const __m512i *)ref_block->v + i));
}
}
for (i = 0; i < 2; ++i) {
BLAKE2_ROUND1(
s[8 * i + 0], s[8 * i + 1], s[8 * i + 2], s[8 * i + 3],
s[8 * i + 4], s[8 * i + 5], s[8 * i + 6], s[8 * i + 7]);
}
for (i = 0; i < 2; ++i) {
BLAKE2_ROUND2(
s[2 * 0 + i], s[2 * 1 + i], s[2 * 2 + i], s[2 * 3 + i],
s[2 * 4 + i], s[2 * 5 + i], s[2 * 6 + i], s[2 * 7 + i]);
}
for (i = 0; i < ARGON2_VECS_IN_BLOCK; i++) {
s[i] = _mm512_xor_si512(s[i], block_XY[i]);
_mm512_storeu_si512((__m512i *)next_block->v + i, s[i]);
}
}
static void next_addresses(block *address_block, block *input_block)
{
/*Temporary zero-initialized blocks*/
__m512i zero_block[ARGON2_VECS_IN_BLOCK];
__m512i zero2_block[ARGON2_VECS_IN_BLOCK];
memset(zero_block, 0, sizeof(zero_block));
memset(zero2_block, 0, sizeof(zero2_block));
/*Increasing index counter*/
input_block->v[6]++;
/*First iteration of G*/
fill_block(zero_block, input_block, address_block, 0);
/*Second iteration of G*/
fill_block(zero2_block, address_block, address_block, 0);
}
void fill_segment_avx512f(const argon2_instance_t *instance,
argon2_position_t position)
{
block *ref_block = NULL, *curr_block = NULL;
block address_block, input_block;
uint64_t pseudo_rand, ref_index, ref_lane;
uint32_t prev_offset, curr_offset;
uint32_t starting_index, i;
__m512i state[ARGON2_VECS_IN_BLOCK];
int data_independent_addressing;
if (instance == NULL) {
return;
}
data_independent_addressing = (instance->type == Argon2_i) ||
(instance->type == Argon2_id && (position.pass == 0) &&
(position.slice < ARGON2_SYNC_POINTS / 2));
if (data_independent_addressing) {
init_block_value(&input_block, 0);
input_block.v[0] = position.pass;
input_block.v[1] = position.lane;
input_block.v[2] = position.slice;
input_block.v[3] = instance->memory_blocks;
input_block.v[4] = instance->passes;
input_block.v[5] = instance->type;
}
starting_index = 0;
if ((0 == position.pass) && (0 == position.slice)) {
starting_index = 2; /* we have already generated the first two blocks */
/* Don't forget to generate the first block of addresses: */
if (data_independent_addressing) {
next_addresses(&address_block, &input_block);
}
}
/* Offset of the current block */
curr_offset = position.lane * instance->lane_length +
position.slice * instance->segment_length + starting_index;
if (0 == curr_offset % instance->lane_length) {
/* Last block in this lane */
prev_offset = curr_offset + instance->lane_length - 1;
} else {
/* Previous block */
prev_offset = curr_offset - 1;
}
memcpy(state, ((instance->memory + prev_offset)->v), ARGON2_BLOCK_SIZE);
for (i = starting_index; i < instance->segment_length;
++i, ++curr_offset, ++prev_offset) {
/*1.1 Rotating prev_offset if needed */
if (curr_offset % instance->lane_length == 1) {
prev_offset = curr_offset - 1;
}
/* 1.2 Computing the index of the reference block */
/* 1.2.1 Taking pseudo-random value from the previous block */
if (data_independent_addressing) {
if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) {
next_addresses(&address_block, &input_block);
}
pseudo_rand = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK];
} else {
pseudo_rand = instance->memory[prev_offset].v[0];
}
/* 1.2.2 Computing the lane of the reference block */
ref_lane = ((pseudo_rand >> 32)) % instance->lanes;
if ((position.pass == 0) && (position.slice == 0)) {
/* Cannot reference other lanes yet */
ref_lane = position.lane;
}
/* 1.2.3 Computing the number of possible reference blocks within the
* lane.
*/
position.index = i;
ref_index = index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
ref_lane == position.lane);
/* 2 Creating a new block */
ref_block =
instance->memory + instance->lane_length * ref_lane + ref_index;
curr_block = instance->memory + curr_offset;
/* version 1.2.1 and earlier: overwrite, not XOR */
if (0 == position.pass || ARGON2_VERSION_10 == instance->version) {
fill_block(state, ref_block, curr_block, 0);
} else {
fill_block(state, ref_block, curr_block, 1);
}
}
}
int check_avx512f(void)
{
return cpu_flags_have_avx512f();
}
#else
void fill_segment_avx512f(const argon2_instance_t *instance,
argon2_position_t position)
{
}
int check_avx512f(void)
{
return 0;
}
#endif

@@ -0,0 +1,11 @@
#ifndef ARGON2_AVX512F_H
#define ARGON2_AVX512F_H
#include "core.h"
void fill_segment_avx512f(const argon2_instance_t *instance,
argon2_position_t position);
int check_avx512f(void);
#endif // ARGON2_AVX512F_H

@@ -0,0 +1,124 @@
#include "argon2-sse2.h"
#ifdef HAVE_SSE2
#include <x86intrin.h>
#include "cpu-flags.h"
#define ror64_16(x) \
_mm_shufflehi_epi16( \
_mm_shufflelo_epi16((x), _MM_SHUFFLE(0, 3, 2, 1)), \
_MM_SHUFFLE(0, 3, 2, 1))
#define ror64_24(x) \
_mm_xor_si128(_mm_srli_epi64((x), 24), _mm_slli_epi64((x), 40))
#define ror64_32(x) _mm_shuffle_epi32((x), _MM_SHUFFLE(2, 3, 0, 1))
#define ror64_63(x) \
_mm_xor_si128(_mm_srli_epi64((x), 63), _mm_add_epi64((x), (x)))
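/* SSE2 has neither a byte shuffle nor a 64-bit rotate: the 16-bit
 * rotate is built from 16-bit-lane shuffles, the 24-bit rotate from a
 * shift pair, and rotate-right-by-63 from a shift plus a cheap x + x. */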
static __m128i f(__m128i x, __m128i y)
{
__m128i z = _mm_mul_epu32(x, y);
return _mm_add_epi64(_mm_add_epi64(x, y), _mm_add_epi64(z, z));
}
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = ror64_32(D0); \
D1 = ror64_32(D1); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = ror64_24(B0); \
B1 = ror64_24(B1); \
} while ((void)0, 0)
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = ror64_16(D0); \
D1 = ror64_16(D1); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = ror64_63(B0); \
B1 = ror64_63(B1); \
} while ((void)0, 0)
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = D0; \
__m128i t1 = B0; \
D0 = _mm_unpackhi_epi64(D1, _mm_unpacklo_epi64(t0, t0)); \
D1 = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(D1, D1)); \
B0 = _mm_unpackhi_epi64(B0, _mm_unpacklo_epi64(B1, B1)); \
B1 = _mm_unpackhi_epi64(B1, _mm_unpacklo_epi64(t1, t1)); \
} while ((void)0, 0)
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = B0; \
__m128i t1 = D0; \
B0 = _mm_unpackhi_epi64(B1, _mm_unpacklo_epi64(B0, B0)); \
B1 = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(B1, B1)); \
D0 = _mm_unpackhi_epi64(D0, _mm_unpacklo_epi64(D1, D1)); \
D1 = _mm_unpackhi_epi64(D1, _mm_unpacklo_epi64(t1, t1)); \
} while ((void)0, 0)
#define BLAKE2_ROUND(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C1, D0, A1, B1, C0, D1); \
G2(A0, B0, C1, D0, A1, B1, C0, D1); \
\
UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
#include "argon2-template-128.h"
void fill_segment_sse2(const argon2_instance_t *instance,
argon2_position_t position)
{
fill_segment_128(instance, position);
}
int check_sse2(void)
{
return cpu_flags_have_sse2();
}
#else
void fill_segment_sse2(const argon2_instance_t *instance,
argon2_position_t position)
{
}
int check_sse2(void)
{
return 0;
}
#endif

@@ -0,0 +1,11 @@
#ifndef ARGON2_SSE2_H
#define ARGON2_SSE2_H
#include "core.h"
void fill_segment_sse2(const argon2_instance_t *instance,
argon2_position_t position);
int check_sse2(void);
#endif // ARGON2_SSE2_H

@@ -0,0 +1,136 @@
#include "argon2-ssse3.h"
#ifdef HAVE_SSSE3
#include <string.h>
#include <x86intrin.h>
#include "cpu-flags.h"
#define r16 (_mm_setr_epi8( \
2, 3, 4, 5, 6, 7, 0, 1, \
10, 11, 12, 13, 14, 15, 8, 9))
#define r24 (_mm_setr_epi8( \
3, 4, 5, 6, 7, 0, 1, 2, \
11, 12, 13, 14, 15, 8, 9, 10))
#define ror64_16(x) _mm_shuffle_epi8((x), r16)
#define ror64_24(x) _mm_shuffle_epi8((x), r24)
#define ror64_32(x) _mm_shuffle_epi32((x), _MM_SHUFFLE(2, 3, 0, 1))
#define ror64_63(x) \
_mm_xor_si128(_mm_srli_epi64((x), 63), _mm_add_epi64((x), (x)))
static __m128i f(__m128i x, __m128i y)
{
__m128i z = _mm_mul_epu32(x, y);
return _mm_add_epi64(_mm_add_epi64(x, y), _mm_add_epi64(z, z));
}
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = ror64_32(D0); \
D1 = ror64_32(D1); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = ror64_24(B0); \
B1 = ror64_24(B1); \
} while ((void)0, 0)
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = ror64_16(D0); \
D1 = ror64_16(D1); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = ror64_63(B0); \
B1 = ror64_63(B1); \
} while ((void)0, 0)
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = _mm_alignr_epi8(B1, B0, 8); \
__m128i t1 = _mm_alignr_epi8(B0, B1, 8); \
B0 = t0; \
B1 = t1; \
\
t0 = _mm_alignr_epi8(D1, D0, 8); \
t1 = _mm_alignr_epi8(D0, D1, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = _mm_alignr_epi8(B0, B1, 8); \
__m128i t1 = _mm_alignr_epi8(B1, B0, 8); \
B0 = t0; \
B1 = t1; \
\
t0 = _mm_alignr_epi8(D0, D1, 8); \
t1 = _mm_alignr_epi8(D1, D0, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#define BLAKE2_ROUND(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C1, D0, A1, B1, C0, D1); \
G2(A0, B0, C1, D0, A1, B1, C0, D1); \
\
UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
#include "argon2-template-128.h"
void fill_segment_ssse3(const argon2_instance_t *instance,
argon2_position_t position)
{
fill_segment_128(instance, position);
}
int check_ssse3(void)
{
return cpu_flags_have_ssse3();
}
#else
void fill_segment_ssse3(const argon2_instance_t *instance,
argon2_position_t position)
{
}
int check_ssse3(void)
{
return 0;
}
#endif

@@ -0,0 +1,11 @@
#ifndef ARGON2_SSSE3_H
#define ARGON2_SSSE3_H
#include "core.h"
void fill_segment_ssse3(const argon2_instance_t *instance,
argon2_position_t position);
int check_ssse3(void);
#endif // ARGON2_SSSE3_H

@@ -0,0 +1,164 @@
#include <string.h>
#include <x86intrin.h>
#include "core.h"
static void fill_block(__m128i *s, const block *ref_block, block *next_block,
int with_xor)
{
__m128i block_XY[ARGON2_OWORDS_IN_BLOCK];
unsigned int i;
if (with_xor) {
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
s[i] = _mm_xor_si128(
s[i], _mm_loadu_si128((const __m128i *)ref_block->v + i));
block_XY[i] = _mm_xor_si128(
s[i], _mm_loadu_si128((const __m128i *)next_block->v + i));
}
} else {
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
block_XY[i] = s[i] = _mm_xor_si128(
s[i], _mm_loadu_si128((const __m128i *)ref_block->v + i));
}
}
for (i = 0; i < 8; ++i) {
BLAKE2_ROUND(
s[8 * i + 0], s[8 * i + 1], s[8 * i + 2], s[8 * i + 3],
s[8 * i + 4], s[8 * i + 5], s[8 * i + 6], s[8 * i + 7]);
}
for (i = 0; i < 8; ++i) {
BLAKE2_ROUND(
s[8 * 0 + i], s[8 * 1 + i], s[8 * 2 + i], s[8 * 3 + i],
s[8 * 4 + i], s[8 * 5 + i], s[8 * 6 + i], s[8 * 7 + i]);
}
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
s[i] = _mm_xor_si128(s[i], block_XY[i]);
_mm_storeu_si128((__m128i *)next_block->v + i, s[i]);
}
}
static void next_addresses(block *address_block, block *input_block)
{
/*Temporary zero-initialized blocks*/
__m128i zero_block[ARGON2_OWORDS_IN_BLOCK];
__m128i zero2_block[ARGON2_OWORDS_IN_BLOCK];
memset(zero_block, 0, sizeof(zero_block));
memset(zero2_block, 0, sizeof(zero2_block));
/*Increasing index counter*/
input_block->v[6]++;
/*First iteration of G*/
fill_block(zero_block, input_block, address_block, 0);
/*Second iteration of G*/
fill_block(zero2_block, address_block, address_block, 0);
}
static void fill_segment_128(const argon2_instance_t *instance,
argon2_position_t position)
{
block *ref_block = NULL, *curr_block = NULL;
block address_block, input_block;
uint64_t pseudo_rand, ref_index, ref_lane;
uint32_t prev_offset, curr_offset;
uint32_t starting_index, i;
__m128i state[ARGON2_OWORDS_IN_BLOCK];
int data_independent_addressing;
if (instance == NULL) {
return;
}
data_independent_addressing = (instance->type == Argon2_i) ||
(instance->type == Argon2_id && (position.pass == 0) &&
(position.slice < ARGON2_SYNC_POINTS / 2));
if (data_independent_addressing) {
init_block_value(&input_block, 0);
input_block.v[0] = position.pass;
input_block.v[1] = position.lane;
input_block.v[2] = position.slice;
input_block.v[3] = instance->memory_blocks;
input_block.v[4] = instance->passes;
input_block.v[5] = instance->type;
}
starting_index = 0;
if ((0 == position.pass) && (0 == position.slice)) {
starting_index = 2; /* we have already generated the first two blocks */
/* Don't forget to generate the first block of addresses: */
if (data_independent_addressing) {
next_addresses(&address_block, &input_block);
}
}
/* Offset of the current block */
curr_offset = position.lane * instance->lane_length +
position.slice * instance->segment_length + starting_index;
if (0 == curr_offset % instance->lane_length) {
/* Last block in this lane */
prev_offset = curr_offset + instance->lane_length - 1;
} else {
/* Previous block */
prev_offset = curr_offset - 1;
}
memcpy(state, ((instance->memory + prev_offset)->v), ARGON2_BLOCK_SIZE);
for (i = starting_index; i < instance->segment_length;
++i, ++curr_offset, ++prev_offset) {
/*1.1 Rotating prev_offset if needed */
if (curr_offset % instance->lane_length == 1) {
prev_offset = curr_offset - 1;
}
/* 1.2 Computing the index of the reference block */
/* 1.2.1 Taking pseudo-random value from the previous block */
if (data_independent_addressing) {
if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) {
next_addresses(&address_block, &input_block);
}
pseudo_rand = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK];
} else {
pseudo_rand = instance->memory[prev_offset].v[0];
}
/* 1.2.2 Computing the lane of the reference block */
ref_lane = ((pseudo_rand >> 32)) % instance->lanes;
if ((position.pass == 0) && (position.slice == 0)) {
/* Cannot reference other lanes yet */
ref_lane = position.lane;
}
/* 1.2.3 Computing the number of possible reference blocks within the
* lane.
*/
position.index = i;
ref_index = index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
ref_lane == position.lane);
/* 2 Creating a new block */
ref_block =
instance->memory + instance->lane_length * ref_lane + ref_index;
curr_block = instance->memory + curr_offset;
/* version 1.2.1 and earlier: overwrite, not XOR */
if (0 == position.pass || ARGON2_VERSION_10 == instance->version) {
fill_block(state, ref_block, curr_block, 0);
} else {
fill_block(state, ref_block, curr_block, 1);
}
}
}

@@ -0,0 +1,124 @@
#include "argon2-xop.h"
#ifdef HAVE_XOP
#include <string.h>
#include <x86intrin.h>
#include "cpu-flags.h"
#define ror64(x, c) _mm_roti_epi64((x), -(c))
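/* XOP's vprotq is a true 64-bit rotate; _mm_roti_epi64 rotates left by
 * the immediate, so negating the count yields a rotate-right. */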
static __m128i f(__m128i x, __m128i y)
{
__m128i z = _mm_mul_epu32(x, y);
return _mm_add_epi64(_mm_add_epi64(x, y), _mm_add_epi64(z, z));
}
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = ror64(D0, 32); \
D1 = ror64(D1, 32); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = ror64(B0, 24); \
B1 = ror64(B1, 24); \
} while ((void)0, 0)
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = f(A0, B0); \
A1 = f(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = ror64(D0, 16); \
D1 = ror64(D1, 16); \
\
C0 = f(C0, D0); \
C1 = f(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = ror64(B0, 63); \
B1 = ror64(B1, 63); \
} while ((void)0, 0)
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = _mm_alignr_epi8(B1, B0, 8); \
__m128i t1 = _mm_alignr_epi8(B0, B1, 8); \
B0 = t0; \
B1 = t1; \
\
t0 = _mm_alignr_epi8(D1, D0, 8); \
t1 = _mm_alignr_epi8(D0, D1, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = _mm_alignr_epi8(B0, B1, 8); \
__m128i t1 = _mm_alignr_epi8(B1, B0, 8); \
B0 = t0; \
B1 = t1; \
\
t0 = _mm_alignr_epi8(D0, D1, 8); \
t1 = _mm_alignr_epi8(D1, D0, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#define BLAKE2_ROUND(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C1, D0, A1, B1, C0, D1); \
G2(A0, B0, C1, D0, A1, B1, C0, D1); \
\
UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
#include "argon2-template-128.h"
void fill_segment_xop(const argon2_instance_t *instance,
argon2_position_t position)
{
fill_segment_128(instance, position);
}
int check_xop(void)
{
return cpu_flags_have_xop();
}
#else
void fill_segment_xop(const argon2_instance_t *instance,
argon2_position_t position)
{
}
int check_xop(void)
{
return 0;
}
#endif

@@ -0,0 +1,11 @@
#ifndef ARGON2_XOP_H
#define ARGON2_XOP_H
#include "core.h"
void fill_segment_xop(const argon2_instance_t *instance,
argon2_position_t position);
int check_xop(void);
#endif // ARGON2_XOP_H

@@ -0,0 +1,91 @@
#include "cpu-flags.h"
#include <cpuid.h>
enum {
X86_64_FEATURE_SSE2 = (1 << 0),
X86_64_FEATURE_SSSE3 = (1 << 1),
X86_64_FEATURE_XOP = (1 << 2),
X86_64_FEATURE_AVX2 = (1 << 3),
X86_64_FEATURE_AVX512F = (1 << 4),
};
static unsigned int cpu_flags;
static unsigned int get_cpuid(int ext, unsigned int level, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
unsigned int eax;
__cpuid(ext ? (0x80000000 | level) : level,
eax, *ebx, *ecx, *edx);
return eax;
}
static unsigned int get_cpuid_count(int ext, unsigned int level,
unsigned int count, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
unsigned int eax;
__cpuid_count(ext ? (0x80000000 | level) : level,
count, eax, *ebx, *ecx, *edx);
return 1; /* __cpuid_count itself cannot fail; callers gate on the max level first */
}
void cpu_flags_get(void)
{
unsigned int ebx, ecx, edx;
unsigned int level, level_ext;
cpu_flags = 0;
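/* Feature bits checked below: CPUID.01H:EDX[26] = SSE2,
 * CPUID.01H:ECX[9] = SSSE3, CPUID.(EAX=07H,ECX=0):EBX[5] = AVX2,
 * EBX[16] = AVX512F, and CPUID.80000001H:ECX[11] = XOP. */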
level = get_cpuid(0, 0, &ebx, &ecx, &edx);
level_ext = get_cpuid(1, 0, &ebx, &ecx, &edx);
if (level >= 1 && get_cpuid(0, 1, &ebx, &ecx, &edx)) {
if (edx & (1 << 26)) {
cpu_flags |= X86_64_FEATURE_SSE2;
}
if (ecx & (1 << 9)) {
cpu_flags |= X86_64_FEATURE_SSSE3;
}
}
if (level >= 7 && get_cpuid_count(0, 7, 0, &ebx, &ecx, &edx)) {
if (ebx & (1 << 5)) {
cpu_flags |= X86_64_FEATURE_AVX2;
}
if (ebx & (1 << 16)) {
cpu_flags |= X86_64_FEATURE_AVX512F;
}
}
if (level_ext >= 1 && get_cpuid(1, 1, &ebx, &ecx, &edx)) {
if (ecx & (1 << 11)) {
cpu_flags |= X86_64_FEATURE_XOP;
}
}
/* FIXME: check also OS support (OSXSAVE/XGETBV), not just CPUID bits! */
}
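/* Hedged sketch of the OS-support check the FIXME refers to (not part
 * of this commit): the OS must have enabled XSAVE (OSXSAVE,
 * CPUID.01H:ECX[27]) and XCR0 must show XMM (bit 1) and YMM (bit 2)
 * state enabled before the AVX2 bit can be trusted; AVX-512F would
 * additionally need the opmask/ZMM bits (XCR0[7:5]). */
static int os_supports_avx(void)
{
    unsigned int ebx, ecx, edx;
    unsigned int lo, hi;

    get_cpuid(0, 1, &ebx, &ecx, &edx);
    if (!(ecx & (1u << 27))) {
        return 0; /* no OSXSAVE: XGETBV is unavailable */
    }
    __asm__ volatile ("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0u));
    return (lo & 0x6u) == 0x6u; /* XMM and YMM state enabled */
}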
int cpu_flags_have_sse2(void)
{
return cpu_flags & X86_64_FEATURE_SSE2;
}
int cpu_flags_have_ssse3(void)
{
return cpu_flags & X86_64_FEATURE_SSSE3;
}
int cpu_flags_have_xop(void)
{
return cpu_flags & X86_64_FEATURE_XOP;
}
int cpu_flags_have_avx2(void)
{
return cpu_flags & X86_64_FEATURE_AVX2;
}
int cpu_flags_have_avx512f(void)
{
return cpu_flags & X86_64_FEATURE_AVX512F;
}

@@ -0,0 +1,12 @@
#ifndef ARGON2_CPU_FLAGS_H
#define ARGON2_CPU_FLAGS_H
void cpu_flags_get(void);
int cpu_flags_have_sse2(void);
int cpu_flags_have_ssse3(void);
int cpu_flags_have_xop(void);
int cpu_flags_have_avx2(void);
int cpu_flags_have_avx512f(void);
#endif // ARGON2_CPU_FLAGS_H

@@ -0,0 +1,8 @@
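/* Presumably a build-system probe: this file compiles only when the
 * toolchain supports AVX2 intrinsics, letting the build define
 * HAVE_AVX2 (the exact wiring is not shown in this diff). The sibling
 * probes below do the same for AVX-512F, SSE2, SSSE3, and XOP. */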
#include <x86intrin.h>
void function_avx2(__m256i *dst, const __m256i *a, const __m256i *b)
{
*dst = _mm256_xor_si256(*a, *b);
}
int main(void) { return 0; }

@@ -0,0 +1,8 @@
#include <x86intrin.h>
void function_avx512f(__m512i *dst, const __m512i *a)
{
*dst = _mm512_ror_epi64(*a, 57);
}
int main(void) { return 0; }

@@ -0,0 +1,8 @@
#include <x86intrin.h>
void function_sse2(__m128i *dst, const __m128i *a, const __m128i *b)
{
*dst = _mm_xor_si128(*a, *b);
}
int main(void) { return 0; }

@@ -0,0 +1,8 @@
#include <x86intrin.h>
void function_ssse3(__m128i *dst, const __m128i *a, const __m128i *b)
{
*dst = _mm_shuffle_epi8(*a, *b);
}
int main(void) { return 0; }

@@ -0,0 +1,8 @@
#include <x86intrin.h>
void function_xop(__m128i *dst, const __m128i *a, int b)
{
*dst = _mm_roti_epi64(*a, b);
}
int main(void) { return 0; }