Adjust panthera code for MSVC 2019 compilation (fixes #41)

Tony Butler 2021-03-26 15:48:20 -06:00
parent 174663bb50
commit 504f608871
3 changed files with 29 additions and 12 deletions

View file

@@ -37,12 +37,19 @@
 #ifdef __ICC
 /* Miscompile with icc 14.0.0 (at least), so don't use restrict there */
 #define restrict
+#define static_restrict static
+#elif defined(_MSC_VER)
+#define restrict
+#define static_restrict
 #elif __STDC_VERSION__ >= 199901L
 /* Have restrict */
+#define static_restrict static restrict
 #elif defined(__GNUC__)
 #define restrict __restrict
+#define static_restrict static __restrict
 #else
 #define restrict
+#define static_restrict
 #endif
 
 /*
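In C99, static and restrict may appear inside an array parameter declarator: uint32_t state[static restrict 8] promises the compiler a non-null, unaliased pointer to at least 8 elements. MSVC 2019's C front end rejects these qualifiers in that position, so the patch routes every such use through a static_restrict macro that expands to nothing under MSVC and to "static restrict" where supported. A minimal self-contained sketch of the same scheme (the clear8 function is hypothetical, for illustration only):

#include <stdint.h>

#if defined(_MSC_VER)
#define restrict            /* MSVC C mode: no restrict keyword */
#define static_restrict     /* and no qualifiers inside [ ] */
#elif __STDC_VERSION__ >= 199901L
/* restrict is a keyword here */
#define static_restrict static restrict
#else
#define restrict
#define static_restrict
#endif

/* Under C99 this declares: state != NULL, points to at least 8
 * uint32_t, and is not aliased.  Under MSVC it degrades to a
 * plain "uint32_t *state" with no contract. */
static void
clear8(uint32_t state[static_restrict 8])
{
	int i;

	for (i = 0; i < 8; i++)
		state[i] = 0;
}

The remaining hunks in this file are the mechanical rename from "static restrict" to static_restrict in every affected signature.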
@@ -132,9 +139,9 @@ static const uint32_t Krnd[64] = {
  * the 512-bit input block to produce a new state.
  */
 static void
-SHA256_Transform(uint32_t state[static restrict 8],
-    const uint8_t block[static restrict 64],
-    uint32_t W[static restrict 64], uint32_t S[static restrict 8])
+SHA256_Transform(uint32_t state[static_restrict 8],
+    const uint8_t block[static_restrict 64],
+    uint32_t W[static_restrict 64], uint32_t S[static_restrict 8])
 {
 	int i;
@@ -203,7 +210,7 @@ static const uint8_t PAD[64] = {
 /* Add padding and terminating bit-count. */
 static void
-SHA256_Pad(SHA256_CTX * ctx, uint32_t tmp32[static restrict 72])
+SHA256_Pad(SHA256_CTX * ctx, uint32_t tmp32[static_restrict 72])
 {
 	size_t r;
@@ -257,7 +264,7 @@ SHA256_Init(SHA256_CTX * ctx)
  */
 static void
 _SHA256_Update(SHA256_CTX * ctx, const void * in, size_t len,
-    uint32_t tmp32[static restrict 72])
+    uint32_t tmp32[static_restrict 72])
 {
 	uint32_t r;
 	const uint8_t * src = in;
@@ -315,7 +322,7 @@ SHA256_Update(SHA256_CTX * ctx, const void * in, size_t len)
  */
 static void
 _SHA256_Final(uint8_t digest[32], SHA256_CTX * ctx,
-    uint32_t tmp32[static restrict 72])
+    uint32_t tmp32[static_restrict 72])
 {
 	/* Add padding. */
@@ -367,8 +374,8 @@ SHA256_Buf(const void * in, size_t len, uint8_t digest[32])
  */
 static void
 _HMAC_SHA256_Init(HMAC_SHA256_CTX * ctx, const void * _K, size_t Klen,
-    uint32_t tmp32[static restrict 72], uint8_t pad[static restrict 64],
-    uint8_t khash[static restrict 32])
+    uint32_t tmp32[static_restrict 72], uint8_t pad[static_restrict 64],
+    uint8_t khash[static_restrict 32])
 {
 	const uint8_t * K = _K;
 	size_t i;
@@ -420,7 +427,7 @@ HMAC_SHA256_Init(HMAC_SHA256_CTX * ctx, const void * _K, size_t Klen)
  */
 static void
 _HMAC_SHA256_Update(HMAC_SHA256_CTX * ctx, const void * in, size_t len,
-    uint32_t tmp32[static restrict 72])
+    uint32_t tmp32[static_restrict 72])
 {
 	/* Feed data to the inner SHA256 operation. */
@@ -447,7 +454,7 @@ HMAC_SHA256_Update(HMAC_SHA256_CTX * ctx, const void * in, size_t len)
  */
 static void
 _HMAC_SHA256_Final(uint8_t digest[32], HMAC_SHA256_CTX * ctx,
-    uint32_t tmp32[static restrict 72], uint8_t ihash[static restrict 32])
+    uint32_t tmp32[static_restrict 72], uint8_t ihash[static_restrict 32])
 {
 	/* Finish the inner SHA256 operation. */
@@ -500,8 +507,8 @@ HMAC_SHA256_Buf(const void * K, size_t Klen, const void * in, size_t len,
 /* Add padding and terminating bit-count, but don't invoke Transform yet. */
 static int
-SHA256_Pad_Almost(SHA256_CTX * ctx, uint8_t len[static restrict 8],
-    uint32_t tmp32[static restrict 72])
+SHA256_Pad_Almost(SHA256_CTX * ctx, uint8_t len[static_restrict 8],
+    uint32_t tmp32[static_restrict 72])
 {
 	uint32_t r;

View file

@@ -49,6 +49,7 @@
  * no slowdown from the prefixes is generally observed on AMD CPUs supporting
  * XOP, some slowdown is sometimes observed on Intel CPUs with AVX.
  */
+#if !defined(_MSC_VER)
 #ifdef __XOP__
 #warning "Note: XOP is enabled. That's great."
 #elif defined(__AVX__)
@@ -60,6 +61,7 @@
 #else
 #warning "Note: building generic code for non-x86. That's OK."
 #endif
+#endif
 
 /*
  * The SSE4 code version has fewer instructions than the generic SSE2 version,
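#warning is a GCC/Clang preprocessor extension; MSVC's preprocessor has no such directive and aborts with a fatal "invalid preprocessor command" error whenever a #warning line sits in an active branch. The guards above (and the matching ones later in this commit) simply hide these build notes from MSVC. If the notes are still wanted there, #pragma message is the closest MSVC equivalent; a hedged sketch, with illustrative message text rather than the patch's own:

#if defined(_MSC_VER)
/* MSVC: #pragma message emits a build note without failing. */
#pragma message("Note: building generic code. That's OK.")
#else
#warning "Note: building generic code for non-x86. That's OK."
#endif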
@@ -102,6 +104,10 @@
 #include "yespower-platform.c"
 
+#if defined(_MSC_VER)
+#define __thread
+#endif
+
 #if __STDC_VERSION__ >= 199901L
 /* Have restrict */
 #elif defined(__GNUC__)
 #define restrict __restrict
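__thread is the GCC/Clang storage-class keyword for thread-local variables; MSVC spells it __declspec(thread) (C11 has _Thread_local). Defining __thread to nothing, as this hunk does, compiles cleanly but silently turns any such variable into an ordinary static shared by all threads, which is only safe when each yespower context is driven from a single thread. A portable alternative, sketched with a hypothetical YP_THREAD_LOCAL name:

#if defined(_MSC_VER)
#define YP_THREAD_LOCAL __declspec(thread)
#elif defined(__GNUC__)
#define YP_THREAD_LOCAL __thread
#else
#define YP_THREAD_LOCAL /* no TLS: single-threaded use only */
#endif

/* Each thread gets its own copy under both MSVC and GCC/Clang. */
static YP_THREAD_LOCAL int yp_scratch_initialized;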
@@ -527,7 +533,9 @@ static volatile uint64_t Smask2var = Smask2;
 /* 64-bit without AVX. This relies on out-of-order execution and register
  * renaming. It may actually be fastest on CPUs with AVX(2) as well - e.g.,
  * it runs great on Haswell. */
+#if !defined(_MSC_VER)
 #warning "Note: using x86-64 inline assembly for pwxform. That's great."
+#endif
 #undef MAYBE_MEMORY_BARRIER
 #define MAYBE_MEMORY_BARRIER \
 	__asm__("" : : : "memory");

View file

@@ -44,7 +44,9 @@
  * yespower-opt.c.
  */
 
+#if !defined(_MSC_VER)
 #warning "This reference implementation is deliberately mostly not optimized. Use yespower-opt.c instead unless you're testing (against) the reference implementation on purpose."
+#endif
 
 #include <errno.h>
 #include <stdint.h>