From 092f12d3e41a92760afdaac0e4ed389ec064a82b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Sat, 11 May 2024 22:15:35 +0300 Subject: [PATCH] AVX512 and VPCLMULQDQ based CRC-32 and CRC-32C This implementation is based on crc32_refl_by16_vclmul_avx512 in https://github.com/intel/intel-ipsec-mb/ with some optimizations. Some of the code is based on #72. --- CMakeLists.txt | 53 ++- .../private/intel/crc32c_compiler_shims.h | 26 ++ source/intel/asm/crc32c_sse42_asm.c | 22 +- source/intel/crc_hw.c | 127 +++++++ source/intel/intrin/crc32_avx512.c | 343 ++++++++++++++++++ source/intel/visualc/visualc_crc32c_sse42.c | 28 +- tests/crc_test.c | 25 ++ 7 files changed, 581 insertions(+), 43 deletions(-) create mode 100644 include/aws/checksums/private/intel/crc32c_compiler_shims.h create mode 100644 source/intel/crc_hw.c create mode 100644 source/intel/intrin/crc32_avx512.c diff --git a/CMakeLists.txt b/CMakeLists.txt index a21bc36..a7fb210 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,6 +28,7 @@ string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${A # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) +include(AwsSIMD) include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) @@ -58,17 +59,48 @@ file(GLOB AWS_ARCH_SRC ) if (USE_CPU_EXTENSIONS) - if(AWS_ARCH_INTEL) - # First, check if inline assembly is available. Inline assembly can also be supported by MSVC if the compiler in use is Clang. - if(AWS_HAVE_GCC_INLINE_ASM) - file(GLOB AWS_ARCH_SRC - "source/intel/asm/*.c" + if (AWS_ARCH_INTEL) + file (GLOB AWS_ARCH_INTEL_SRC + "source/intel/*.c" + ) + + if (AWS_HAVE_AVX512_INTRINSICS) + if (MSVC) + file(GLOB AWS_ARCH_INTRIN_SRC + "source/intel/intrin/*.c" + "source/intel/visualc/*.c" ) - elseif (MSVC) - file(GLOB AWS_ARCH_SRC + else() + file(GLOB AWS_ARCH_INTRIN_SRC + "source/intel/intrin/*.c" + ) + endif() + else() + if (MSVC) + file(GLOB AWS_ARCH_INTRIN_SRC "source/intel/visualc/*.c" + ) + endif() + endif() + + source_group("Source Files\\intel" FILES ${AWS_ARCH_INTEL_SRC}) + source_group("Source Files\\intel\\intrin" FILES ${AWS_ARCH_INTRIN_SRC}) + + if (AWS_HAVE_GCC_INLINE_ASM) + file(GLOB AWS_ARCH_ASM_SRC + "source/intel/asm/*.c" + ) + + file(GLOB AWS_ARCH_SRC + ${AWS_ARCH_INTEL_SRC} + ${AWS_ARCH_INTRIN_SRC} + ${AWS_ARCH_ASM_SRC} + ) + else() + file(GLOB AWS_ARCH_SRC + ${AWS_ARCH_INTEL_SRC} + ${AWS_ARCH_INTRIN_SRC} ) - source_group("Source Files\\intel\\visualc" FILES ${AWS_ARCH_SRC}) endif() endif() @@ -114,6 +146,7 @@ file(GLOB CHECKSUMS_COMBINED_SRC add_library(${PROJECT_NAME} ${CHECKSUMS_COMBINED_HEADERS} ${CHECKSUMS_COMBINED_SRC}) + aws_set_common_properties(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_CHECKSUMS") aws_check_headers(${PROJECT_NAME} ${AWS_CHECKSUMS_HEADERS}) @@ -123,6 +156,10 @@ aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) +if (USE_CPU_EXTENSIONS AND AWS_ARCH_INTEL) + simd_add_source_avx(${PROJECT_NAME} ${AWS_ARCH_SRC}) +endif() + target_include_directories(${PROJECT_NAME} PUBLIC $ $) diff --git a/include/aws/checksums/private/intel/crc32c_compiler_shims.h b/include/aws/checksums/private/intel/crc32c_compiler_shims.h new file mode 100644 index 0000000..b321757 --- /dev/null +++ b/include/aws/checksums/private/intel/crc32c_compiler_shims.h @@ -0,0 +1,26 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0. + */ + +#include + +#include +#include + +#if _WIN64 || __x86_64__ || __ppc64_ +typedef uint64_t *slice_ptr_type; +typedef uint64_t slice_ptr_int_type; +# define crc_intrin_fn _mm_crc32_u64 +#else +typedef uint32_t *slice_ptr_type; +typedef uint32_t slice_ptr_int_type; +# define crc_intrin_fn _mm_crc32_u32 +#endif + +#ifdef AWS_HAVE_AVX512_INTRINSICS +uint32_t aws_checksums_crc32c_avx512(const uint8_t *input, int length, uint32_t crc); +uint32_t aws_checksums_crc32_avx512(const uint8_t *input, int length, uint32_t crc); +#endif + +uint32_t aws_checksums_crc32c_sse42(const uint8_t *input, int length, uint32_t crc); diff --git a/source/intel/asm/crc32c_sse42_asm.c b/source/intel/asm/crc32c_sse42_asm.c index 35e1d09..bc79597 100644 --- a/source/intel/asm/crc32c_sse42_asm.c +++ b/source/intel/asm/crc32c_sse42_asm.c @@ -3,7 +3,7 @@ * SPDX-License-Identifier: Apache-2.0. */ -#include +#include #include @@ -283,7 +283,7 @@ static bool detected_clmul = false; * Pass 0 in the previousCrc32 parameter as an initial value unless continuing to update a running CRC in a subsequent * call. */ -uint32_t aws_checksums_crc32c_hw(const uint8_t *input, int length, uint32_t previousCrc32) { +uint32_t aws_checksums_crc32c_sse42(const uint8_t *input, int length, uint32_t previousCrc32) { if (AWS_UNLIKELY(!detection_performed)) { detected_clmul = aws_cpu_has_feature(AWS_CPU_FEATURE_CLMUL); @@ -293,7 +293,8 @@ uint32_t aws_checksums_crc32c_hw(const uint8_t *input, int length, uint32_t prev detection_performed = true; } - uint32_t crc = ~previousCrc32; + /* this is called by a higher-level shim and previousCRC32 is already ~ */ + uint32_t crc = previousCrc32; /* For small input, forget about alignment checks - simply compute the CRC32c one byte at a time */ if (AWS_UNLIKELY(length < 8)) { @@ -358,22 +359,17 @@ uint32_t aws_checksums_crc32c_hw(const uint8_t *input, int length, uint32_t prev return ~crc; } -uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32) { - return aws_checksums_crc32_sw(input, length, previousCrc32); -} # if defined(__clang__) # pragma clang diagnostic pop # endif #else -uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32) { - return aws_checksums_crc32_sw(input, length, previousCrc32); -} - -uint32_t aws_checksums_crc32c_hw(const uint8_t *input, int length, uint32_t previousCrc32) { - return aws_checksums_crc32c_sw(input, length, previousCrc32); +uint32_t aws_checksums_crc32c_sse42(const uint8_t *input, int length, uint32_t previousCrc32) { + /* these are nested in a larger computation. As a result the crc doesn't need to be bit flipped. + However, the sw function is also used as a standalone implementation that does need to do the + bit flip. So go ahead and flip it here, so the sw implementation flips it back. */ + return aws_checksums_crc32c_sw(input, length, ~previousCrc32); } - #endif /* clang-format on */ diff --git a/source/intel/crc_hw.c b/source/intel/crc_hw.c new file mode 100644 index 0000000..7fc2ee0 --- /dev/null +++ b/source/intel/crc_hw.c @@ -0,0 +1,127 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include +#ifdef _MSC_VER +# include +#else +# include +#endif + +static bool detection_performed; +static bool detected_sse42; +static bool detected_clmul; +#ifdef AWS_HAVE_AVX512_INTRINSICS +static bool detected_vpclmulqdq; +#endif + +static void aws_checksums_hw_detect(void) +{ +#ifdef _MSC_VER + int regs[4]; + __cpuid(regs, 1); + uint32_t ecx = regs[2]; +#else + uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; + __cpuid(1, reax, rebx, recx, redx); +#endif + detected_sse42 = ecx & 1U << 20; + detected_clmul = ecx & 1U << 1; + +#ifdef AWS_HAVE_AVX512_INTRINSICS +# ifdef _MSC_VER + __cpuidex(regs, 7, 0); + uint32_t ebx = regs[1]; + ecx = regs[2]; +# else + __cpuid_count(7, 0, eax, ebx, ecx, edx); +# endif + detected_vpclmulqdq = ecx & 1U<<10/*VPCLMULQDQ*/ && + !(~ebx & ((1U<<16/*AVX512F*/ | 1U<<17/*AVX512DQ*/ | + 1U<<30/*AVX512BW*/ | 1U<<31/*AVX512VL*/))); +#endif + + /* Simply setting the flag true to skip HW detection next time + Not using memory barriers since the worst that can + happen is a fallback to the non HW accelerated code. */ + detection_performed = true; +} + +/* + * Computes the Castagnoli CRC32c (iSCSI) of the specified data buffer using the Intel CRC32Q (64-bit quad word) and + * PCLMULQDQ machine instructions (if present). + * Handles data that isn't 8-byte aligned as well as any trailing data with the CRC32B (byte) instruction. + * Pass 0 in the previousCrc32 parameter as an initial value unless continuing to update a running CRC in a subsequent + * call. + */ +uint32_t aws_checksums_crc32c_hw(const uint8_t *input, int length, uint32_t previousCrc32) { + + if (AWS_UNLIKELY(!detection_performed)) { + aws_checksums_hw_detect(); + } + +#ifdef AWS_HAVE_AVX512_INTRINSICS + if (detected_vpclmulqdq) { + return aws_checksums_crc32c_avx512(inputr, length, crc); + } +#endif + + /* this is the entry point. We should only do the bit flip once. 
+     * branches.*/
+    uint32_t crc = ~previousCrc32;
+
+    /* For small input, forget about alignment checks - simply compute the CRC32c one byte at a time */
+    if (length < (int)sizeof(slice_ptr_int_type)) {
+        while (length-- > 0) {
+            crc = (uint32_t)_mm_crc32_u8(crc, *input++);
+        }
+        return ~crc;
+    }
+
+    /* Get the 8-byte memory alignment of our input buffer by looking at the least significant 3 bits */
+    int input_alignment = (uintptr_t)(input)&0x7;
+
+    /* Compute the number of unaligned bytes before the first aligned 8-byte chunk (will be in the range 0-7) */
+    int leading = (8 - input_alignment) & 0x7;
+
+    /* reduce the length by the leading unaligned bytes we are about to process */
+    length -= leading;
+
+    /* spin through the leading unaligned input bytes (if any) one-by-one */
+    while (leading-- > 0) {
+        crc = (uint32_t)_mm_crc32_u8(crc, *input++);
+    }
+
+    if (detected_sse42 && detected_clmul) {
+        return aws_checksums_crc32c_sse42(input, length, crc);
+    }
+
+    /* Spin through remaining (aligned) 8-byte chunks using the CRC32Q quad word instruction */
+    while (length >= (int)sizeof(slice_ptr_int_type)) {
+        crc = (uint32_t)crc_intrin_fn(crc, *(slice_ptr_type)input);
+        input += sizeof(slice_ptr_int_type);
+        length -= (int)sizeof(slice_ptr_int_type);
+    }
+
+    /* Finish up with any trailing bytes using the CRC32B single byte instruction one-by-one */
+    while (length-- > 0) {
+        crc = (uint32_t)_mm_crc32_u8(crc, *input);
+        input++;
+    }
+
+    return ~crc;
+}
+
+uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32) {
+#ifdef AWS_HAVE_AVX512_INTRINSICS
+    if (AWS_UNLIKELY(!detection_performed)) {
+        aws_checksums_hw_detect();
+    }
+
+    if (detected_vpclmulqdq) {
+        return aws_checksums_crc32_avx512(input, length, previousCrc32);
+    }
+#endif
+    return aws_checksums_crc32_sw(input, length, previousCrc32);
+}
diff --git a/source/intel/intrin/crc32_avx512.c b/source/intel/intrin/crc32_avx512.c
new file mode 100644
index 0000000..2c1f4d9
--- /dev/null
+++ b/source/intel/intrin/crc32_avx512.c
@@ -0,0 +1,343 @@
+/**
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include 
+#include 
+
+#ifdef _MSC_VER
+# include 
+# define USE_VPCLMULQDQ /* nothing */
+#else
+# include 
+# define TARGET "pclmul,avx512f,avx512dq,avx512bw,avx512vl,vpclmulqdq"
+# define USE_VPCLMULQDQ __attribute__((target(TARGET)))
+#endif
+
+#include 
+
+#ifdef _MSC_VER
+/* MSVC does not seem to define this intrinsic for vmovdqa */
+# define _mm_load_epi32(x) (*(const __m128i *)(x))
+#endif
+
+/*
+  This implementation is based on crc32_refl_by16_vclmul_avx512
+  in https://github.com/intel/intel-ipsec-mb/ with some optimizations.
+  The // comments in crc32_avx512() correspond to assembler labels.
+*/ + +/** table of constants corresponding to a CRC polynomial up to degree 32 */ +struct crc32_tab +{ + const uint64_t b2048[2], b1024[2]; + _Alignas(64) const uint64_t b896[6]; /* includes b786, b640 */ + const uint64_t b512[2]; + const uint64_t b384[2], b256[2], b128[2], zeropad_for_b384[2]; + const uint64_t b64[2], b32[2]; +}; + +/** ISO 3309 CRC-32 (reflected polynomial 0x04C11DB7); zlib crc32() */ +_Alignas(64) static const struct crc32_tab refl32 = { + { 0x00000000e95c1271, 0x00000000ce3371cb }, + { 0x00000000910eeec1, 0x0000000033fff533 }, + { 0x000000000cbec0ed, 0x0000000031f8303f, + 0x0000000057c54819, 0x00000000df068dc2, + 0x00000000ae0b5394, 0x000000001c279815 }, + { 0x000000001d9513d7, 0x000000008f352d95 }, + { 0x00000000af449247, 0x000000003db1ecdc }, + { 0x0000000081256527, 0x00000000f1da05aa }, + { 0x00000000ccaa009e, 0x00000000ae689191 }, + { 0, 0 }, + { 0x00000000ccaa009e, 0x00000000b8bc6765 }, + { 0x00000001f7011640, 0x00000001db710640 } +}; + +/** Castagnoli CRC-32C (reflected polynomial 0x1EDC6F41) */ +_Alignas(64) static const struct crc32_tab refl32c = { + { 0x00000000b9e02b86, 0x00000000dcb17aa4 }, + { 0x000000000d3b6092, 0x000000006992cea2 }, + { 0x0000000047db8317, 0x000000002ad91c30, + 0x000000000715ce53, 0x00000000c49f4f67, + 0x0000000039d3b296, 0x00000000083a6eec }, + { 0x000000009e4addf8, 0x00000000740eef02 }, + { 0x00000000ddc0152b, 0x000000001c291d04 }, + { 0x00000000ba4fc28e, 0x000000003da6d0cb }, + { 0x00000000493c7d27, 0x00000000f20c0dfe }, + { 0, 0 }, + { 0x00000000493c7d27, 0x00000000dd45aab8 }, + { 0x00000000dea713f0, 0x0000000105ec76f0 } +}; + +#define TERNARY_XOR3 (0xf0^0xcc^0xaa) +#define TERNARY_XNOR3 (0xff-TERNARY_XOR3) +#define TERNARY_XOR2_AND ((0xf0^0xcc)&0xaa) + +USE_VPCLMULQDQ +/** @return a^b^c */ +static inline __m128i xor3_128(__m128i a, __m128i b, __m128i c) +{ + return _mm_ternarylogic_epi64(a, b, c, TERNARY_XOR3); +} + +USE_VPCLMULQDQ +/** @return ~(a^b^c) */ +static inline __m128i xnor3_128(__m128i a, __m128i b, __m128i c) +{ + return _mm_ternarylogic_epi64(a, b, c, TERNARY_XNOR3); +} + +USE_VPCLMULQDQ +/** @return a^b^c */ +static inline __m512i xor3_512(__m512i a, __m512i b, __m512i c) +{ + return _mm512_ternarylogic_epi64(a, b, c, TERNARY_XOR3); +} + +USE_VPCLMULQDQ +/** @return (a^b)&c */ +static inline __m128i xor2_and_128(__m128i a, __m128i b, __m128i c) +{ + return _mm_ternarylogic_epi64(a, b, c, TERNARY_XOR2_AND); +} + +USE_VPCLMULQDQ +/** Load 64 bytes */ +static inline __m512i load512(const char *b) { return _mm512_loadu_epi8(b); } + +USE_VPCLMULQDQ +/** Load 16 bytes */ +static inline __m128i load128(const char *b) { return _mm_loadu_epi64(b); } + +/** Combine 512 data bits with CRC */ +USE_VPCLMULQDQ +static inline __m512i combine512(__m512i a, __m512i tab, __m512i b) +{ + return xor3_512(b, _mm512_clmulepi64_epi128(a, tab, 0x01), + _mm512_clmulepi64_epi128(a, tab, 0x10)); +} + +#define xor512(a, b) _mm512_xor_epi64(a, b) +#define xor256(a, b) _mm256_xor_epi64(a, b) +#define xor128(a, b) _mm_xor_epi64(a, b) +#define and128(a, b) _mm_and_si128(a, b) + +USE_VPCLMULQDQ +/** Pick a 128-bit component of a 512-bit vector */ +static inline __m512i extract512_128_3(__m512i a) +{ +#if defined __GNUC__ && __GNUC__ >= 11 + /* While technically incorrect, this would seem to translate into a + vextracti32x4 instruction, which actually outputs a ZMM register + (anything above the XMM range is cleared). 
*/ + return _mm512_castsi128_si512(_mm512_extracti64x2_epi64(a, 3)); +#else + /* On clang, this is needed in order to get a correct result. */ + return _mm512_maskz_shuffle_i64x2(3, a, a, 3); +#endif +} + +_Alignas(16) static const uint64_t shuffle128[4] = { + 0x8786858483828100, 0x8f8e8d8c8b8a8988, + 0x0706050403020100, 0x000e0d0c0b0a0908 +}; + +static const __mmask16 size_mask[16] = { + 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, + 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff +}; + +_Alignas(16) static const uint64_t shift128[4] = { + 0x8786858483828100, 0x8f8e8d8c8b8a8988, + 0x0706050403020100, 0x000e0d0c0b0a0908 +}; + +static const char shift_1_to_3_reflect[7 + 11] = { + -1, -1, -1, -1, -1, -1, -1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 +}; + +USE_VPCLMULQDQ +static uint32_t crc32_avx512(const uint8_t *buf, int size, uint32_t crc, + const struct crc32_tab *tab) +{ + const __m512i crc_in = _mm512_castsi128_si512(_mm_cvtsi32_si128(~crc)), + b512 = _mm512_broadcast_i32x4(_mm_load_epi32(tab->b512)); + __m128i crc_out; + __m512i lo; + + if (size >= 256) { + lo = xor512(load512(buf), crc_in); + __m512i l1 = load512(buf + 64); + + const __m512i b1024 = _mm512_broadcast_i32x4(_mm_load_epi32(&tab->b1024)); + size -= 256; + if (size >= 256) { + __m512i h0 = load512(buf + 128), + hi = load512(buf + 192); + const __m512i b2048 = _mm512_broadcast_i32x4(_mm_load_epi32(&tab->b2048)); + size -= 256; + do { + buf += 256; + lo = combine512(lo, b2048, load512(buf)); + l1 = combine512(l1, b2048, load512(buf + 64)); + h0 = combine512(h0, b2048, load512(buf + 128)); + hi = combine512(hi, b2048, load512(buf + 192)); + size -= 256; + } while (size >= 0); + + buf += 256; + lo = combine512(lo, b1024, h0); + l1 = combine512(l1, b1024, hi); + size += 128; + } else { + do { + buf += 128; + lo = combine512(lo, b1024, load512(buf)); + l1 = combine512(l1, b1024, load512(buf + 64)); + size -= 128; + } while (size >= 0); + + buf += 128; + } + + if (size >= -64) { + size += 128; + lo = combine512(lo, b512, l1); + goto fold_64_B_loop; + } + + const __m512i + b896 = _mm512_load_epi32(&tab->b896), + b384 = _mm512_load_epi32(&tab->b384); + + __m512i c4 = xor3_512(_mm512_clmulepi64_epi128(lo, b896, 1), + _mm512_clmulepi64_epi128(lo, b896, 0x10), + _mm512_clmulepi64_epi128(l1, b384, 1)); + c4 = xor3_512(c4, _mm512_clmulepi64_epi128(l1, b384, 0x10), + extract512_128_3(l1)); + + __m256i c2 = _mm512_castsi512_si256(_mm512_shuffle_i64x2(c4, c4, 0b01001110)); + c2 = xor256(c2, _mm512_castsi512_si256(c4)); + crc_out = xor128(_mm256_extracti64x2_epi64(c2, 1), + _mm256_castsi256_si128(c2)); + size += 128 - 16; + goto final_reduction; + } + + __m128i b; + + // less_than_256 + if (size >= 32) { + if (size >= 64) { + lo = xor512(load512(buf), crc_in); + + while (buf += 64, (size -= 64) >= 64) + fold_64_B_loop: + lo = combine512(lo, b512, load512(buf)); + + // reduce_64B + const __m512i b384 = _mm512_load_epi32(&tab->b384); + __m512i crc512 = + xor3_512(_mm512_clmulepi64_epi128(lo, b384, 1), + _mm512_clmulepi64_epi128(lo, b384, 0x10), + extract512_128_3(lo)); + crc512 = xor512(crc512, _mm512_shuffle_i64x2(crc512, crc512, 0b01001110)); + const __m256i crc256 = _mm512_castsi512_si256(crc512); + crc_out = xor128(_mm256_extracti64x2_epi64(crc256, 1), + _mm256_castsi256_si128(crc256)); + size -= 16; + } else { + // less_than_64 + crc_out = xor128(load128(buf), + _mm512_castsi512_si128(crc_in)); + buf += 16; + size -= 32; + } + + final_reduction: + b = _mm_load_epi32(&tab->b128); + + while (size >= 0) { + // 
reduction_loop_16B + crc_out = xor3_128(load128(buf), + _mm_clmulepi64_si128(crc_out, b, 1), + _mm_clmulepi64_si128(crc_out, b, 0x10)); + buf += 16; + size -= 16; + } + // final_reduction_for_128 + + size += 16; + if (size) { + get_last_two_xmms: + const __m128i crc2 = crc_out, d = load128(buf + (size - 16)); + __m128i S = load128(((const char*) shuffle128) + size); + crc_out = _mm_shuffle_epi8(crc_out, S); + S = xor128(S, _mm_set1_epi32(0x80808080)); + crc_out = xor3_128(_mm_blendv_epi8(_mm_shuffle_epi8(crc2, S), d, S), + _mm_clmulepi64_si128(crc_out, b, 1), + _mm_clmulepi64_si128(crc_out, b, 0x10)); + } + + done_128: + __m128i crc_tmp; + b = _mm_load_epi32(&tab->b64); + crc_tmp = xor128(_mm_clmulepi64_si128(crc_out, b, 0x00), + _mm_srli_si128(crc_out, 8)); + crc_out = _mm_slli_si128(crc_tmp, 4); + crc_out = _mm_clmulepi64_si128(crc_out, b, 0x10); + crc_out = xor128(crc_out, crc_tmp); + + barrett: + b = _mm_load_epi32(&tab->b32); + crc_tmp = crc_out; + crc_out = and128(crc_out, _mm_set_epi64x(~0ULL, ~0xFFFFFFFFULL)); + crc_out = _mm_clmulepi64_si128(crc_out, b, 0); + crc_out = xor2_and_128(crc_out, crc_tmp, _mm_set_epi64x(0, ~0ULL)); + crc_out = xnor3_128(crc_out, crc_tmp, + _mm_clmulepi64_si128(crc_out, b, 0x10)); + return _mm_extract_epi32(crc_out, 2); + } else { + // less_than_32 + if (size > 0) { + if (size > 16) { + crc_out = xor128(load128(buf), + _mm512_castsi512_si128(crc_in)); + buf += 16; + size -= 16; + b = _mm_load_epi32(&tab->b128); + goto get_last_two_xmms; + } else if (size < 16) { + crc_out = _mm_maskz_loadu_epi8(size_mask[size - 1], buf); + crc_out = xor128(crc_out, _mm512_castsi512_si128(crc_in)); + + if (size >= 4) { + crc_out = _mm_shuffle_epi8(crc_out, + load128(((const char*) shift128) + size)); + goto done_128; + } else { + // only_less_than_4 + /* Shift, zero-filling 5 to 7 of the 8-byte crc_out */ + crc_out = _mm_shuffle_epi8(crc_out, + load128(shift_1_to_3_reflect + size - 1)); + goto barrett; + } + } else { + crc_out = xor128(load128(buf), _mm512_castsi512_si128(crc_in)); + goto done_128; + } + } else + return crc; + } +} + +uint32_t aws_checksums_crc32_avx512(const uint8_t *input, int length, uint32_t crc) +{ + return crc32_avx512(input, length, crc, &refl32); +} + +uint32_t aws_checksums_crc32c_avx512(const uint8_t *input, int length, uint32_t crc) +{ + return crc32_avx512(input, length, crc, &refl32c); +} diff --git a/source/intel/visualc/visualc_crc32c_sse42.c b/source/intel/visualc/visualc_crc32c_sse42.c index ca1aca4..707f2ba 100644 --- a/source/intel/visualc/visualc_crc32c_sse42.c +++ b/source/intel/visualc/visualc_crc32c_sse42.c @@ -3,26 +3,15 @@ * SPDX-License-Identifier: Apache-2.0. */ -#include -#include - -#if defined(_M_X64) || defined(_M_IX86) - -# if defined(_M_X64) -typedef uint64_t *slice_ptr_type; -typedef uint64_t slice_ptr_int_type; -# else -typedef uint32_t *slice_ptr_type; -typedef uint32_t slice_ptr_int_type; -# endif +#include /** * This implements crc32c via the intel sse 4.2 instructions. * This is separate from the straight asm version, because visual c does not allow * inline assembly for x64. 
*/ -uint32_t aws_checksums_crc32c_hw(const uint8_t *data, int length, uint32_t previousCrc32) { - uint32_t crc = ~previousCrc32; +uint32_t aws_checksums_crc32c_sse42(const uint8_t *data, int length, uint32_t previousCrc32) { + uint32_t crc = previousCrc32; int length_to_process = length; slice_ptr_type temp = (slice_ptr_type)data; @@ -54,11 +43,11 @@ uint32_t aws_checksums_crc32c_hw(const uint8_t *data, int length, uint32_t previ uint32_t remainder = length_to_process % sizeof(temp); while (slices--) { -# if defined(_M_X64) +#if defined(_M_X64) crc = (uint32_t)_mm_crc32_u64(crc, *temp++); -# else +#else crc = _mm_crc32_u32(crc, *temp++); -# endif +#endif } /* process the remaining parts that can't be done on the slice size. */ @@ -70,8 +59,3 @@ uint32_t aws_checksums_crc32c_hw(const uint8_t *data, int length, uint32_t previ return ~crc; } - -uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32) { - return aws_checksums_crc32_sw(input, length, previousCrc32); -} -#endif /* x64 || x86 */ diff --git a/tests/crc_test.c b/tests/crc_test.c index c975791..16e2d7d 100644 --- a/tests/crc_test.c +++ b/tests/crc_test.c @@ -5,6 +5,9 @@ #include #include + +#include + #include static const uint8_t DATA_32_ZEROS[32] = {0}; @@ -101,6 +104,17 @@ static int s_test_crc32c(struct aws_allocator *allocator, void *ctx) { res |= s_test_known_crc32c(CRC_FUNC_NAME(aws_checksums_crc32c)); res |= s_test_known_crc32c(CRC_FUNC_NAME(aws_checksums_crc32c_sw)); + struct aws_byte_buf avx_buf; + /* enough for 3 avx512 runs */ + aws_byte_buf_init(&avx_buf, allocator, 768); + aws_device_random_buffer(&avx_buf); + + uint32_t crc = aws_checksums_crc32c_sw(avx_buf.buffer, (int)avx_buf.len, 0); + uint32_t hw_crc = aws_checksums_crc32c_hw(avx_buf.buffer, (int)avx_buf.len, 0); + + aws_byte_buf_clean_up(&avx_buf); + ASSERT_UINT_EQUALS(hw_crc, crc); + return res; } AWS_TEST_CASE(test_crc32c, s_test_crc32c) @@ -112,6 +126,17 @@ static int s_test_crc32(struct aws_allocator *allocator, void *ctx) { int res = 0; res |= s_test_known_crc32(CRC_FUNC_NAME(aws_checksums_crc32)); + struct aws_byte_buf avx_buf; + /* enough for 3 avx512 runs */ + aws_byte_buf_init(&avx_buf, allocator, 768); + aws_device_random_buffer(&avx_buf); + + uint32_t crc = aws_checksums_crc32_sw(avx_buf.buffer, (int)avx_buf.len, 0); + uint32_t hw_crc = aws_checksums_crc32_hw(avx_buf.buffer, (int)avx_buf.len, 0); + + aws_byte_buf_clean_up(&avx_buf); + ASSERT_UINT_EQUALS(hw_crc, crc); + return res; } AWS_TEST_CASE(test_crc32, s_test_crc32)
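
For reference beyond the diff itself: both the SSE4.2 path and the AVX512/VPCLMULQDQ path have to agree with the plain bit-at-a-time definition of a reflected CRC. The sketch below is a minimal reference model of that definition and is not part of aws-checksums; the function name and parameters are illustrative. With poly = 0xEDB88320 (the reflected form of 0x04C11DB7) it matches the zlib-compatible CRC-32 served by the refl32 constants, and with poly = 0x82F63B78 (the reflected form of 0x1EDC6F41) it matches the CRC-32C served by refl32c.

#include <stdint.h>
#include <stddef.h>

/* Bit-at-a-time reflected CRC-32: bytes are consumed LSB first, and the
 * running value is inverted on entry and on exit, mirroring the
 * ~previousCrc32 / ~crc convention used throughout the patch. */
static uint32_t crc32_reflected_ref(const uint8_t *p, size_t n, uint32_t crc, uint32_t poly) {
    crc = ~crc;
    while (n--) {
        crc ^= *p++;
        for (int bit = 0; bit < 8; bit++) {
            crc = (crc >> 1) ^ ((crc & 1U) ? poly : 0);
        }
    }
    return ~crc;
}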
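
Since crc32_avx512() takes different branches for inputs below 4, 16, 32, 64 and 256 bytes as well as for the 256-byte folding loop, a length and alignment sweep against the software implementation exercises more of those paths than the fixed 768-byte random buffers added to crc_test.c. A possible standalone harness is sketched below; it assumes the internal aws_checksums_crc32_sw() and aws_checksums_crc32_hw() symbols (declared in the private crc_priv.h header) are available for linking, and the buffer size, offsets and data are arbitrary choices.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Existing internal entry points of aws-checksums (normally via crc_priv.h). */
extern uint32_t aws_checksums_crc32_sw(const uint8_t *input, int length, uint32_t previousCrc32);
extern uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32);

int main(void) {
    enum { BUF_SIZE = 1024 };
    uint8_t *buf = malloc(BUF_SIZE);
    if (!buf) {
        return 1;
    }
    for (int i = 0; i < BUF_SIZE; i++) {
        buf[i] = (uint8_t)rand();
    }

    /* Sweep start offsets 0..15 and every length that fits, so the short,
     * unaligned, folded and >= 256-byte code paths all run. */
    for (int offset = 0; offset < 16; offset++) {
        for (int len = 0; offset + len <= BUF_SIZE; len++) {
            uint32_t sw = aws_checksums_crc32_sw(buf + offset, len, 0);
            uint32_t hw = aws_checksums_crc32_hw(buf + offset, len, 0);
            if (sw != hw) {
                printf("mismatch at offset %d length %d: sw=%08x hw=%08x\n",
                       offset, len, (unsigned)sw, (unsigned)hw);
                free(buf);
                return 1;
            }
        }
    }

    free(buf);
    return 0;
}

The same sweep applies to the CRC-32C pair by swapping in aws_checksums_crc32c_sw() and aws_checksums_crc32c_hw().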