xz-5.4.5/src/liblzma/check/crc64_fast.c
///////////////////////////////////////////////////////////////////////////////
//
/// \file       crc64_fast.c
/// \brief      CRC64 calculation
///
/// There are two methods in this file. crc64_generic uses the
/// slice-by-four algorithm. This is the same idea that is used
/// in crc32_fast.c, but for CRC64 we use only four tables
/// instead of eight to avoid increasing CPU cache usage.
///
/// crc64_clmul uses 32/64-bit x86 SSSE3, SSE4.1, and CLMUL instructions.
/// It was derived from
/// https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
/// and the public domain code from https://github.com/rawrunprotected/crc
/// (URLs were checked on 2022-11-07).
///
/// FIXME: Builds for 32-bit x86 use crc64_x86.S by default instead
/// of this file, and thus the CLMUL version isn't available on 32-bit x86
/// unless configured with --disable-assembler. Even then the lookup table
/// isn't omitted in crc64_table.c since it doesn't know that assembly
/// code has been disabled.
//
//  Authors:    Lasse Collin
//              Ilya Kurdyukov
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#include "check.h"

#undef CRC_GENERIC
#undef CRC_CLMUL
#undef CRC_USE_GENERIC_FOR_SMALL_INPUTS

// If CLMUL cannot be used then only the generic slice-by-four is built.
#if !defined(HAVE_USABLE_CLMUL)
#	define CRC_GENERIC 1

// If CLMUL is allowed unconditionally in the compiler options then the
// generic version can be omitted. Note that this doesn't work with MSVC
// as I don't know how to detect the features here.
//
// NOTE: Keep this in sync with crc64_table.c.
#elif (defined(__SSSE3__) && defined(__SSE4_1__) && defined(__PCLMUL__)) \
		|| (defined(__e2k__) && __iset__ >= 6)
#	define CRC_CLMUL 1
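// (For example, GCC and Clang define these macros when invoked with
// -mssse3 -msse4.1 -mpclmul, or with a -march value that implies these
// extensions, so such builds take this branch.)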

// Otherwise build both and detect at runtime which version to use.
#else
#	define CRC_GENERIC 1
#	define CRC_CLMUL 1

/*
	// The generic code is much faster with 1-8-byte inputs and has
	// similar performance up to 16 bytes, at least in microbenchmarks
	// (it depends on input buffer alignment too). If both versions are
	// built, this #define will use the generic version for inputs up to
	// 16 bytes and CLMUL for bigger inputs. It saves a little in code
	// size since the special cases for 0-16-byte inputs will be omitted
	// from the CLMUL code.
#	define CRC_USE_GENERIC_FOR_SMALL_INPUTS 1
*/

#	if defined(_MSC_VER)
#		include <intrin.h>
#	elif defined(HAVE_CPUID_H)
#		include <cpuid.h>
#	endif
#endif


/////////////////////////////////
// Generic slice-by-four CRC64 //
/////////////////////////////////

#ifdef CRC_GENERIC

#include "crc_macros.h"


#ifdef WORDS_BIGENDIAN
#	define A1(x) ((x) >> 56)
#else
#	define A1 A
#endif


// See the comments in crc32_fast.c. They aren't duplicated here.
static uint64_t
crc64_generic(const uint8_t *buf, size_t size, uint64_t crc)
{
	crc = ~crc;

#ifdef WORDS_BIGENDIAN
	crc = bswap64(crc);
#endif

	if (size > 4) {
		while ((uintptr_t)(buf) & 3) {
			crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc);
			--size;
		}

		const uint8_t *const limit = buf + (size & ~(size_t)(3));
		size &= (size_t)(3);

		while (buf < limit) {
#ifdef WORDS_BIGENDIAN
			const uint32_t tmp = (uint32_t)(crc >> 32)
					^ aligned_read32ne(buf);
#else
			const uint32_t tmp = (uint32_t)crc
					^ aligned_read32ne(buf);
#endif
			buf += 4;

			crc = lzma_crc64_table[3][A(tmp)]
			    ^ lzma_crc64_table[2][B(tmp)]
			    ^ S32(crc)
			    ^ lzma_crc64_table[1][C(tmp)]
			    ^ lzma_crc64_table[0][D(tmp)];
		}
	}

	while (size-- != 0)
		crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc);

#ifdef WORDS_BIGENDIAN
	crc = bswap64(crc);
#endif

	return ~crc;
}
#endif
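
/*
// Reference sketch (not part of the build): a minimal bit-at-a-time
// CRC64 using the same reflected ECMA-182 polynomial that appears in
// crc64_clmul() below. Both the table-driven code above and the CLMUL
// code below are optimizations of exactly this loop, so it is handy
// for cross-checking them. Like lzma_crc64(), it expects crc == 0 on
// the first call.
static uint64_t
crc64_bitwise(const uint8_t *buf, size_t size, uint64_t crc)
{
	static const uint64_t poly = 0xc96c5795d7870f42;

	crc = ~crc;

	while (size-- != 0) {
		crc ^= *buf++;
		for (unsigned i = 0; i < 8; ++i)
			crc = (crc >> 1) ^ (crc & 1 ? poly : 0);
	}

	return ~crc;
}
*/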


/////////////////////
// x86 CLMUL CRC64 //
/////////////////////

#ifdef CRC_CLMUL

#include <immintrin.h>


/*
// These functions were used to generate the constants
// at the top of crc64_clmul().
static uint64_t
calc_lo(uint64_t poly)
{
	uint64_t a = poly;
	uint64_t b = 0;

	for (unsigned i = 0; i < 64; ++i) {
		b = (b >> 1) | (a << 63);
		a = (a >> 1) ^ (a & 1 ? poly : 0);
	}

	return b;
}

static uint64_t
calc_hi(uint64_t poly, uint64_t a)
{
	for (unsigned i = 0; i < 64; ++i)
		a = (a >> 1) ^ (a & 1 ? poly : 0);

	return a;
}
*/
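
/*
// Example sketch (not part of the build): how the constants in
// crc64_clmul() could be regenerated with calc_lo() and calc_hi()
// above, if uncommented. The expressions mirror the comments next to
// p, mu, k2 and k1 below; the printf formatting is only illustrative.
#include <stdio.h>

static void
print_crc64_clmul_constants(void)
{
	const uint64_t poly = 0xc96c5795d7870f42;
	const uint64_t k2 = calc_hi(poly, 1);

	printf("p  = 0x%016llx\n",
			(unsigned long long)((poly << 1) | 1));
	printf("mu = 0x%016llx\n",
			(unsigned long long)((calc_lo(poly) << 1) | 1));
	printf("k2 = 0x%016llx\n", (unsigned long long)k2);
	printf("k1 = 0x%016llx\n",
			(unsigned long long)calc_hi(poly, k2));
}
*/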


#define MASK_L(in, mask, r) \
	r = _mm_shuffle_epi8(in, mask)

#define MASK_H(in, mask, r) \
	r = _mm_shuffle_epi8(in, _mm_xor_si128(mask, vsign))

#define MASK_LH(in, mask, low, high) \
	MASK_L(in, mask, low); \
	MASK_H(in, mask, high)

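// Worked example of the masking macros (illustrative values, byte 0
// listed first): with skip_end == 3, mask_end in crc64_clmul() below
// is { -3, -2, -1, 0, 1, ..., 12 }. A control byte with its high bit
// set makes _mm_shuffle_epi8 write a zero, so
//
//     MASK_L(in, mask_end, r):  r = [ 0 0 0 in0 in1 ... in12 ]
//     MASK_H(in, mask_end, r):  r = [ in13 in14 in15 0 ... 0 ]
//
// that is, MASK_H picks exactly the bytes that MASK_L drops,
// and vice versa.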

// MSVC (VS2015 - VS2022) produces bad 32-bit x86 code from the CLMUL CRC
// code when optimizations are enabled (release build). According to the bug
// report, the ebx register is corrupted and the calculated result is wrong.
// Trying to work around the problem with "__asm mov ebx, ebx" didn't help.
// The following pragma works and performance is still good. x86-64 builds
// aren't affected by this problem.
//
// NOTE: Another pragma after the function restores the optimizations.
// If the #if condition here is updated, the other one must be updated too.
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__clang__) \
		&& defined(_M_IX86)
#	pragma optimize("g", off)
#endif

// EDG-based compilers (Intel's classic compiler and compiler for E2K) can
// define __GNUC__ but the attribute must not be used with them.
// The new Clang-based ICX needs the attribute.
//
// NOTE: Build systems check for this too, keep them in sync with this.
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__EDG__)
__attribute__((__target__("ssse3,sse4.1,pclmul")))
#endif
// The intrinsics use 16-byte-aligned reads from buf, thus they may read
// up to 15 bytes before or after the buffer (depending on the alignment
// of the buf argument). The values of the extra bytes are ignored.
// This unavoidably trips -fsanitize=address, so Address Sanitizer has
// to be disabled for this function.
#if lzma_has_attribute(__no_sanitize_address__)
__attribute__((__no_sanitize_address__))
#endif
static uint64_t
crc64_clmul(const uint8_t *buf, size_t size, uint64_t crc)
{
	// The prototypes of the intrinsics use signed types while most of
	// the values are treated as unsigned here. The resulting conversion
	// warnings in this function have been checked and found to be
	// harmless, so silence them.
#if TUKLIB_GNUC_REQ(4, 6) || defined(__clang__)
#	pragma GCC diagnostic push
#	pragma GCC diagnostic ignored "-Wsign-conversion"
#	pragma GCC diagnostic ignored "-Wconversion"
#endif

#ifndef CRC_USE_GENERIC_FOR_SMALL_INPUTS
	// The code assumes that there is at least one byte of input.
	if (size == 0)
		return crc;
#endif

	// const uint64_t poly = 0xc96c5795d7870f42; // CRC polynomial
	const uint64_t p  = 0x92d8af2baf0e1e85; // (poly << 1) | 1
	const uint64_t mu = 0x9c3e466c172963d5; // (calc_lo(poly) << 1) | 1
	const uint64_t k2 = 0xdabe95afc7875f40; // calc_hi(poly, 1)
	const uint64_t k1 = 0xe05dd497ca393ae4; // calc_hi(poly, k2)
	const __m128i vfold0 = _mm_set_epi64x(p, mu);
	const __m128i vfold1 = _mm_set_epi64x(k2, k1);

	// Create a vector with 8-bit values 0 to 15. This is used to
	// construct control masks for _mm_blendv_epi8 and _mm_shuffle_epi8.
	const __m128i vramp = _mm_setr_epi32(
			0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c);

	// This is used to invert the control mask of _mm_shuffle_epi8
	// so that bytes that wouldn't be picked with the original mask
	// will be picked and vice versa.
	const __m128i vsign = _mm_set1_epi8(0x80);

	// Memory addresses A to D and the distances between them:
	//
	//     A           B     C         D
	//     [skip_start][size][skip_end]
	//     [     size2      ]
	//
	// A and D are 16-byte aligned. B and C are 1-byte aligned.
	// skip_start and skip_end are 0-15 bytes. size is at least 1 byte.
	//
	// A = aligned_buf will initially point to this address.
	// B = The address pointed by the caller-supplied buf.
	// C = buf + size == aligned_buf + size2
	// D = buf + size + skip_end == aligned_buf + size2 + skip_end
	const size_t skip_start = (size_t)((uintptr_t)buf & 15);
	const size_t skip_end = (size_t)((0U - (uintptr_t)(buf + size)) & 15);
	const __m128i *aligned_buf = (const __m128i *)(
			(uintptr_t)buf & ~(uintptr_t)15);

	// If size2 <= 16 then the whole input fits into a single 16-byte
	// vector. If size2 > 16 then at least two 16-byte vectors must
	// be processed. If size2 > 16 && size <= 16 then there is only
	// one 16-byte vector's worth of input but it is unaligned in memory.
	//
	// NOTE: There is no integer overflow here if the arguments are valid.
	// If this overflowed, buf + size would too.
	size_t size2 = skip_start + size;
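	// Worked example with made-up numbers: if buf == 0x1003 and
	// size == 20, then skip_start == 3, aligned_buf == 0x1000,
	// buf + size == 0x1017, skip_end == 9, and size2 == 23. The two
	// aligned 16-byte vectors at 0x1000 and 0x1010 (32 bytes total)
	// cover the 20 input bytes plus the 3 + 9 ignored bytes around
	// them.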

	// Masks to be used with _mm_blendv_epi8 and _mm_shuffle_epi8:
	// The first skip_start or skip_end bytes in the vectors will have
	// the high bit (0x80) set. _mm_blendv_epi8 and _mm_shuffle_epi8
	// will produce zeros for these positions. (Bitwise-xor of these
	// masks with vsign will produce the opposite behavior.)
	const __m128i mask_start
			= _mm_sub_epi8(vramp, _mm_set1_epi8(skip_start));
	const __m128i mask_end = _mm_sub_epi8(vramp, _mm_set1_epi8(skip_end));

	// Get the first 1-16 bytes into data0. If loading less than 16
	// bytes, the bytes are loaded to the high bytes of the vector and
	// the least significant positions are filled with zeros.
	const __m128i data0 = _mm_blendv_epi8(_mm_load_si128(aligned_buf),
			_mm_setzero_si128(), mask_start);
	++aligned_buf;

#if defined(__i386__) || defined(_M_IX86)
	const __m128i initial_crc = _mm_set_epi64x(0, ~crc);
#else
	// GCC and Clang would produce good code with _mm_set_epi64x
	// but MSVC needs _mm_cvtsi64_si128 on x86-64.
	const __m128i initial_crc = _mm_cvtsi64_si128(~crc);
#endif

	__m128i v0, v1, v2, v3;

#ifndef CRC_USE_GENERIC_FOR_SMALL_INPUTS
	if (size <= 16) {
		// Right-shift initial_crc by 1-16 bytes based on "size"
		// and store the result in v1 (high bytes) and v0 (low bytes).
		//
		// NOTE: The highest 8 bytes of initial_crc are zeros so
		// v1 will be filled with zeros if size >= 8. The highest 8
		// bytes of v1 will always become zeros.
		//
		// [      v1      ][      v0      ]
		//  [ initial_crc  ]                  size == 1
		//   [ initial_crc  ]                 size == 2
		//                [ initial_crc  ]    size == 15
		//                 [ initial_crc  ]   size == 16 (all in v0)
		const __m128i mask_low = _mm_add_epi8(
				vramp, _mm_set1_epi8(size - 16));
		MASK_LH(initial_crc, mask_low, v0, v1);

		if (size2 <= 16) {
			// There are 1-16 bytes of input and it is all
			// in data0. Copy the input bytes to v3. If there
			// are fewer than 16 bytes, the low bytes in v3
			// will be filled with zeros. That is, the input
			// bytes are stored to the same position as
			// (part of) initial_crc is in v0.
			MASK_L(data0, mask_end, v3);
		} else {
			// There are 2-16 bytes of input but not all bytes
			// are in data0.
			const __m128i data1 = _mm_load_si128(aligned_buf);

			// Collect the 2-16 input bytes from data0 and data1
			// to v2 and v3, and bitwise-xor them with the
			// low bits of initial_crc in v0. Note that the
			// second xor is below this else-block as it
			// is shared with the other branch.
			MASK_H(data0, mask_end, v2);
			MASK_L(data1, mask_end, v3);
			v0 = _mm_xor_si128(v0, v2);
		}

		v0 = _mm_xor_si128(v0, v3);
		v1 = _mm_alignr_epi8(v1, v0, 8);
	} else
#endif
	{
		const __m128i data1 = _mm_load_si128(aligned_buf);
		MASK_LH(initial_crc, mask_start, v0, v1);
		v0 = _mm_xor_si128(v0, data0);
		v1 = _mm_xor_si128(v1, data1);

#define FOLD \
	v1 = _mm_xor_si128(v1, _mm_clmulepi64_si128(v0, vfold1, 0x00)); \
	v0 = _mm_xor_si128(v1, _mm_clmulepi64_si128(v0, vfold1, 0x11));
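
// (A standard PCLMUL folding step, cf. the Intel whitepaper linked at
// the top of this file: interpreting v0 as a 128-bit polynomial, FOLD
// carrylessly multiplies its low half by k1 and its high half by k2,
// and xors both products into v1, which in the main loop holds the
// next 16 bytes of input. The folded result is left in v0.)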

		while (size2 > 32) {
			++aligned_buf;
			size2 -= 16;
			FOLD
			v1 = _mm_load_si128(aligned_buf);
		}

		if (size2 < 32) {
			MASK_H(v0, mask_end, v2);
			MASK_L(v0, mask_end, v0);
			MASK_L(v1, mask_end, v3);
			v1 = _mm_or_si128(v2, v3);
		}

		FOLD
		v1 = _mm_srli_si128(v0, 8);
#undef FOLD
	}

	v1 = _mm_xor_si128(_mm_clmulepi64_si128(v0, vfold1, 0x10), v1);
	v0 = _mm_clmulepi64_si128(v1, vfold0, 0x00);
	v2 = _mm_clmulepi64_si128(v0, vfold0, 0x10);
	v0 = _mm_xor_si128(_mm_xor_si128(v2, _mm_slli_si128(v0, 8)), v1);

#if defined(__i386__) || defined(_M_IX86)
	return ~(((uint64_t)(uint32_t)_mm_extract_epi32(v0, 3) << 32) |
			(uint64_t)(uint32_t)_mm_extract_epi32(v0, 2));
#else
	return ~(uint64_t)_mm_extract_epi64(v0, 1);
#endif

#if TUKLIB_GNUC_REQ(4, 6) || defined(__clang__)
#	pragma GCC diagnostic pop
#endif
}
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__clang__) \
		&& defined(_M_IX86)
#	pragma optimize("", on)
#endif
#endif


////////////////////////
// Detect CPU support //
////////////////////////

#if defined(CRC_GENERIC) && defined(CRC_CLMUL)
static inline bool
is_clmul_supported(void)
{
	int success = 1;
	uint32_t r[4]; // eax, ebx, ecx, edx

#if defined(_MSC_VER)
	// This needs <intrin.h> with MSVC. ICC has it as a built-in
	// on all platforms.
	__cpuid(r, 1);
#elif defined(HAVE_CPUID_H)
	// Compared to just using __asm__ to run CPUID, this also checks
	// that CPUID is supported and saves and restores ebx as that is
	// needed with GCC < 5 with position-independent code (PIC).
	success = __get_cpuid(1, &r[0], &r[1], &r[2], &r[3]);
#else
	// Just a fallback that shouldn't be needed.
	__asm__("cpuid\n\t"
			: "=a"(r[0]), "=b"(r[1]), "=c"(r[2]), "=d"(r[3])
			: "a"(1), "c"(0));
#endif

	// Returns true if these are supported:
	// CLMUL (bit 1 in ecx)
	// SSSE3 (bit 9 in ecx)
	// SSE4.1 (bit 19 in ecx)
	const uint32_t ecx_mask = (1 << 1) | (1 << 9) | (1 << 19);
	return success && (r[2] & ecx_mask) == ecx_mask;

	// Alternative methods that weren't used:
	//   - ICC's _may_i_use_cpu_feature: the other methods should work too.
	//   - GCC >= 6 / Clang / ICX __builtin_cpu_supports("pclmul")
	//
	// CPUID decoding is needed with MSVC anyway and older GCC. This keeps
	// the feature checks in the build system simpler too. The nice thing
	// about __builtin_cpu_supports would be that it generates very short
	// code as it only reads a variable set at startup, but a few bytes
	// don't matter here.
}
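
/*
// Alternative sketch (not used; see the comment above): on GCC >= 6,
// Clang, and ICX, __builtin_cpu_supports() could replace the manual
// CPUID decoding. A strictly equivalent check would test all three
// features, not just "pclmul". The function name here is only
// illustrative.
static inline bool
is_clmul_supported_builtin(void)
{
	return __builtin_cpu_supports("pclmul")
			&& __builtin_cpu_supports("ssse3")
			&& __builtin_cpu_supports("sse4.1");
}
*/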


#ifdef HAVE_FUNC_ATTRIBUTE_CONSTRUCTOR
#	define CRC64_FUNC_INIT
#	define CRC64_SET_FUNC_ATTR __attribute__((__constructor__))
#else
#	define CRC64_FUNC_INIT = &crc64_dispatch
#	define CRC64_SET_FUNC_ATTR
static uint64_t crc64_dispatch(const uint8_t *buf, size_t size, uint64_t crc);
#endif


// Pointer to the selected CRC64 method.
static uint64_t (*crc64_func)(const uint8_t *buf, size_t size, uint64_t crc)
		CRC64_FUNC_INIT;


CRC64_SET_FUNC_ATTR
static void
crc64_set_func(void)
{
	crc64_func = is_clmul_supported() ? &crc64_clmul : &crc64_generic;
	return;
}


#ifndef HAVE_FUNC_ATTRIBUTE_CONSTRUCTOR
static uint64_t
crc64_dispatch(const uint8_t *buf, size_t size, uint64_t crc)
{
	// When __attribute__((__constructor__)) isn't supported, set the
	// function pointer without any locking. If multiple threads run
	// the detection code in parallel, they will all end up setting
	// the pointer to the same value. This avoids the use of
	// mythread_once() on every call to lzma_crc64() but this likely
	// isn't strictly standards compliant. Let's change it if it breaks.
	crc64_set_func();
	return crc64_func(buf, size, crc);
}
#endif
#endif


extern LZMA_API(uint64_t)
lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc)
{
#if defined(CRC_GENERIC) && defined(CRC_CLMUL)
	// If CLMUL is available, it is the best for non-tiny inputs,
	// being over twice as fast as the generic slice-by-four version.
	// However, for size <= 16 it's different. In the extreme case
	// of size == 1 the generic version can be five times faster.
	// At size >= 8 CLMUL starts to become reasonable. It
	// varies depending on the alignment of buf too.
	//
	// The above doesn't include the overhead of mythread_once().
	// At least on x86-64 GNU/Linux, pthread_once() is very fast but
	// it still makes lzma_crc64(buf, 1, crc) 50-100 % slower. When
	// size reaches 12-16 bytes the overhead becomes negligible.
	//
	// So using the generic version for size <= 16 may give better
	// performance with tiny inputs. If such inputs are rare, though,
	// the benefit is less clear because the lookup table of the
	// generic version may then not be in the processor cache.
#ifdef CRC_USE_GENERIC_FOR_SMALL_INPUTS
	if (size <= 16)
		return crc64_generic(buf, size, crc);
#endif

/*
#ifndef HAVE_FUNC_ATTRIBUTE_CONSTRUCTOR
	// See crc64_dispatch(). This would be the alternative which uses
	// locking and doesn't use crc64_dispatch(). Note that on Windows
	// this method needs Vista threads.
	mythread_once(crc64_set_func);
#endif
*/

	return crc64_func(buf, size, crc);

#elif defined(CRC_CLMUL)
	// If CLMUL is used unconditionally without runtime CPU detection
	// then omitting the generic version and its 8 KiB lookup table
	// makes the library smaller.
	//
	// FIXME: Lookup table isn't currently omitted on 32-bit x86,
	// see crc64_table.c.
	return crc64_clmul(buf, size, crc);

#else
	return crc64_generic(buf, size, crc);
#endif
}
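

/*
// Usage sketch (not part of this file; buffer names and sizes are
// illustrative): lzma_crc64() can hash a buffer in one call or
// incrementally. Pass 0 as the starting value and feed the previous
// return value back in on the following calls:
//
//     uint64_t crc = 0;
//     crc = lzma_crc64(part1, part1_size, crc);
//     crc = lzma_crc64(part2, part2_size, crc);
//     // crc now equals the CRC64 of part1 and part2 concatenated.
*/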