/* { dg-additional-options "-march=armv8.2-a+sve+bf16" } */
/* { dg-require-effective-target aarch64_asm_bf16_ok } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

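/* Each test below checks both the type-suffixed form (svbfmmla_f32) and
   the overloaded form (svbfmmla) against the expected assembly quoted
   above it: "tied1" has the float32 accumulator already in z0, so no
   moves are needed; "tied2" and "tied3" alias z0 with one of the
   bfloat16 inputs, so that input is copied aside and the accumulator
   (z4) is brought into z0 with MOVPRFX; "untied" moves the accumulator
   (z1) into z0 with MOVPRFX before the BFMMLA.  */
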
/*
** bfmmla_f32_tied1:
**	bfmmla	z0\.s, z4\.h, z5\.h
**	ret
*/
TEST_DUAL_Z (bfmmla_f32_tied1, svfloat32_t, svbfloat16_t,
	     z0 = svbfmmla_f32 (z0, z4, z5),
	     z0 = svbfmmla (z0, z4, z5))

/*
** bfmmla_f32_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	bfmmla	z0\.s, \1\.h, z1\.h
**	ret
*/
TEST_DUAL_Z_REV (bfmmla_f32_tied2, svfloat32_t, svbfloat16_t,
		 z0_res = svbfmmla_f32 (z4, z0, z1),
		 z0_res = svbfmmla (z4, z0, z1))

/*
** bfmmla_f32_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	bfmmla	z0\.s, z1\.h, \1\.h
**	ret
*/
TEST_DUAL_Z_REV (bfmmla_f32_tied3, svfloat32_t, svbfloat16_t,
		 z0_res = svbfmmla_f32 (z4, z1, z0),
		 z0_res = svbfmmla (z4, z1, z0))

/*
** bfmmla_f32_untied:
**	movprfx	z0, z1
**	bfmmla	z0\.s, z4\.h, z5\.h
**	ret
*/
TEST_DUAL_Z (bfmmla_f32_untied, svfloat32_t, svbfloat16_t,
	     z0 = svbfmmla_f32 (z1, z4, z5),
	     z0 = svbfmmla (z1, z4, z5))