gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/divr_f16.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

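/* Each test below pairs the type-specific svdivr_*_f16 call with the
   overloaded svdivr call and uses check-function-bodies to verify that both
   compile to the expected reversed-divide sequence: FDIVR when the divisor
   operand is tied to the destination, an operand-swapped FDIV otherwise,
   with MOVPRFX for untied destinations and the zeroing (_z) forms, and FMOV
   to materialise the constant operands 1 and 0.5.  */
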
/*
** divr_f16_m_tied1:
**	fdivr	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (divr_f16_m_tied1, svfloat16_t,
		z0 = svdivr_f16_m (p0, z0, z1),
		z0 = svdivr_m (p0, z0, z1))

/*
** divr_f16_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fdivr	z0\.h, p0/m, z0\.h, \1\.h
**	ret
*/
TEST_UNIFORM_Z (divr_f16_m_tied2, svfloat16_t,
		z0 = svdivr_f16_m (p0, z1, z0),
		z0 = svdivr_m (p0, z1, z0))

/*
** divr_f16_m_untied:
**	movprfx	z0, z1
**	fdivr	z0\.h, p0/m, z0\.h, z2\.h
**	ret
*/
TEST_UNIFORM_Z (divr_f16_m_untied, svfloat16_t,
		z0 = svdivr_f16_m (p0, z1, z2),
		z0 = svdivr_m (p0, z1, z2))

/*
** divr_h4_f16_m_tied1:
**	mov	(z[0-9]+\.h), h4
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZD (divr_h4_f16_m_tied1, svfloat16_t, __fp16,
		 z0 = svdivr_n_f16_m (p0, z0, d4),
		 z0 = svdivr_m (p0, z0, d4))

/*
** divr_h4_f16_m_untied:
**	mov	(z[0-9]+\.h), h4
**	movprfx	z0, z1
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZD (divr_h4_f16_m_untied, svfloat16_t, __fp16,
		 z0 = svdivr_n_f16_m (p0, z1, d4),
		 z0 = svdivr_m (p0, z1, d4))

/*
** divr_1_f16_m_tied1:
**	fmov	(z[0-9]+\.h), #1\.0(?:e\+0)?
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (divr_1_f16_m_tied1, svfloat16_t,
		z0 = svdivr_n_f16_m (p0, z0, 1),
		z0 = svdivr_m (p0, z0, 1))

/*
** divr_1_f16_m_untied: { xfail *-*-* }
**	fmov	(z[0-9]+\.h), #1\.0(?:e\+0)?
**	movprfx	z0, z1
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (divr_1_f16_m_untied, svfloat16_t,
		z0 = svdivr_n_f16_m (p0, z1, 1),
		z0 = svdivr_m (p0, z1, 1))

/*
** divr_0p5_f16_m_tied1:
**	fmov	(z[0-9]+\.h), #(?:0\.5|5\.0e-1)
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (divr_0p5_f16_m_tied1, svfloat16_t,
		z0 = svdivr_n_f16_m (p0, z0, 0.5),
		z0 = svdivr_m (p0, z0, 0.5))

/*
** divr_0p5_f16_m_untied: { xfail *-*-* }
**	fmov	(z[0-9]+\.h), #(?:0\.5|5\.0e-1)
**	movprfx	z0, z1
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (divr_0p5_f16_m_untied, svfloat16_t,
		z0 = svdivr_n_f16_m (p0, z1, 0.5),
		z0 = svdivr_m (p0, z1, 0.5))

/*
** divr_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fdivr	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (divr_f16_z_tied1, svfloat16_t,
		z0 = svdivr_f16_z (p0, z0, z1),
		z0 = svdivr_z (p0, z0, z1))

/*
** divr_f16_z_tied2:
**	movprfx	z0\.h, p0/z, z0\.h
**	fdiv	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (divr_f16_z_tied2, svfloat16_t,
		z0 = svdivr_f16_z (p0, z1, z0),
		z0 = svdivr_z (p0, z1, z0))

/*
** divr_f16_z_untied:
** (
**	movprfx	z0\.h, p0/z, z1\.h
**	fdivr	z0\.h, p0/m, z0\.h, z2\.h
** |
**	movprfx	z0\.h, p0/z, z2\.h
**	fdiv	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_Z (divr_f16_z_untied, svfloat16_t,
		z0 = svdivr_f16_z (p0, z1, z2),
		z0 = svdivr_z (p0, z1, z2))

/*
** divr_h4_f16_z_tied1:
**	mov	(z[0-9]+\.h), h4
**	movprfx	z0\.h, p0/z, z0\.h
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZD (divr_h4_f16_z_tied1, svfloat16_t, __fp16,
		 z0 = svdivr_n_f16_z (p0, z0, d4),
		 z0 = svdivr_z (p0, z0, d4))

/*
** divr_h4_f16_z_untied:
**	mov	(z[0-9]+\.h), h4
** (
**	movprfx	z0\.h, p0/z, z1\.h
**	fdivr	z0\.h, p0/m, z0\.h, \1
** |
**	movprfx	z0\.h, p0/z, \1
**	fdiv	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_ZD (divr_h4_f16_z_untied, svfloat16_t, __fp16,
		 z0 = svdivr_n_f16_z (p0, z1, d4),
		 z0 = svdivr_z (p0, z1, d4))

/*
** divr_1_f16_z:
**	fmov	(z[0-9]+\.h), #1\.0(?:e\+0)?
**	movprfx	z0\.h, p0/z, z0\.h
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (divr_1_f16_z, svfloat16_t,
		z0 = svdivr_n_f16_z (p0, z0, 1),
		z0 = svdivr_z (p0, z0, 1))

/*
** divr_0p5_f16_z_tied1:
**	fmov	(z[0-9]+\.h), #(?:0\.5|5\.0e-1)
**	movprfx	z0\.h, p0/z, z0\.h
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (divr_0p5_f16_z_tied1, svfloat16_t,
		z0 = svdivr_n_f16_z (p0, z0, 0.5),
		z0 = svdivr_z (p0, z0, 0.5))

/*
** divr_0p5_f16_z_untied:
**	fmov	(z[0-9]+\.h), #(?:0\.5|5\.0e-1)
** (
**	movprfx	z0\.h, p0/z, z1\.h
**	fdivr	z0\.h, p0/m, z0\.h, \1
** |
**	movprfx	z0\.h, p0/z, \1
**	fdiv	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_Z (divr_0p5_f16_z_untied, svfloat16_t,
		z0 = svdivr_n_f16_z (p0, z1, 0.5),
		z0 = svdivr_z (p0, z1, 0.5))

/*
** divr_f16_x_tied1:
**	fdivr	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (divr_f16_x_tied1, svfloat16_t,
		z0 = svdivr_f16_x (p0, z0, z1),
		z0 = svdivr_x (p0, z0, z1))

/*
** divr_f16_x_tied2:
**	fdiv	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (divr_f16_x_tied2, svfloat16_t,
		z0 = svdivr_f16_x (p0, z1, z0),
		z0 = svdivr_x (p0, z1, z0))

/*
** divr_f16_x_untied:
** (
**	movprfx	z0, z1
**	fdivr	z0\.h, p0/m, z0\.h, z2\.h
** |
**	movprfx	z0, z2
**	fdiv	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_Z (divr_f16_x_untied, svfloat16_t,
		z0 = svdivr_f16_x (p0, z1, z2),
		z0 = svdivr_x (p0, z1, z2))

/*
** divr_h4_f16_x_tied1:
**	mov	(z[0-9]+\.h), h4
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZD (divr_h4_f16_x_tied1, svfloat16_t, __fp16,
		 z0 = svdivr_n_f16_x (p0, z0, d4),
		 z0 = svdivr_x (p0, z0, d4))

/*
** divr_h4_f16_x_untied:
**	mov	z0\.h, h4
**	fdiv	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_ZD (divr_h4_f16_x_untied, svfloat16_t, __fp16,
		 z0 = svdivr_n_f16_x (p0, z1, d4),
		 z0 = svdivr_x (p0, z1, d4))

/*
** divr_1_f16_x_tied1:
**	fmov	(z[0-9]+\.h), #1\.0(?:e\+0)?
**	fdivr	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (divr_1_f16_x_tied1, svfloat16_t,
		z0 = svdivr_n_f16_x (p0, z0, 1),
		z0 = svdivr_x (p0, z0, 1))

/*
** divr_1_f16_x_untied:
**	fmov	z0\.h, #1\.0(?:e\+0)?
**	fdiv	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (divr_1_f16_x_untied, svfloat16_t,
		z0 = svdivr_n_f16_x (p0, z1, 1),
		z0 = svdivr_x (p0, z1, 1))

/*
** ptrue_divr_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_divr_f16_x_tied1, svfloat16_t,
		z0 = svdivr_f16_x (svptrue_b16 (), z0, z1),
		z0 = svdivr_x (svptrue_b16 (), z0, z1))

/*
** ptrue_divr_f16_x_tied2:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_divr_f16_x_tied2, svfloat16_t,
		z0 = svdivr_f16_x (svptrue_b16 (), z1, z0),
		z0 = svdivr_x (svptrue_b16 (), z1, z0))

/*
** ptrue_divr_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_divr_f16_x_untied, svfloat16_t,
		z0 = svdivr_f16_x (svptrue_b16 (), z1, z2),
		z0 = svdivr_x (svptrue_b16 (), z1, z2))

/*
** ptrue_divr_1_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_divr_1_f16_x_tied1, svfloat16_t,
		z0 = svdivr_n_f16_x (svptrue_b16 (), z0, 1),
		z0 = svdivr_x (svptrue_b16 (), z0, 1))

/*
** ptrue_divr_1_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_divr_1_f16_x_untied, svfloat16_t,
		z0 = svdivr_n_f16_x (svptrue_b16 (), z1, 1),
		z0 = svdivr_x (svptrue_b16 (), z1, 1))