/* { dg-additional-options "-fno-trapping-math" } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

/*
** subr_f64_m_tied1:
**	fsubr	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (subr_f64_m_tied1, svfloat64_t,
		z0 = svsubr_f64_m (p0, z0, z1),
		z0 = svsubr_m (p0, z0, z1))

/*
** subr_f64_m_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z1
**	fsubr	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (subr_f64_m_tied2, svfloat64_t,
		z0 = svsubr_f64_m (p0, z1, z0),
		z0 = svsubr_m (p0, z1, z0))

/*
** subr_f64_m_untied:
**	movprfx	z0, z1
**	fsubr	z0\.d, p0/m, z0\.d, z2\.d
**	ret
*/
TEST_UNIFORM_Z (subr_f64_m_untied, svfloat64_t,
		z0 = svsubr_f64_m (p0, z1, z2),
		z0 = svsubr_m (p0, z1, z2))

/*
** subr_d4_f64_m_tied1:
**	mov	(z[0-9]+\.d), d4
**	fsubr	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZD (subr_d4_f64_m_tied1, svfloat64_t, double,
		 z0 = svsubr_n_f64_m (p0, z0, d4),
		 z0 = svsubr_m (p0, z0, d4))

/*
** subr_d4_f64_m_untied:
**	mov	(z[0-9]+\.d), d4
**	movprfx	z0, z1
**	fsubr	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZD (subr_d4_f64_m_untied, svfloat64_t, double,
		 z0 = svsubr_n_f64_m (p0, z1, d4),
		 z0 = svsubr_m (p0, z1, d4))

/*
** subr_1_f64_m_tied1:
**	fsubr	z0\.d, p0/m, z0\.d, #1\.0
**	ret
*/
TEST_UNIFORM_Z (subr_1_f64_m_tied1, svfloat64_t,
		z0 = svsubr_n_f64_m (p0, z0, 1),
		z0 = svsubr_m (p0, z0, 1))

/*
** subr_1_f64_m_untied:
**	movprfx	z0, z1
**	fsubr	z0\.d, p0/m, z0\.d, #1\.0
**	ret
*/
TEST_UNIFORM_Z (subr_1_f64_m_untied, svfloat64_t,
		z0 = svsubr_n_f64_m (p0, z1, 1),
		z0 = svsubr_m (p0, z1, 1))

/*
** subr_0p5_f64_m_tied1:
**	fsubr	z0\.d, p0/m, z0\.d, #0\.5
**	ret
*/
TEST_UNIFORM_Z (subr_0p5_f64_m_tied1, svfloat64_t,
		z0 = svsubr_n_f64_m (p0, z0, 0.5),
		z0 = svsubr_m (p0, z0, 0.5))

/*
** subr_0p5_f64_m_untied:
**	movprfx	z0, z1
**	fsubr	z0\.d, p0/m, z0\.d, #0\.5
**	ret
*/
TEST_UNIFORM_Z (subr_0p5_f64_m_untied, svfloat64_t,
		z0 = svsubr_n_f64_m (p0, z1, 0.5),
		z0 = svsubr_m (p0, z1, 0.5))

/*
** subr_m1_f64_m_tied1:
**	fmov	(z[0-9]+\.d), #-1\.0(?:e\+0)?
**	fsubr	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (subr_m1_f64_m_tied1, svfloat64_t,
		z0 = svsubr_n_f64_m (p0, z0, -1),
		z0 = svsubr_m (p0, z0, -1))

/*
** subr_m1_f64_m_untied: { xfail *-*-* }
**	fmov	(z[0-9]+\.d), #-1\.0(?:e\+0)?
**	movprfx	z0, z1
**	fsubr	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (subr_m1_f64_m_untied, svfloat64_t,
		z0 = svsubr_n_f64_m (p0, z1, -1),
		z0 = svsubr_m (p0, z1, -1))

/*
** subr_f64_z_tied1:
**	movprfx	z0\.d, p0/z, z0\.d
**	fsubr	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (subr_f64_z_tied1, svfloat64_t,
		z0 = svsubr_f64_z (p0, z0, z1),
		z0 = svsubr_z (p0, z0, z1))

/*
** subr_f64_z_tied2:
**	movprfx	z0\.d, p0/z, z0\.d
**	fsub	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (subr_f64_z_tied2, svfloat64_t,
		z0 = svsubr_f64_z (p0, z1, z0),
		z0 = svsubr_z (p0, z1, z0))

/*
** subr_f64_z_untied:
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	fsubr	z0\.d, p0/m, z0\.d, z2\.d
** |
**	movprfx	z0\.d, p0/z, z2\.d
**	fsub	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_Z (subr_f64_z_untied, svfloat64_t,
		z0 = svsubr_f64_z (p0, z1, z2),
		z0 = svsubr_z (p0, z1, z2))

/*
** subr_d4_f64_z_tied1:
**	mov	(z[0-9]+\.d), d4
**	movprfx	z0\.d, p0/z, z0\.d
**	fsubr	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZD (subr_d4_f64_z_tied1, svfloat64_t, double,
		 z0 = svsubr_n_f64_z (p0, z0, d4),
		 z0 = svsubr_z (p0, z0, d4))

/*
** subr_d4_f64_z_untied:
**	mov	(z[0-9]+\.d), d4
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	fsubr	z0\.d, p0/m, z0\.d, \1
** |
**	movprfx	z0\.d, p0/z, \1
**	fsub	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_ZD (subr_d4_f64_z_untied, svfloat64_t, double,
		 z0 = svsubr_n_f64_z (p0, z1, d4),
		 z0 = svsubr_z (p0, z1, d4))

/*
** subr_1_f64_z_tied1:
**	movprfx	z0\.d, p0/z, z0\.d
**	fsubr	z0\.d, p0/m, z0\.d, #1\.0
**	ret
*/
TEST_UNIFORM_Z (subr_1_f64_z_tied1, svfloat64_t,
		z0 = svsubr_n_f64_z (p0, z0, 1),
		z0 = svsubr_z (p0, z0, 1))

/*
** subr_1_f64_z_untied:
**	movprfx	z0\.d, p0/z, z1\.d
**	fsubr	z0\.d, p0/m, z0\.d, #1\.0
**	ret
*/
TEST_UNIFORM_Z (subr_1_f64_z_untied, svfloat64_t,
		z0 = svsubr_n_f64_z (p0, z1, 1),
		z0 = svsubr_z (p0, z1, 1))

/*
** subr_0p5_f64_z_tied1:
**	movprfx	z0\.d, p0/z, z0\.d
**	fsubr	z0\.d, p0/m, z0\.d, #0\.5
**	ret
*/
TEST_UNIFORM_Z (subr_0p5_f64_z_tied1, svfloat64_t,
		z0 = svsubr_n_f64_z (p0, z0, 0.5),
		z0 = svsubr_z (p0, z0, 0.5))

/*
** subr_0p5_f64_z_untied:
**	movprfx	z0\.d, p0/z, z1\.d
**	fsubr	z0\.d, p0/m, z0\.d, #0\.5
**	ret
*/
TEST_UNIFORM_Z (subr_0p5_f64_z_untied, svfloat64_t,
		z0 = svsubr_n_f64_z (p0, z1, 0.5),
		z0 = svsubr_z (p0, z1, 0.5))

/*
** subr_m1_f64_z_tied1:
**	fmov	(z[0-9]+\.d), #-1\.0(?:e\+0)?
**	movprfx	z0\.d, p0/z, z0\.d
**	fsubr	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (subr_m1_f64_z_tied1, svfloat64_t,
		z0 = svsubr_n_f64_z (p0, z0, -1),
		z0 = svsubr_z (p0, z0, -1))

/*
** subr_m1_f64_z_untied:
**	fmov	(z[0-9]+\.d), #-1\.0(?:e\+0)?
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	fsubr	z0\.d, p0/m, z0\.d, \1
** |
**	movprfx	z0\.d, p0/z, \1
**	fsub	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_Z (subr_m1_f64_z_untied, svfloat64_t,
		z0 = svsubr_n_f64_z (p0, z1, -1),
		z0 = svsubr_z (p0, z1, -1))

/*
** subr_f64_x_tied1:
**	fsub	z0\.d, z1\.d, z0\.d
**	ret
*/
TEST_UNIFORM_Z (subr_f64_x_tied1, svfloat64_t,
		z0 = svsubr_f64_x (p0, z0, z1),
		z0 = svsubr_x (p0, z0, z1))

/*
** subr_f64_x_tied2:
**	fsub	z0\.d, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (subr_f64_x_tied2, svfloat64_t,
		z0 = svsubr_f64_x (p0, z1, z0),
		z0 = svsubr_x (p0, z1, z0))

/*
** subr_f64_x_untied:
**	fsub	z0\.d, z2\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (subr_f64_x_untied, svfloat64_t,
		z0 = svsubr_f64_x (p0, z1, z2),
		z0 = svsubr_x (p0, z1, z2))

/*
** subr_d4_f64_x_tied1:
**	mov	(z[0-9]+\.d), d4
**	fsub	z0\.d, \1, z0\.d
**	ret
*/
TEST_UNIFORM_ZD (subr_d4_f64_x_tied1, svfloat64_t, double,
		 z0 = svsubr_n_f64_x (p0, z0, d4),
		 z0 = svsubr_x (p0, z0, d4))

/*
** subr_d4_f64_x_untied:
**	mov	(z[0-9]+\.d), d4
**	fsub	z0\.d, \1, z1\.d
**	ret
*/
TEST_UNIFORM_ZD (subr_d4_f64_x_untied, svfloat64_t, double,
		 z0 = svsubr_n_f64_x (p0, z1, d4),
		 z0 = svsubr_x (p0, z1, d4))

/*
** subr_1_f64_x_tied1:
**	fsubr	z0\.d, p0/m, z0\.d, #1\.0
**	ret
*/
TEST_UNIFORM_Z (subr_1_f64_x_tied1, svfloat64_t,
		z0 = svsubr_n_f64_x (p0, z0, 1),
		z0 = svsubr_x (p0, z0, 1))

/*
** subr_1_f64_x_untied:
**	movprfx	z0, z1
**	fsubr	z0\.d, p0/m, z0\.d, #1\.0
**	ret
*/
TEST_UNIFORM_Z (subr_1_f64_x_untied, svfloat64_t,
		z0 = svsubr_n_f64_x (p0, z1, 1),
		z0 = svsubr_x (p0, z1, 1))

/*
** subr_0p5_f64_x_tied1:
**	fsubr	z0\.d, p0/m, z0\.d, #0\.5
**	ret
*/
TEST_UNIFORM_Z (subr_0p5_f64_x_tied1, svfloat64_t,
		z0 = svsubr_n_f64_x (p0, z0, 0.5),
		z0 = svsubr_x (p0, z0, 0.5))

/*
** subr_0p5_f64_x_untied:
**	movprfx	z0, z1
**	fsubr	z0\.d, p0/m, z0\.d, #0\.5
**	ret
*/
TEST_UNIFORM_Z (subr_0p5_f64_x_untied, svfloat64_t,
		z0 = svsubr_n_f64_x (p0, z1, 0.5),
		z0 = svsubr_x (p0, z1, 0.5))

/*
** subr_m1_f64_x_tied1:
**	fmov	(z[0-9]+\.d), #-1\.0(?:e\+0)?
**	fsub	z0\.d, \1, z0\.d
**	ret
*/
TEST_UNIFORM_Z (subr_m1_f64_x_tied1, svfloat64_t,
		z0 = svsubr_n_f64_x (p0, z0, -1),
		z0 = svsubr_x (p0, z0, -1))

/*
** subr_m1_f64_x_untied:
**	fmov	(z[0-9]+\.d), #-1\.0(?:e\+0)?
**	fsub	z0\.d, \1, z1\.d
**	ret
*/
TEST_UNIFORM_Z (subr_m1_f64_x_untied, svfloat64_t,
		z0 = svsubr_n_f64_x (p0, z1, -1),
		z0 = svsubr_x (p0, z1, -1))

/*
** ptrue_subr_f64_x_tied1:
**	fsub	z0\.d, z1\.d, z0\.d
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_f64_x_tied1, svfloat64_t,
		z0 = svsubr_f64_x (svptrue_b64 (), z0, z1),
		z0 = svsubr_x (svptrue_b64 (), z0, z1))

/*
** ptrue_subr_f64_x_tied2:
**	fsub	z0\.d, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_f64_x_tied2, svfloat64_t,
		z0 = svsubr_f64_x (svptrue_b64 (), z1, z0),
		z0 = svsubr_x (svptrue_b64 (), z1, z0))

/*
** ptrue_subr_f64_x_untied:
**	fsub	z0\.d, z2\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_f64_x_untied, svfloat64_t,
		z0 = svsubr_f64_x (svptrue_b64 (), z1, z2),
		z0 = svsubr_x (svptrue_b64 (), z1, z2))

/*
** ptrue_subr_1_f64_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_1_f64_x_tied1, svfloat64_t,
		z0 = svsubr_n_f64_x (svptrue_b64 (), z0, 1),
		z0 = svsubr_x (svptrue_b64 (), z0, 1))

/*
** ptrue_subr_1_f64_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_1_f64_x_untied, svfloat64_t,
		z0 = svsubr_n_f64_x (svptrue_b64 (), z1, 1),
		z0 = svsubr_x (svptrue_b64 (), z1, 1))

/*
** ptrue_subr_0p5_f64_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_0p5_f64_x_tied1, svfloat64_t,
		z0 = svsubr_n_f64_x (svptrue_b64 (), z0, 0.5),
		z0 = svsubr_x (svptrue_b64 (), z0, 0.5))

/*
** ptrue_subr_0p5_f64_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_0p5_f64_x_untied, svfloat64_t,
		z0 = svsubr_n_f64_x (svptrue_b64 (), z1, 0.5),
		z0 = svsubr_x (svptrue_b64 (), z1, 0.5))

/*
** ptrue_subr_m1_f64_x_tied1:
**	fmov	(z[0-9]+\.d), #-1\.0(?:e\+0)?
**	fsub	z0\.d, \1, z0\.d
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_m1_f64_x_tied1, svfloat64_t,
		z0 = svsubr_n_f64_x (svptrue_b64 (), z0, -1),
		z0 = svsubr_x (svptrue_b64 (), z0, -1))

/*
** ptrue_subr_m1_f64_x_untied:
**	fmov	(z[0-9]+\.d), #-1\.0(?:e\+0)?
**	fsub	z0\.d, \1, z1\.d
**	ret
*/
TEST_UNIFORM_Z (ptrue_subr_m1_f64_x_untied, svfloat64_t,
		z0 = svsubr_n_f64_x (svptrue_b64 (), z1, -1),
		z0 = svsubr_x (svptrue_b64 (), z1, -1))