/* { dg-additional-options "-fno-trapping-math" } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
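
/* Each TEST_UNIFORM_Z / TEST_UNIFORM_ZD invocation below defines one
   function, and the "**" comment above it gives the
   check-function-bodies pattern that the function's assembly must
   match.  The suffixes describe the register allocation under test:
   "tied1" passes the result register as the first data operand,
   "tied2" as the second, and "untied" keeps the result register
   distinct from both, which is why the tied2 and untied patterns for
   the merging forms expect MOV/MOVPRFX shuffles before the
   destructive FADD.

   For orientation, TEST_UNIFORM_Z expands to roughly the following
   (a sketch only; the exact parameter list is an assumption, see
   test_sve_acle.h for the real definition):

       svfloat16_t
       add_f16_m_tied1 (svfloat16_t z0, svfloat16_t z1,
			svfloat16_t z2, svbool_t p0)
       {
	 z0 = svadd_f16_m (p0, z0, z1);   // or the overloaded form
	 return z0;
       }
*/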

/*
** add_f16_m_tied1:
**	fadd	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (add_f16_m_tied1, svfloat16_t,
		z0 = svadd_f16_m (p0, z0, z1),
		z0 = svadd_m (p0, z0, z1))

/*
** add_f16_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fadd	z0\.h, p0/m, z0\.h, \1\.h
**	ret
*/
TEST_UNIFORM_Z (add_f16_m_tied2, svfloat16_t,
		z0 = svadd_f16_m (p0, z1, z0),
		z0 = svadd_m (p0, z1, z0))

/*
** add_f16_m_untied:
**	movprfx	z0, z1
**	fadd	z0\.h, p0/m, z0\.h, z2\.h
**	ret
*/
TEST_UNIFORM_Z (add_f16_m_untied, svfloat16_t,
		z0 = svadd_f16_m (p0, z1, z2),
		z0 = svadd_m (p0, z1, z2))

/*
** add_h4_f16_m_tied1:
**	mov	(z[0-9]+\.h), h4
**	fadd	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZD (add_h4_f16_m_tied1, svfloat16_t, __fp16,
		 z0 = svadd_n_f16_m (p0, z0, d4),
		 z0 = svadd_m (p0, z0, d4))

/*
** add_h4_f16_m_untied:
**	mov	(z[0-9]+\.h), h4
**	movprfx	z0, z1
**	fadd	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZD (add_h4_f16_m_untied, svfloat16_t, __fp16,
		 z0 = svadd_n_f16_m (p0, z1, d4),
		 z0 = svadd_m (p0, z1, d4))

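/* The predicated FADD and FSUB immediate forms can only encode the
   values 0.5 and 1.0, so the tests below expect +1 and +0.5 to map
   to FADD #1.0/#0.5 and -1 and -0.5 to map to FSUB #1.0/#0.5.  */
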
/*
** add_1_f16_m_tied1:
**	fadd	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_1_f16_m_tied1, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z0, 1),
		z0 = svadd_m (p0, z0, 1))

/*
** add_1_f16_m_untied:
**	movprfx	z0, z1
**	fadd	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_1_f16_m_untied, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z1, 1),
		z0 = svadd_m (p0, z1, 1))

/*
** add_0p5_f16_m_tied1:
**	fadd	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_0p5_f16_m_tied1, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z0, 0.5),
		z0 = svadd_m (p0, z0, 0.5))

/*
** add_0p5_f16_m_untied:
**	movprfx	z0, z1
**	fadd	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_0p5_f16_m_untied, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z1, 0.5),
		z0 = svadd_m (p0, z1, 0.5))

/*
** add_m1_f16_m_tied1:
**	fsub	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_m1_f16_m_tied1, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z0, -1),
		z0 = svadd_m (p0, z0, -1))

/*
** add_m1_f16_m_untied:
**	movprfx	z0, z1
**	fsub	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_m1_f16_m_untied, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z1, -1),
		z0 = svadd_m (p0, z1, -1))

/*
** add_m0p5_f16_m_tied1:
**	fsub	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_m0p5_f16_m_tied1, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z0, -0.5),
		z0 = svadd_m (p0, z0, -0.5))

/*
** add_m0p5_f16_m_untied:
**	movprfx	z0, z1
**	fsub	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_m0p5_f16_m_untied, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z1, -0.5),
		z0 = svadd_m (p0, z1, -0.5))

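/* -2.0 is not an encodable FADD/FSUB immediate, so it has to be
   materialized in a register with FMOV first.  */
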
/*
** add_m2_f16_m:
**	fmov	(z[0-9]+\.h), #-2\.0(?:e\+0)?
**	fadd	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (add_m2_f16_m, svfloat16_t,
		z0 = svadd_n_f16_m (p0, z0, -2),
		z0 = svadd_m (p0, z0, -2))

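/* The _z forms zero the inactive lanes, which the expected code
   implements as a zeroing MOVPRFX ("p0/z") followed by a merging
   FADD.  Because FADD is commutative, the untied patterns accept
   either operand as the MOVPRFX source.  */
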
/*
** add_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fadd	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (add_f16_z_tied1, svfloat16_t,
		z0 = svadd_f16_z (p0, z0, z1),
		z0 = svadd_z (p0, z0, z1))

/*
** add_f16_z_tied2:
**	movprfx	z0\.h, p0/z, z0\.h
**	fadd	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (add_f16_z_tied2, svfloat16_t,
		z0 = svadd_f16_z (p0, z1, z0),
		z0 = svadd_z (p0, z1, z0))

/*
** add_f16_z_untied:
** (
**	movprfx	z0\.h, p0/z, z1\.h
**	fadd	z0\.h, p0/m, z0\.h, z2\.h
** |
**	movprfx	z0\.h, p0/z, z2\.h
**	fadd	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_Z (add_f16_z_untied, svfloat16_t,
		z0 = svadd_f16_z (p0, z1, z2),
		z0 = svadd_z (p0, z1, z2))

/*
** add_h4_f16_z_tied1:
**	mov	(z[0-9]+\.h), h4
**	movprfx	z0\.h, p0/z, z0\.h
**	fadd	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZD (add_h4_f16_z_tied1, svfloat16_t, __fp16,
		 z0 = svadd_n_f16_z (p0, z0, d4),
		 z0 = svadd_z (p0, z0, d4))

/*
** add_h4_f16_z_untied:
**	mov	(z[0-9]+\.h), h4
** (
**	movprfx	z0\.h, p0/z, z1\.h
**	fadd	z0\.h, p0/m, z0\.h, \1
** |
**	movprfx	z0\.h, p0/z, \1
**	fadd	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_ZD (add_h4_f16_z_untied, svfloat16_t, __fp16,
		 z0 = svadd_n_f16_z (p0, z1, d4),
		 z0 = svadd_z (p0, z1, d4))

/*
** add_1_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fadd	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_1_f16_z_tied1, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z0, 1),
		z0 = svadd_z (p0, z0, 1))

/*
** add_1_f16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	fadd	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_1_f16_z_untied, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z1, 1),
		z0 = svadd_z (p0, z1, 1))

/*
** add_0p5_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fadd	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_0p5_f16_z_tied1, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z0, 0.5),
		z0 = svadd_z (p0, z0, 0.5))

/*
** add_0p5_f16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	fadd	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_0p5_f16_z_untied, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z1, 0.5),
		z0 = svadd_z (p0, z1, 0.5))

/*
** add_m1_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fsub	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_m1_f16_z_tied1, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z0, -1),
		z0 = svadd_z (p0, z0, -1))

/*
** add_m1_f16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	fsub	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_m1_f16_z_untied, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z1, -1),
		z0 = svadd_z (p0, z1, -1))

/*
** add_m0p5_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fsub	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_m0p5_f16_z_tied1, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z0, -0.5),
		z0 = svadd_z (p0, z0, -0.5))

/*
** add_m0p5_f16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	fsub	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_m0p5_f16_z_untied, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z1, -0.5),
		z0 = svadd_z (p0, z1, -0.5))

/*
** add_m2_f16_z:
**	fmov	(z[0-9]+\.h), #-2\.0(?:e\+0)?
**	movprfx	z0\.h, p0/z, z0\.h
**	fadd	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (add_m2_f16_z, svfloat16_t,
		z0 = svadd_n_f16_z (p0, z0, -2),
		z0 = svadd_z (p0, z0, -2))

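/* The _x forms leave the inactive lanes in an unspecified state, so
   with two vector operands the compiler can use the unpredicated
   three-operand FADD and needs no MOVPRFX.  The immediate cases
   still use the predicated FADD/FSUB immediate forms, as in the _m
   tests above.  */
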
/*
** add_f16_x_tied1:
**	fadd	z0\.h, (z0\.h, z1\.h|z1\.h, z0\.h)
**	ret
*/
TEST_UNIFORM_Z (add_f16_x_tied1, svfloat16_t,
		z0 = svadd_f16_x (p0, z0, z1),
		z0 = svadd_x (p0, z0, z1))

/*
** add_f16_x_tied2:
**	fadd	z0\.h, (z0\.h, z1\.h|z1\.h, z0\.h)
**	ret
*/
TEST_UNIFORM_Z (add_f16_x_tied2, svfloat16_t,
		z0 = svadd_f16_x (p0, z1, z0),
		z0 = svadd_x (p0, z1, z0))

/*
** add_f16_x_untied:
**	fadd	z0\.h, (z1\.h, z2\.h|z2\.h, z1\.h)
**	ret
*/
TEST_UNIFORM_Z (add_f16_x_untied, svfloat16_t,
		z0 = svadd_f16_x (p0, z1, z2),
		z0 = svadd_x (p0, z1, z2))

/*
** add_h4_f16_x_tied1:
**	mov	(z[0-9]+\.h), h4
**	fadd	z0\.h, (z0\.h, \1|\1, z0\.h)
**	ret
*/
TEST_UNIFORM_ZD (add_h4_f16_x_tied1, svfloat16_t, __fp16,
		 z0 = svadd_n_f16_x (p0, z0, d4),
		 z0 = svadd_x (p0, z0, d4))

/*
** add_h4_f16_x_untied:
**	mov	(z[0-9]+\.h), h4
**	fadd	z0\.h, (z1\.h, \1|\1, z1\.h)
**	ret
*/
TEST_UNIFORM_ZD (add_h4_f16_x_untied, svfloat16_t, __fp16,
		 z0 = svadd_n_f16_x (p0, z1, d4),
		 z0 = svadd_x (p0, z1, d4))

/*
** add_1_f16_x_tied1:
**	fadd	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_1_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z0, 1),
		z0 = svadd_x (p0, z0, 1))

/*
** add_1_f16_x_untied:
**	movprfx	z0, z1
**	fadd	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_1_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z1, 1),
		z0 = svadd_x (p0, z1, 1))

/*
** add_0p5_f16_x_tied1:
**	fadd	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_0p5_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z0, 0.5),
		z0 = svadd_x (p0, z0, 0.5))

/*
** add_0p5_f16_x_untied:
**	movprfx	z0, z1
**	fadd	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_0p5_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z1, 0.5),
		z0 = svadd_x (p0, z1, 0.5))

/*
** add_m1_f16_x_tied1:
**	fsub	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_m1_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z0, -1),
		z0 = svadd_x (p0, z0, -1))

/*
** add_m1_f16_x_untied:
**	movprfx	z0, z1
**	fsub	z0\.h, p0/m, z0\.h, #1\.0
**	ret
*/
TEST_UNIFORM_Z (add_m1_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z1, -1),
		z0 = svadd_x (p0, z1, -1))

/*
** add_m0p5_f16_x_tied1:
**	fsub	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_m0p5_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z0, -0.5),
		z0 = svadd_x (p0, z0, -0.5))

/*
** add_m0p5_f16_x_untied:
**	movprfx	z0, z1
**	fsub	z0\.h, p0/m, z0\.h, #0\.5
**	ret
*/
TEST_UNIFORM_Z (add_m0p5_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z1, -0.5),
		z0 = svadd_x (p0, z1, -0.5))

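/* As with -2.0 earlier, +2.0 is not an encodable immediate; after
   the FMOV, the _x form allows the addition itself to be
   unpredicated.  */
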
/*
** add_2_f16_x_tied1:
**	fmov	(z[0-9]+\.h), #2\.0(?:e\+0)?
**	fadd	z0\.h, (z0\.h, \1|\1, z0\.h)
**	ret
*/
TEST_UNIFORM_Z (add_2_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z0, 2),
		z0 = svadd_x (p0, z0, 2))

/*
** add_2_f16_x_untied:
**	fmov	(z[0-9]+\.h), #2\.0(?:e\+0)?
**	fadd	z0\.h, (z1\.h, \1|\1, z1\.h)
**	ret
*/
TEST_UNIFORM_Z (add_2_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (p0, z1, 2),
		z0 = svadd_x (p0, z1, 2))

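/* With an explicitly all-true predicate, the vector-by-vector cases
   can compile to a single unpredicated FADD.  The small immediates
   still use the predicated FADD/FSUB immediate form, which needs a
   governing predicate, so those patterns only check that a PTRUE is
   materialized somewhere in the function.  */
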
/*
** ptrue_add_f16_x_tied1:
**	fadd	z0\.h, (z0\.h, z1\.h|z1\.h, z0\.h)
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_f16_x_tied1, svfloat16_t,
		z0 = svadd_f16_x (svptrue_b16 (), z0, z1),
		z0 = svadd_x (svptrue_b16 (), z0, z1))

/*
** ptrue_add_f16_x_tied2:
**	fadd	z0\.h, (z0\.h, z1\.h|z1\.h, z0\.h)
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_f16_x_tied2, svfloat16_t,
		z0 = svadd_f16_x (svptrue_b16 (), z1, z0),
		z0 = svadd_x (svptrue_b16 (), z1, z0))

/*
** ptrue_add_f16_x_untied:
**	fadd	z0\.h, (z1\.h, z2\.h|z2\.h, z1\.h)
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_f16_x_untied, svfloat16_t,
		z0 = svadd_f16_x (svptrue_b16 (), z1, z2),
		z0 = svadd_x (svptrue_b16 (), z1, z2))

/*
** ptrue_add_1_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_1_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z0, 1),
		z0 = svadd_x (svptrue_b16 (), z0, 1))

/*
** ptrue_add_1_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_1_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z1, 1),
		z0 = svadd_x (svptrue_b16 (), z1, 1))

/*
** ptrue_add_0p5_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_0p5_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z0, 0.5),
		z0 = svadd_x (svptrue_b16 (), z0, 0.5))

/*
** ptrue_add_0p5_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_0p5_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z1, 0.5),
		z0 = svadd_x (svptrue_b16 (), z1, 0.5))

/*
** ptrue_add_m1_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_m1_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z0, -1),
		z0 = svadd_x (svptrue_b16 (), z0, -1))

/*
** ptrue_add_m1_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_m1_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z1, -1),
		z0 = svadd_x (svptrue_b16 (), z1, -1))

/*
** ptrue_add_m0p5_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_m0p5_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z0, -0.5),
		z0 = svadd_x (svptrue_b16 (), z0, -0.5))

/*
** ptrue_add_m0p5_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_m0p5_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z1, -0.5),
		z0 = svadd_x (svptrue_b16 (), z1, -0.5))

/*
** ptrue_add_2_f16_x_tied1:
**	fmov	(z[0-9]+\.h), #2\.0(?:e\+0)?
**	fadd	z0\.h, (z0\.h, \1|\1, z0\.h)
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_2_f16_x_tied1, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z0, 2),
		z0 = svadd_x (svptrue_b16 (), z0, 2))

/*
** ptrue_add_2_f16_x_untied:
**	fmov	(z[0-9]+\.h), #2\.0(?:e\+0)?
**	fadd	z0\.h, (z1\.h, \1|\1, z1\.h)
**	ret
*/
TEST_UNIFORM_Z (ptrue_add_2_f16_x_untied, svfloat16_t,
		z0 = svadd_n_f16_x (svptrue_b16 (), z1, 2),
		z0 = svadd_x (svptrue_b16 (), z1, 2))