gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cmla_f16.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

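/* Expected code for svcmla_f16 (and the overloaded svcmla form) with the
   _m, _z and _x predication suffixes and rotations 0, 90, 180 and 270.
   The "tied" variants pass z0, the result register, as the accumulator or
   as one of the multiplicands, so the patterns below also cover the
   mov/movprfx sequences needed when an input has to be moved out of the
   way of the destination.  */
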
/*
** cmla_0_f16_m_tied1:
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_m_tied1, svfloat16_t,
		z0 = svcmla_f16_m (p0, z0, z1, z2, 0),
		z0 = svcmla_m (p0, z0, z1, z2, 0))

/*
** cmla_0_f16_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_m_tied2, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z0, z2, 0),
		z0 = svcmla_m (p0, z1, z0, z2, 0))

/*
** cmla_0_f16_m_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_m_tied3, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z2, z0, 0),
		z0 = svcmla_m (p0, z1, z2, z0, 0))

/*
** cmla_0_f16_m_untied:
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_m_untied, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z2, z3, 0),
		z0 = svcmla_m (p0, z1, z2, z3, 0))

/*
** cmla_90_f16_m_tied1:
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_m_tied1, svfloat16_t,
		z0 = svcmla_f16_m (p0, z0, z1, z2, 90),
		z0 = svcmla_m (p0, z0, z1, z2, 90))

/*
** cmla_90_f16_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_m_tied2, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z0, z2, 90),
		z0 = svcmla_m (p0, z1, z0, z2, 90))

/*
** cmla_90_f16_m_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_m_tied3, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z2, z0, 90),
		z0 = svcmla_m (p0, z1, z2, z0, 90))

/*
** cmla_90_f16_m_untied:
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_m_untied, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z2, z3, 90),
		z0 = svcmla_m (p0, z1, z2, z3, 90))

/*
** cmla_180_f16_m_tied1:
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_m_tied1, svfloat16_t,
		z0 = svcmla_f16_m (p0, z0, z1, z2, 180),
		z0 = svcmla_m (p0, z0, z1, z2, 180))

/*
** cmla_180_f16_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_m_tied2, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z0, z2, 180),
		z0 = svcmla_m (p0, z1, z0, z2, 180))

/*
** cmla_180_f16_m_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_m_tied3, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z2, z0, 180),
		z0 = svcmla_m (p0, z1, z2, z0, 180))

/*
** cmla_180_f16_m_untied:
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_m_untied, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z2, z3, 180),
		z0 = svcmla_m (p0, z1, z2, z3, 180))

/*
** cmla_270_f16_m_tied1:
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_m_tied1, svfloat16_t,
		z0 = svcmla_f16_m (p0, z0, z1, z2, 270),
		z0 = svcmla_m (p0, z0, z1, z2, 270))

/*
** cmla_270_f16_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_m_tied2, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z0, z2, 270),
		z0 = svcmla_m (p0, z1, z0, z2, 270))

/*
** cmla_270_f16_m_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_m_tied3, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z2, z0, 270),
		z0 = svcmla_m (p0, z1, z2, z0, 270))

/*
** cmla_270_f16_m_untied:
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_m_untied, svfloat16_t,
		z0 = svcmla_f16_m (p0, z1, z2, z3, 270),
		z0 = svcmla_m (p0, z1, z2, z3, 270))

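/* _z forms: inactive lanes of the result must be zero, so each test below
   expects the accumulator to be set up with a zeroing
   "movprfx z0.h, p0/z, ..." before the FCMLA.  */
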
/*
** cmla_0_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_z_tied1, svfloat16_t,
		z0 = svcmla_f16_z (p0, z0, z1, z2, 0),
		z0 = svcmla_z (p0, z0, z1, z2, 0))

/*
** cmla_0_f16_z_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_z_tied2, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z0, z2, 0),
		z0 = svcmla_z (p0, z1, z0, z2, 0))

/*
** cmla_0_f16_z_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_z_tied3, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z2, z0, 0),
		z0 = svcmla_z (p0, z1, z2, z0, 0))

/*
** cmla_0_f16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_z_untied, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z2, z3, 0),
		z0 = svcmla_z (p0, z1, z2, z3, 0))

/*
** cmla_90_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_z_tied1, svfloat16_t,
		z0 = svcmla_f16_z (p0, z0, z1, z2, 90),
		z0 = svcmla_z (p0, z0, z1, z2, 90))

/*
** cmla_90_f16_z_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_z_tied2, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z0, z2, 90),
		z0 = svcmla_z (p0, z1, z0, z2, 90))

/*
** cmla_90_f16_z_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_z_tied3, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z2, z0, 90),
		z0 = svcmla_z (p0, z1, z2, z0, 90))

/*
** cmla_90_f16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_z_untied, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z2, z3, 90),
		z0 = svcmla_z (p0, z1, z2, z3, 90))

/*
** cmla_180_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_z_tied1, svfloat16_t,
		z0 = svcmla_f16_z (p0, z0, z1, z2, 180),
		z0 = svcmla_z (p0, z0, z1, z2, 180))

/*
** cmla_180_f16_z_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_z_tied2, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z0, z2, 180),
		z0 = svcmla_z (p0, z1, z0, z2, 180))

/*
** cmla_180_f16_z_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_z_tied3, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z2, z0, 180),
		z0 = svcmla_z (p0, z1, z2, z0, 180))

/*
** cmla_180_f16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_z_untied, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z2, z3, 180),
		z0 = svcmla_z (p0, z1, z2, z3, 180))

/*
** cmla_270_f16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_z_tied1, svfloat16_t,
		z0 = svcmla_f16_z (p0, z0, z1, z2, 270),
		z0 = svcmla_z (p0, z0, z1, z2, 270))

/*
** cmla_270_f16_z_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_z_tied2, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z0, z2, 270),
		z0 = svcmla_z (p0, z1, z0, z2, 270))

/*
** cmla_270_f16_z_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_z_tied3, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z2, z0, 270),
		z0 = svcmla_z (p0, z1, z2, z0, 270))

/*
** cmla_270_f16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_z_untied, svfloat16_t,
		z0 = svcmla_f16_z (p0, z1, z2, z3, 270),
		z0 = svcmla_z (p0, z1, z2, z3, 270))

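/* _x forms: the values of inactive lanes are "don't care", and the
   expected sequences match the corresponding _m tests above, since FCMLA
   is only generated here in its predicated, accumulator-tied form.  */
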
/*
** cmla_0_f16_x_tied1:
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_x_tied1, svfloat16_t,
		z0 = svcmla_f16_x (p0, z0, z1, z2, 0),
		z0 = svcmla_x (p0, z0, z1, z2, 0))

/*
** cmla_0_f16_x_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_x_tied2, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z0, z2, 0),
		z0 = svcmla_x (p0, z1, z0, z2, 0))

/*
** cmla_0_f16_x_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_x_tied3, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z2, z0, 0),
		z0 = svcmla_x (p0, z1, z2, z0, 0))

/*
** cmla_0_f16_x_untied:
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #0
**	ret
*/
TEST_UNIFORM_Z (cmla_0_f16_x_untied, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z2, z3, 0),
		z0 = svcmla_x (p0, z1, z2, z3, 0))

/*
** cmla_90_f16_x_tied1:
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_x_tied1, svfloat16_t,
		z0 = svcmla_f16_x (p0, z0, z1, z2, 90),
		z0 = svcmla_x (p0, z0, z1, z2, 90))

/*
** cmla_90_f16_x_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_x_tied2, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z0, z2, 90),
		z0 = svcmla_x (p0, z1, z0, z2, 90))

/*
** cmla_90_f16_x_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_x_tied3, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z2, z0, 90),
		z0 = svcmla_x (p0, z1, z2, z0, 90))

/*
** cmla_90_f16_x_untied:
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #90
**	ret
*/
TEST_UNIFORM_Z (cmla_90_f16_x_untied, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z2, z3, 90),
		z0 = svcmla_x (p0, z1, z2, z3, 90))

/*
** cmla_180_f16_x_tied1:
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_x_tied1, svfloat16_t,
		z0 = svcmla_f16_x (p0, z0, z1, z2, 180),
		z0 = svcmla_x (p0, z0, z1, z2, 180))

/*
** cmla_180_f16_x_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_x_tied2, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z0, z2, 180),
		z0 = svcmla_x (p0, z1, z0, z2, 180))

/*
** cmla_180_f16_x_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_x_tied3, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z2, z0, 180),
		z0 = svcmla_x (p0, z1, z2, z0, 180))

/*
** cmla_180_f16_x_untied:
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #180
**	ret
*/
TEST_UNIFORM_Z (cmla_180_f16_x_untied, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z2, z3, 180),
		z0 = svcmla_x (p0, z1, z2, z3, 180))

/*
** cmla_270_f16_x_tied1:
**	fcmla	z0\.h, p0/m, z1\.h, z2\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_x_tied1, svfloat16_t,
		z0 = svcmla_f16_x (p0, z0, z1, z2, 270),
		z0 = svcmla_x (p0, z0, z1, z2, 270))

/*
** cmla_270_f16_x_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, \1\.h, z2\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_x_tied2, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z0, z2, 270),
		z0 = svcmla_x (p0, z1, z0, z2, 270))

/*
** cmla_270_f16_x_tied3:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, \1\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_x_tied3, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z2, z0, 270),
		z0 = svcmla_x (p0, z1, z2, z0, 270))

/*
** cmla_270_f16_x_untied:
**	movprfx	z0, z1
**	fcmla	z0\.h, p0/m, z2\.h, z3\.h, #270
**	ret
*/
TEST_UNIFORM_Z (cmla_270_f16_x_untied, svfloat16_t,
		z0 = svcmla_f16_x (p0, z1, z2, z3, 270),
		z0 = svcmla_x (p0, z1, z2, z3, 270))

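/* With an all-true predicate from svptrue_b16 () the surrounding code is
   allowed to vary, so the patterns below only require that a PTRUE is
   materialised somewhere before the return.  */
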
/*
** ptrue_cmla_0_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_0_f16_x_tied1, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z0, z1, z2, 0),
		z0 = svcmla_x (svptrue_b16 (), z0, z1, z2, 0))

/*
** ptrue_cmla_0_f16_x_tied2:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_0_f16_x_tied2, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z0, z2, 0),
		z0 = svcmla_x (svptrue_b16 (), z1, z0, z2, 0))

/*
** ptrue_cmla_0_f16_x_tied3:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_0_f16_x_tied3, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z2, z0, 0),
		z0 = svcmla_x (svptrue_b16 (), z1, z2, z0, 0))

/*
** ptrue_cmla_0_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_0_f16_x_untied, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z2, z3, 0),
		z0 = svcmla_x (svptrue_b16 (), z1, z2, z3, 0))

/*
** ptrue_cmla_90_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_90_f16_x_tied1, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z0, z1, z2, 90),
		z0 = svcmla_x (svptrue_b16 (), z0, z1, z2, 90))

/*
** ptrue_cmla_90_f16_x_tied2:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_90_f16_x_tied2, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z0, z2, 90),
		z0 = svcmla_x (svptrue_b16 (), z1, z0, z2, 90))

/*
** ptrue_cmla_90_f16_x_tied3:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_90_f16_x_tied3, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z2, z0, 90),
		z0 = svcmla_x (svptrue_b16 (), z1, z2, z0, 90))

/*
** ptrue_cmla_90_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_90_f16_x_untied, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z2, z3, 90),
		z0 = svcmla_x (svptrue_b16 (), z1, z2, z3, 90))

/*
** ptrue_cmla_180_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_180_f16_x_tied1, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z0, z1, z2, 180),
		z0 = svcmla_x (svptrue_b16 (), z0, z1, z2, 180))

/*
** ptrue_cmla_180_f16_x_tied2:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_180_f16_x_tied2, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z0, z2, 180),
		z0 = svcmla_x (svptrue_b16 (), z1, z0, z2, 180))

/*
** ptrue_cmla_180_f16_x_tied3:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_180_f16_x_tied3, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z2, z0, 180),
		z0 = svcmla_x (svptrue_b16 (), z1, z2, z0, 180))

/*
** ptrue_cmla_180_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_180_f16_x_untied, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z2, z3, 180),
		z0 = svcmla_x (svptrue_b16 (), z1, z2, z3, 180))

/*
** ptrue_cmla_270_f16_x_tied1:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_270_f16_x_tied1, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z0, z1, z2, 270),
		z0 = svcmla_x (svptrue_b16 (), z0, z1, z2, 270))

/*
** ptrue_cmla_270_f16_x_tied2:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_270_f16_x_tied2, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z0, z2, 270),
		z0 = svcmla_x (svptrue_b16 (), z1, z0, z2, 270))

/*
** ptrue_cmla_270_f16_x_tied3:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_270_f16_x_tied3, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z2, z0, 270),
		z0 = svcmla_x (svptrue_b16 (), z1, z2, z0, 270))

/*
** ptrue_cmla_270_f16_x_untied:
**	...
**	ptrue	p[0-9]+\.b[^\n]*
**	...
**	ret
*/
TEST_UNIFORM_Z (ptrue_cmla_270_f16_x_untied, svfloat16_t,
		z0 = svcmla_f16_x (svptrue_b16 (), z1, z2, z3, 270),
		z0 = svcmla_x (svptrue_b16 (), z1, z2, z3, 270))