(root)/
gcc-13.2.0/
gcc/
testsuite/
gcc.target/
arm/
acle/
cde-mve-full-assembly.c
       1  /* { dg-do compile } */
       2  /* { dg-skip-if "Require optimisation to compile DCE tests" { *-*-* } { "-O0" "-mfloat-abi=softfp" } { "" } } */
       3  /* { dg-require-effective-target arm_v8_1m_main_cde_mve_fp_ok } */
       4  /* { dg-add-options arm_v8_1m_main_cde_mve_fp } */
       5  /* We use -ffast-math so that the addition of 0.0 to a value is assumed to not
       6     change the value.  This means the tests for float types can use the same
       7     trick of adding to a value initialised to zero to check whether the RTL
       8     patterns correctly mark that the incoming value is not used.  */
       9  /* { dg-additional-options "-ffast-math" } */
      10  /* { dg-final { check-function-bodies "**" "" } } */
      11  
      12  #include "cde-mve-tests.c"
      13  
      14  /* NOTE:
      15       We avoid matching the functions returning a __builtin_neon_ti value since
      16       there are variations between processors that make matching the whole
      17       function difficult.
      18       Since moving a TImode value into an MVE 'Q' register takes a few
      19       temporaries, this leaves many instructions which can end up being
      20       scheduled in different ways.  Matching the ways this ends up getting
      21       scheduled and restructured is awkward, and the extra tests for this one
      22       data type don't seem to be worth the confusing testcases.  */
      23  
      24  /*
      25  ** test_cde_vcx1q_u8float16x8_tintint:
      26  ** 	vcx1	p0, q0, #33
      27  ** 	bx	lr
      28  */
      29  /*
      30  ** test_cde_vcx1q_u8float32x4_tintint:
      31  ** 	vcx1	p0, q0, #33
      32  ** 	bx	lr
      33  */
      34  /*
      35  ** test_cde_vcx1q_u8uint8x16_tintint:
      36  ** 	vcx1	p0, q0, #33
      37  ** 	bx	lr
      38  */
      39  /*
      40  ** test_cde_vcx1q_u8uint16x8_tintint:
      41  ** 	vcx1	p0, q0, #33
      42  ** 	bx	lr
      43  */
      44  /*
      45  ** test_cde_vcx1q_u8uint32x4_tintint:
      46  ** 	vcx1	p0, q0, #33
      47  ** 	bx	lr
      48  */
      49  /*
      50  ** test_cde_vcx1q_u8uint64x2_tintint:
      51  ** 	vcx1	p0, q0, #33
      52  ** 	bx	lr
      53  */
      54  /*
      55  ** test_cde_vcx1q_u8int8x16_tintint:
      56  ** 	vcx1	p0, q0, #33
      57  ** 	bx	lr
      58  */
      59  /*
      60  ** test_cde_vcx1q_u8int16x8_tintint:
      61  ** 	vcx1	p0, q0, #33
      62  ** 	bx	lr
      63  */
      64  /*
      65  ** test_cde_vcx1q_u8int32x4_tintint:
      66  ** 	vcx1	p0, q0, #33
      67  ** 	bx	lr
      68  */
      69  /*
      70  ** test_cde_vcx1q_u8int64x2_tintint:
      71  ** 	vcx1	p0, q0, #33
      72  ** 	bx	lr
      73  */
      74  /*
      75  ** test_cde_vcx1qafloat16x8_tintint:
      76  ** 	vmov\.i32	q0, #0  @ v16qi
      77  ** 	vcx1a	p0, q0, #33
      78  ** 	bx	lr
      79  */
      80  /*
      81  ** test_cde_vcx1qafloat32x4_tintint:
      82  ** 	vmov\.i32	q0, #0  @ v16qi
      83  ** 	vcx1a	p0, q0, #33
      84  ** 	bx	lr
      85  */
      86  /*
      87  ** test_cde_vcx1qauint8x16_tintint:
      88  ** 	vmov\.i32	q0, #0  @ v16qi
      89  ** 	vcx1a	p0, q0, #33
      90  ** 	bx	lr
      91  */
      92  /*
      93  ** test_cde_vcx1qauint16x8_tintint:
      94  ** 	vmov\.i32	q0, #0  @ v16qi
      95  ** 	vcx1a	p0, q0, #33
      96  ** 	bx	lr
      97  */
      98  /*
      99  ** test_cde_vcx1qauint32x4_tintint:
     100  ** 	vmov\.i32	q0, #0  @ v16qi
     101  ** 	vcx1a	p0, q0, #33
     102  ** 	bx	lr
     103  */
     104  /*
     105  ** test_cde_vcx1qauint64x2_tintint:
     106  ** 	vmov\.i32	q0, #0  @ v16qi
     107  ** 	vcx1a	p0, q0, #33
     108  ** 	bx	lr
     109  */
     110  /*
     111  ** test_cde_vcx1qaint8x16_tintint:
     112  ** 	vmov\.i32	q0, #0  @ v16qi
     113  ** 	vcx1a	p0, q0, #33
     114  ** 	bx	lr
     115  */
     116  /*
     117  ** test_cde_vcx1qaint16x8_tintint:
     118  ** 	vmov\.i32	q0, #0  @ v16qi
     119  ** 	vcx1a	p0, q0, #33
     120  ** 	bx	lr
     121  */
     122  /*
     123  ** test_cde_vcx1qaint32x4_tintint:
     124  ** 	vmov\.i32	q0, #0  @ v16qi
     125  ** 	vcx1a	p0, q0, #33
     126  ** 	bx	lr
     127  */
     128  /*
     129  ** test_cde_vcx1qaint64x2_tintint:
     130  ** 	vmov\.i32	q0, #0  @ v16qi
     131  ** 	vcx1a	p0, q0, #33
     132  ** 	bx	lr
     133  */
     134  /*
     135  ** test_cde_vcx2q_u8float16x8_tuint16x8_tint:
     136  ** 	vcx2	p0, q0, q0, #33
     137  ** 	bx	lr
     138  */
     139  /*
     140  ** test_cde_vcx2q_u8float16x8_tfloat32x4_tint:
     141  ** 	vcx2	p0, q0, q0, #33
     142  ** 	bx	lr
     143  */
     144  /*
     145  ** test_cde_vcx2q_u8float32x4_tuint8x16_tint:
     146  ** 	vcx2	p0, q0, q0, #33
     147  ** 	bx	lr
     148  */
     149  /*
     150  ** test_cde_vcx2q_u8int64x2_tuint8x16_tint:
     151  ** 	vcx2	p0, q0, q0, #33
     152  ** 	bx	lr
     153  */
     154  /*
     155  ** test_cde_vcx2q_u8int8x16_tuint8x16_tint:
     156  ** 	vcx2	p0, q0, q0, #33
     157  ** 	bx	lr
     158  */
     159  /*
     160  ** test_cde_vcx2q_u8uint16x8_tuint8x16_tint:
     161  ** 	vcx2	p0, q0, q0, #33
     162  ** 	bx	lr
     163  */
     164  /*
     165  ** test_cde_vcx2q_u8uint8x16_tint64x2_tint:
     166  ** 	vcx2	p0, q0, q0, #33
     167  ** 	bx	lr
     168  */
     169  /*
     170  ** test_cde_vcx2q_u8uint8x16_tint8x16_tint:
     171  ** 	vcx2	p0, q0, q0, #33
     172  ** 	bx	lr
     173  */
     174  /*
     175  ** test_cde_vcx2q_u8uint8x16_tuint16x8_tint:
     176  ** 	vcx2	p0, q0, q0, #33
     177  ** 	bx	lr
     178  */
     179  /*
     180  ** test_cde_vcx2q_u8uint8x16_tuint8x16_tint:
     181  ** 	vcx2	p0, q0, q0, #33
     182  ** 	bx	lr
     183  */
     184  /*
     185  ** test_cde_vcx2qfloat16x8_tuint16x8_tint:
     186  ** 	vcx2	p0, q0, q0, #33
     187  ** 	bx	lr
     188  */
     189  /*
     190  ** test_cde_vcx2qfloat16x8_tfloat32x4_tint:
     191  ** 	vcx2	p0, q0, q0, #33
     192  ** 	bx	lr
     193  */
     194  /*
     195  ** test_cde_vcx2qfloat32x4_tuint8x16_tint:
     196  ** 	vcx2	p0, q0, q0, #33
     197  ** 	bx	lr
     198  */
     199  /*
     200  ** test_cde_vcx2qint64x2_tuint8x16_tint:
     201  ** 	vcx2	p0, q0, q0, #33
     202  ** 	bx	lr
     203  */
     204  /*
     205  ** test_cde_vcx2qint8x16_tuint8x16_tint:
     206  ** 	vcx2	p0, q0, q0, #33
     207  ** 	bx	lr
     208  */
     209  /*
     210  ** test_cde_vcx2quint16x8_tuint8x16_tint:
     211  ** 	vcx2	p0, q0, q0, #33
     212  ** 	bx	lr
     213  */
     214  /*
     215  ** test_cde_vcx2quint8x16_tint64x2_tint:
     216  ** 	vcx2	p0, q0, q0, #33
     217  ** 	bx	lr
     218  */
     219  /*
     220  ** test_cde_vcx2quint8x16_tint8x16_tint:
     221  ** 	vcx2	p0, q0, q0, #33
     222  ** 	bx	lr
     223  */
     224  /*
     225  ** test_cde_vcx2quint8x16_tuint16x8_tint:
     226  ** 	vcx2	p0, q0, q0, #33
     227  ** 	bx	lr
     228  */
     229  /*
     230  ** test_cde_vcx2quint8x16_tuint8x16_tint:
     231  ** 	vcx2	p0, q0, q0, #33
     232  ** 	bx	lr
     233  */
     234  /*
     235  ** test_cde_vcx2qafloat16x8_tuint16x8_tint:
     236  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     237  ** 	vcx2a	p0, \1, q0, #33
     238  ** 	vmov	q0, \1
     239  ** 	bx	lr
     240  */
     241  /*
     242  ** test_cde_vcx2qafloat16x8_tfloat32x4_tint:
     243  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     244  ** 	vcx2a	p0, \1, q0, #33
     245  ** 	vmov	q0, \1
     246  ** 	bx	lr
     247  */
     248  /*
     249  ** test_cde_vcx2qafloat32x4_tuint8x16_tint:
     250  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     251  ** 	vcx2a	p0, \1, q0, #33
     252  ** 	vmov	q0, \1
     253  ** 	bx	lr
     254  */
     255  /*
     256  ** test_cde_vcx2qaint64x2_tuint8x16_tint:
     257  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     258  ** 	vcx2a	p0, \1, q0, #33
     259  ** 	vmov	q0, \1
     260  ** 	bx	lr
     261  */
     262  /*
     263  ** test_cde_vcx2qaint8x16_tuint8x16_tint:
     264  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     265  ** 	vcx2a	p0, \1, q0, #33
     266  ** 	vmov	q0, \1
     267  ** 	bx	lr
     268  */
     269  /*
     270  ** test_cde_vcx2qauint16x8_tuint8x16_tint:
     271  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     272  ** 	vcx2a	p0, \1, q0, #33
     273  ** 	vmov	q0, \1
     274  ** 	bx	lr
     275  */
     276  /*
     277  ** test_cde_vcx2qauint8x16_tint64x2_tint:
     278  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     279  ** 	vcx2a	p0, \1, q0, #33
     280  ** 	vmov	q0, \1
     281  ** 	bx	lr
     282  */
     283  /*
     284  ** test_cde_vcx2qauint8x16_tint8x16_tint:
     285  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     286  ** 	vcx2a	p0, \1, q0, #33
     287  ** 	vmov	q0, \1
     288  ** 	bx	lr
     289  */
     290  /*
     291  ** test_cde_vcx2qauint8x16_tuint16x8_tint:
     292  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     293  ** 	vcx2a	p0, \1, q0, #33
     294  ** 	vmov	q0, \1
     295  ** 	bx	lr
     296  */
     297  /*
     298  ** test_cde_vcx2qauint8x16_tuint8x16_tint:
     299  ** 	vmov\.i32	(q[1-7]), #0  @ v16qi
     300  ** 	vcx2a	p0, \1, q0, #33
     301  ** 	vmov	q0, \1
     302  ** 	bx	lr
     303  */
     304  /*
     305  ** test_cde_vcx3q_u8uint8x16_tuint8x16_tuint8x16_t:
     306  ** 	vcx3	p0, q0, q0, q1, #12
     307  ** 	bx	lr
     308  */
     309  /*
     310  ** test_cde_vcx3q_u8uint16x8_tuint8x16_tuint8x16_t:
     311  ** 	vcx3	p0, q0, q0, q1, #12
     312  ** 	bx	lr
     313  */
     314  /*
     315  ** test_cde_vcx3q_u8uint8x16_tuint16x8_tuint8x16_t:
     316  ** 	vcx3	p0, q0, q0, q1, #12
     317  ** 	bx	lr
     318  */
     319  /*
     320  ** test_cde_vcx3q_u8uint8x16_tuint8x16_tuint16x8_t:
     321  ** 	vcx3	p0, q0, q0, q1, #12
     322  ** 	bx	lr
     323  */
     324  /*
     325  ** test_cde_vcx3q_u8float16x8_tfloat16x8_tfloat16x8_t:
     326  ** 	vcx3	p0, q0, q0, q1, #12
     327  ** 	bx	lr
     328  */
     329  /*
     330  ** test_cde_vcx3q_u8float32x4_tuint64x2_tfloat16x8_t:
     331  ** 	vcx3	p0, q0, q0, q1, #12
     332  ** 	bx	lr
     333  */
     334  /*
     335  ** test_cde_vcx3q_u8int8x16_tuint8x16_tuint8x16_t:
     336  ** 	vcx3	p0, q0, q0, q1, #12
     337  ** 	bx	lr
     338  */
     339  /*
     340  ** test_cde_vcx3q_u8uint8x16_tint8x16_tuint8x16_t:
     341  ** 	vcx3	p0, q0, q0, q1, #12
     342  ** 	bx	lr
     343  */
     344  /*
     345  ** test_cde_vcx3q_u8uint8x16_tuint8x16_tint8x16_t:
     346  ** 	vcx3	p0, q0, q0, q1, #12
     347  ** 	bx	lr
     348  */
     349  /*
     350  ** test_cde_vcx3q_u8int64x2_tuint8x16_tuint8x16_t:
     351  ** 	vcx3	p0, q0, q0, q1, #12
     352  ** 	bx	lr
     353  */
     354  /*
     355  ** test_cde_vcx3q_u8uint8x16_tint64x2_tuint8x16_t:
     356  ** 	vcx3	p0, q0, q0, q1, #12
     357  ** 	bx	lr
     358  */
     359  /*
     360  ** test_cde_vcx3q_u8uint8x16_tuint8x16_tint64x2_t:
     361  ** 	vcx3	p0, q0, q0, q1, #12
     362  ** 	bx	lr
     363  */
     364  /*
     365  ** test_cde_vcx3q_u8uint8x16_tint64x2_tint64x2_t:
     366  ** 	vcx3	p0, q0, q0, q1, #12
     367  ** 	bx	lr
     368  */
     369  /*
     370  ** test_cde_vcx3quint8x16_tuint8x16_tuint8x16_t:
     371  ** 	vcx3	p0, q0, q0, q1, #12
     372  ** 	bx	lr
     373  */
     374  /*
     375  ** test_cde_vcx3qfloat16x8_tfloat16x8_tfloat16x8_t:
     376  ** 	vcx3	p0, q0, q0, q1, #12
     377  ** 	bx	lr
     378  */
     379  /*
     380  ** test_cde_vcx3qfloat32x4_tuint64x2_tfloat16x8_t:
     381  ** 	vcx3	p0, q0, q0, q1, #12
     382  ** 	bx	lr
     383  */
     384  /*
     385  ** test_cde_vcx3quint16x8_tuint8x16_tuint8x16_t:
     386  ** 	vcx3	p0, q0, q0, q1, #12
     387  ** 	bx	lr
     388  */
     389  /*
     390  ** test_cde_vcx3quint8x16_tuint16x8_tuint8x16_t:
     391  ** 	vcx3	p0, q0, q0, q1, #12
     392  ** 	bx	lr
     393  */
     394  /*
     395  ** test_cde_vcx3quint8x16_tuint8x16_tuint16x8_t:
     396  ** 	vcx3	p0, q0, q0, q1, #12
     397  ** 	bx	lr
     398  */
     399  /*
     400  ** test_cde_vcx3qint8x16_tuint8x16_tuint8x16_t:
     401  ** 	vcx3	p0, q0, q0, q1, #12
     402  ** 	bx	lr
     403  */
     404  /*
     405  ** test_cde_vcx3quint8x16_tint8x16_tuint8x16_t:
     406  ** 	vcx3	p0, q0, q0, q1, #12
     407  ** 	bx	lr
     408  */
     409  /*
     410  ** test_cde_vcx3quint8x16_tuint8x16_tint8x16_t:
     411  ** 	vcx3	p0, q0, q0, q1, #12
     412  ** 	bx	lr
     413  */
     414  /*
     415  ** test_cde_vcx3qint64x2_tuint8x16_tuint8x16_t:
     416  ** 	vcx3	p0, q0, q0, q1, #12
     417  ** 	bx	lr
     418  */
     419  /*
     420  ** test_cde_vcx3quint8x16_tint64x2_tuint8x16_t:
     421  ** 	vcx3	p0, q0, q0, q1, #12
     422  ** 	bx	lr
     423  */
     424  /*
     425  ** test_cde_vcx3quint8x16_tuint8x16_tint64x2_t:
     426  ** 	vcx3	p0, q0, q0, q1, #12
     427  ** 	bx	lr
     428  */
     429  /*
     430  ** test_cde_vcx3quint8x16_tint64x2_tint64x2_t:
     431  ** 	vcx3	p0, q0, q0, q1, #12
     432  ** 	bx	lr
     433  */
     434  /*
     435  ** test_cde_vcx3qauint8x16_tuint8x16_tuint8x16_t:
     436  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     437  ** 	vcx3a	p0, \1, q0, q1, #12
     438  ** 	vmov	q0, \1
     439  ** 	bx	lr
     440  */
     441  /*
     442  ** test_cde_vcx3qafloat16x8_tfloat16x8_tfloat16x8_t:
     443  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     444  ** 	vcx3a	p0, \1, q0, q1, #12
     445  ** 	vmov	q0, \1
     446  ** 	bx	lr
     447  */
     448  /*
     449  ** test_cde_vcx3qafloat32x4_tuint64x2_tfloat16x8_t:
     450  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     451  ** 	vcx3a	p0, \1, q0, q1, #12
     452  ** 	vmov	q0, \1
     453  ** 	bx	lr
     454  */
     455  /*
     456  ** test_cde_vcx3qauint16x8_tuint8x16_tuint8x16_t:
     457  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     458  ** 	vcx3a	p0, \1, q0, q1, #12
     459  ** 	vmov	q0, \1
     460  ** 	bx	lr
     461  */
     462  /*
     463  ** test_cde_vcx3qauint8x16_tuint16x8_tuint8x16_t:
     464  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     465  ** 	vcx3a	p0, \1, q0, q1, #12
     466  ** 	vmov	q0, \1
     467  ** 	bx	lr
     468  */
     469  /*
     470  ** test_cde_vcx3qauint8x16_tuint8x16_tuint16x8_t:
     471  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     472  ** 	vcx3a	p0, \1, q0, q1, #12
     473  ** 	vmov	q0, \1
     474  ** 	bx	lr
     475  */
     476  /*
     477  ** test_cde_vcx3qaint8x16_tuint8x16_tuint8x16_t:
     478  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     479  ** 	vcx3a	p0, \1, q0, q1, #12
     480  ** 	vmov	q0, \1
     481  ** 	bx	lr
     482  */
     483  /*
     484  ** test_cde_vcx3qauint8x16_tint8x16_tuint8x16_t:
     485  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     486  ** 	vcx3a	p0, \1, q0, q1, #12
     487  ** 	vmov	q0, \1
     488  ** 	bx	lr
     489  */
     490  /*
     491  ** test_cde_vcx3qauint8x16_tuint8x16_tint8x16_t:
     492  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     493  ** 	vcx3a	p0, \1, q0, q1, #12
     494  ** 	vmov	q0, \1
     495  ** 	bx	lr
     496  */
     497  /*
     498  ** test_cde_vcx3qaint64x2_tuint8x16_tuint8x16_t:
     499  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     500  ** 	vcx3a	p0, \1, q0, q1, #12
     501  ** 	vmov	q0, \1
     502  ** 	bx	lr
     503  */
     504  /*
     505  ** test_cde_vcx3qauint8x16_tint64x2_tuint8x16_t:
     506  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     507  ** 	vcx3a	p0, \1, q0, q1, #12
     508  ** 	vmov	q0, \1
     509  ** 	bx	lr
     510  */
     511  /*
     512  ** test_cde_vcx3qauint8x16_tuint8x16_tint64x2_t:
     513  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     514  ** 	vcx3a	p0, \1, q0, q1, #12
     515  ** 	vmov	q0, \1
     516  ** 	bx	lr
     517  */
     518  /*
     519  ** test_cde_vcx3qauint8x16_tint64x2_tint64x2_t:
     520  ** 	vmov\.i32	(q[2-7]), #0  @ v16qi
     521  ** 	vcx3a	p0, \1, q0, q1, #12
     522  ** 	vmov	q0, \1
     523  ** 	bx	lr
     524  */
     525  
     526  /* Predicated MVE intrinsics.  */
     527  /* Merging lane predication types.
     528     NOTE: Depending on the target, the setup instructions (vmov's and vmsr) can
     529     be in a different order.  Here we just check that all the expected setup
     530     instructions are there.  We don't check that the setup instructions are
      531     different since the likelihood of the compiler generating repeated versions
     532     of one rather than one and the other is very low and it's difficult to apply
     533     such a constraint in TCL regexps (lookahead/lookbehind constraints may not
     534     contain back references).  */
     535  /*
     536  ** test_cde_vcx1q_mfloat16x8_tintint:
     537  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     538  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     539  ** 	vpst
     540  ** 	vcx1t	p0, q0, #32
     541  ** 	bx	lr
     542  */
     543  /*
     544  ** test_cde_vcx1q_mfloat32x4_tintint:
     545  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     546  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     547  ** 	vpst
     548  ** 	vcx1t	p0, q0, #32
     549  ** 	bx	lr
     550  */
     551  /*
     552  ** test_cde_vcx1q_muint8x16_tintint:
     553  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     554  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     555  ** 	vpst
     556  ** 	vcx1t	p0, q0, #32
     557  ** 	bx	lr
     558  */
     559  /*
     560  ** test_cde_vcx1q_muint16x8_tintint:
     561  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     562  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     563  ** 	vpst
     564  ** 	vcx1t	p0, q0, #32
     565  ** 	bx	lr
     566  */
     567  /*
     568  ** test_cde_vcx1q_muint32x4_tintint:
     569  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     570  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     571  ** 	vpst
     572  ** 	vcx1t	p0, q0, #32
     573  ** 	bx	lr
     574  */
     575  /*
     576  ** test_cde_vcx1q_muint64x2_tintint:
     577  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     578  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     579  ** 	vpst
     580  ** 	vcx1t	p0, q0, #32
     581  ** 	bx	lr
     582  */
     583  /*
     584  ** test_cde_vcx1q_mint8x16_tintint:
     585  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     586  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     587  ** 	vpst
     588  ** 	vcx1t	p0, q0, #32
     589  ** 	bx	lr
     590  */
     591  /*
     592  ** test_cde_vcx1q_mint16x8_tintint:
     593  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     594  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     595  ** 	vpst
     596  ** 	vcx1t	p0, q0, #32
     597  ** 	bx	lr
     598  */
     599  /*
     600  ** test_cde_vcx1q_mint32x4_tintint:
     601  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     602  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     603  ** 	vpst
     604  ** 	vcx1t	p0, q0, #32
     605  ** 	bx	lr
     606  */
     607  /*
     608  ** test_cde_vcx1q_mint64x2_tintint:
     609  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     610  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     611  ** 	vpst
     612  ** 	vcx1t	p0, q0, #32
     613  ** 	bx	lr
     614  */
     615  
     616  
     617  /*
     618  ** test_cde_vcx1qa_mfloat16x8_tintint:
     619  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     620  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     621  ** 	vpst
     622  ** 	vcx1at	p0, q0, #32
     623  ** 	bx	lr
     624  */
     625  /*
     626  ** test_cde_vcx1qa_mfloat32x4_tintint:
     627  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     628  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     629  ** 	vpst
     630  ** 	vcx1at	p0, q0, #32
     631  ** 	bx	lr
     632  */
     633  /*
     634  ** test_cde_vcx1qa_muint8x16_tintint:
     635  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     636  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     637  ** 	vpst
     638  ** 	vcx1at	p0, q0, #32
     639  ** 	bx	lr
     640  */
     641  /*
     642  ** test_cde_vcx1qa_muint16x8_tintint:
     643  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     644  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     645  ** 	vpst
     646  ** 	vcx1at	p0, q0, #32
     647  ** 	bx	lr
     648  */
     649  /*
     650  ** test_cde_vcx1qa_muint32x4_tintint:
     651  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     652  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     653  ** 	vpst
     654  ** 	vcx1at	p0, q0, #32
     655  ** 	bx	lr
     656  */
     657  /*
     658  ** test_cde_vcx1qa_muint64x2_tintint:
     659  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     660  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     661  ** 	vpst
     662  ** 	vcx1at	p0, q0, #32
     663  ** 	bx	lr
     664  */
     665  /*
     666  ** test_cde_vcx1qa_mint8x16_tintint:
     667  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     668  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     669  ** 	vpst
     670  ** 	vcx1at	p0, q0, #32
     671  ** 	bx	lr
     672  */
     673  /*
     674  ** test_cde_vcx1qa_mint16x8_tintint:
     675  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     676  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     677  ** 	vpst
     678  ** 	vcx1at	p0, q0, #32
     679  ** 	bx	lr
     680  */
     681  /*
     682  ** test_cde_vcx1qa_mint32x4_tintint:
     683  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     684  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     685  ** 	vpst
     686  ** 	vcx1at	p0, q0, #32
     687  ** 	bx	lr
     688  */
     689  /*
     690  ** test_cde_vcx1qa_mint64x2_tintint:
     691  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     692  ** 	(?:vmov\.i32	q0, #0  @ v16qi|vmsr	p0, r2	@ movhi)
     693  ** 	vpst
     694  ** 	vcx1at	p0, q0, #32
     695  ** 	bx	lr
     696  */
     697  
     698  
     699  /*
     700  ** test_cde_vcx2q_mfloat16x8_tuint16x8_tint:
     701  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     702  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     703  ** 	vpst
     704  ** 	vcx2t	p0, (q[1-7]), q0, #32
     705  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     706  ** 	bx	lr
     707  */
     708  /*
     709  ** test_cde_vcx2q_mfloat16x8_tfloat32x4_tint:
     710  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     711  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     712  ** 	vpst
     713  ** 	vcx2t	p0, (q[1-7]), q0, #32
     714  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     715  ** 	bx	lr
     716  */
     717  /*
     718  ** test_cde_vcx2q_mfloat32x4_tuint8x16_tint:
     719  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     720  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     721  ** 	vpst
     722  ** 	vcx2t	p0, (q[1-7]), q0, #32
     723  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     724  ** 	bx	lr
     725  */
     726  /*
     727  ** test_cde_vcx2q_mint64x2_tuint8x16_tint:
     728  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     729  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     730  ** 	vpst
     731  ** 	vcx2t	p0, (q[1-7]), q0, #32
     732  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     733  ** 	bx	lr
     734  */
     735  /*
     736  ** test_cde_vcx2q_mint8x16_tuint8x16_tint:
     737  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     738  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     739  ** 	vpst
     740  ** 	vcx2t	p0, (q[1-7]), q0, #32
     741  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     742  ** 	bx	lr
     743  */
     744  /*
     745  ** test_cde_vcx2q_muint16x8_tuint8x16_tint:
     746  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     747  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     748  ** 	vpst
     749  ** 	vcx2t	p0, (q[1-7]), q0, #32
     750  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     751  ** 	bx	lr
     752  */
     753  /*
     754  ** test_cde_vcx2q_muint8x16_tint64x2_tint:
     755  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     756  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     757  ** 	vpst
     758  ** 	vcx2t	p0, (q[1-7]), q0, #32
     759  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     760  ** 	bx	lr
     761  */
     762  /*
     763  ** test_cde_vcx2q_muint8x16_tint8x16_tint:
     764  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     765  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     766  ** 	vpst
     767  ** 	vcx2t	p0, (q[1-7]), q0, #32
     768  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     769  ** 	bx	lr
     770  */
     771  /*
     772  ** test_cde_vcx2q_muint8x16_tuint16x8_tint:
     773  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     774  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     775  ** 	vpst
     776  ** 	vcx2t	p0, (q[1-7]), q0, #32
     777  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     778  ** 	bx	lr
     779  */
     780  /*
     781  ** test_cde_vcx2q_muint8x16_tuint8x16_tint:
     782  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     783  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     784  ** 	vpst
     785  ** 	vcx2t	p0, (q[1-7]), q0, #32
     786  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     787  ** 	bx	lr
     788  */
     789  
     790  
     791  /*
     792  ** test_cde_vcx2qa_mfloat16x8_tuint16x8_tint:
     793  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     794  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     795  ** 	vpst
     796  ** 	vcx2at	p0, (q[1-7]), q0, #32
     797  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     798  ** 	bx	lr
     799  */
     800  /*
     801  ** test_cde_vcx2qa_mfloat16x8_tfloat32x4_tint:
     802  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     803  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     804  ** 	vpst
     805  ** 	vcx2at	p0, (q[1-7]), q0, #32
     806  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     807  ** 	bx	lr
     808  */
     809  /*
     810  ** test_cde_vcx2qa_mfloat32x4_tuint8x16_tint:
     811  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     812  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     813  ** 	vpst
     814  ** 	vcx2at	p0, (q[1-7]), q0, #32
     815  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     816  ** 	bx	lr
     817  */
     818  /*
     819  ** test_cde_vcx2qa_mint64x2_tuint8x16_tint:
     820  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     821  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     822  ** 	vpst
     823  ** 	vcx2at	p0, (q[1-7]), q0, #32
     824  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     825  ** 	bx	lr
     826  */
     827  /*
     828  ** test_cde_vcx2qa_mint8x16_tuint8x16_tint:
     829  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     830  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     831  ** 	vpst
     832  ** 	vcx2at	p0, (q[1-7]), q0, #32
     833  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     834  ** 	bx	lr
     835  */
     836  /*
     837  ** test_cde_vcx2qa_muint16x8_tuint8x16_tint:
     838  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     839  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     840  ** 	vpst
     841  ** 	vcx2at	p0, (q[1-7]), q0, #32
     842  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     843  ** 	bx	lr
     844  */
     845  /*
     846  ** test_cde_vcx2qa_muint8x16_tint64x2_tint:
     847  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     848  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     849  ** 	vpst
     850  ** 	vcx2at	p0, (q[1-7]), q0, #32
     851  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     852  ** 	bx	lr
     853  */
     854  /*
     855  ** test_cde_vcx2qa_muint8x16_tint8x16_tint:
     856  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     857  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     858  ** 	vpst
     859  ** 	vcx2at	p0, (q[1-7]), q0, #32
     860  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     861  ** 	bx	lr
     862  */
     863  /*
     864  ** test_cde_vcx2qa_muint8x16_tuint16x8_tint:
     865  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     866  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     867  ** 	vpst
     868  ** 	vcx2at	p0, (q[1-7]), q0, #32
     869  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     870  ** 	bx	lr
     871  */
     872  /*
     873  ** test_cde_vcx2qa_muint8x16_tuint8x16_tint:
     874  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     875  ** 	(?:vmov\.i32	q[1-7], #0  @ v16qi|vmsr	p0, r1	@ movhi)
     876  ** 	vpst
     877  ** 	vcx2at	p0, (q[1-7]), q0, #32
     878  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     879  ** 	bx	lr
     880  */
     881  
     882  
     883  /*
     884  ** test_cde_vcx3q_muint8x16_tuint8x16_tuint8x16_t:
     885  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     886  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     887  ** 	vpst
     888  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     889  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     890  ** 	bx	lr
     891  */
     892  /*
     893  ** test_cde_vcx3q_mfloat16x8_tfloat16x8_tfloat16x8_t:
     894  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     895  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     896  ** 	vpst
     897  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     898  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     899  ** 	bx	lr
     900  */
     901  /*
     902  ** test_cde_vcx3q_mfloat32x4_tuint64x2_tfloat16x8_t:
     903  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     904  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     905  ** 	vpst
     906  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     907  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     908  ** 	bx	lr
     909  */
     910  /*
     911  ** test_cde_vcx3q_muint16x8_tuint8x16_tuint8x16_t:
     912  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     913  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     914  ** 	vpst
     915  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     916  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     917  ** 	bx	lr
     918  */
     919  /*
     920  ** test_cde_vcx3q_muint8x16_tuint16x8_tuint8x16_t:
     921  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     922  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     923  ** 	vpst
     924  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     925  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     926  ** 	bx	lr
     927  */
     928  /*
     929  ** test_cde_vcx3q_muint8x16_tuint8x16_tuint16x8_t:
     930  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     931  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     932  ** 	vpst
     933  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     934  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     935  ** 	bx	lr
     936  */
     937  /*
     938  ** test_cde_vcx3q_mint8x16_tuint8x16_tuint8x16_t:
     939  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     940  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     941  ** 	vpst
     942  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     943  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     944  ** 	bx	lr
     945  */
     946  /*
     947  ** test_cde_vcx3q_muint8x16_tint8x16_tuint8x16_t:
     948  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     949  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     950  ** 	vpst
     951  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     952  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     953  ** 	bx	lr
     954  */
     955  /*
     956  ** test_cde_vcx3q_muint8x16_tuint8x16_tint8x16_t:
     957  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     958  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     959  ** 	vpst
     960  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     961  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     962  ** 	bx	lr
     963  */
     964  /*
     965  ** test_cde_vcx3q_mint64x2_tuint8x16_tuint8x16_t:
     966  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     967  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     968  ** 	vpst
     969  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     970  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     971  ** 	bx	lr
     972  */
     973  /*
     974  ** test_cde_vcx3q_muint8x16_tint64x2_tuint8x16_t:
     975  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     976  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     977  ** 	vpst
     978  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     979  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     980  ** 	bx	lr
     981  */
     982  /*
     983  ** test_cde_vcx3q_muint8x16_tuint8x16_tint64x2_t:
     984  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     985  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     986  ** 	vpst
     987  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     988  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     989  ** 	bx	lr
     990  */
     991  /*
     992  ** test_cde_vcx3q_muint8x16_tint64x2_tint64x2_t:
     993  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     994  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
     995  ** 	vpst
     996  ** 	vcx3t	p0, (q[2-7]), q0, q1, #15
     997  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
     998  ** 	bx	lr
     999  */
    1000  
    1001  
    1002  /*
    1003  ** test_cde_vcx3qa_muint8x16_tuint8x16_tuint8x16_t:
    1004  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1005  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1006  ** 	vpst
    1007  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1008  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1009  ** 	bx	lr
    1010  */
    1011  /*
    1012  ** test_cde_vcx3qa_mfloat16x8_tfloat16x8_tfloat16x8_t:
    1013  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1014  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1015  ** 	vpst
    1016  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1017  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1018  ** 	bx	lr
    1019  */
    1020  /*
    1021  ** test_cde_vcx3qa_mfloat32x4_tuint64x2_tfloat16x8_t:
    1022  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1023  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1024  ** 	vpst
    1025  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1026  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1027  ** 	bx	lr
    1028  */
    1029  /*
    1030  ** test_cde_vcx3qa_muint16x8_tuint8x16_tuint8x16_t:
    1031  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1032  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1033  ** 	vpst
    1034  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1035  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1036  ** 	bx	lr
    1037  */
    1038  /*
    1039  ** test_cde_vcx3qa_muint8x16_tuint16x8_tuint8x16_t:
    1040  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1041  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1042  ** 	vpst
    1043  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1044  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1045  ** 	bx	lr
    1046  */
    1047  /*
    1048  ** test_cde_vcx3qa_muint8x16_tuint8x16_tuint16x8_t:
    1049  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1050  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1051  ** 	vpst
    1052  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1053  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1054  ** 	bx	lr
    1055  */
    1056  /*
    1057  ** test_cde_vcx3qa_mint8x16_tuint8x16_tuint8x16_t:
    1058  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1059  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1060  ** 	vpst
    1061  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1062  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1063  ** 	bx	lr
    1064  */
    1065  /*
    1066  ** test_cde_vcx3qa_muint8x16_tint8x16_tuint8x16_t:
    1067  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1068  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1069  ** 	vpst
    1070  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1071  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1072  ** 	bx	lr
    1073  */
    1074  /*
    1075  ** test_cde_vcx3qa_muint8x16_tuint8x16_tint8x16_t:
    1076  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1077  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1078  ** 	vpst
    1079  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1080  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1081  ** 	bx	lr
    1082  */
    1083  /*
    1084  ** test_cde_vcx3qa_mint64x2_tuint8x16_tuint8x16_t:
    1085  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1086  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1087  ** 	vpst
    1088  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1089  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1090  ** 	bx	lr
    1091  */
    1092  /*
    1093  ** test_cde_vcx3qa_muint8x16_tint64x2_tuint8x16_t:
    1094  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1095  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1096  ** 	vpst
    1097  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1098  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1099  ** 	bx	lr
    1100  */
    1101  /*
    1102  ** test_cde_vcx3qa_muint8x16_tuint8x16_tint64x2_t:
    1103  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1104  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1105  ** 	vpst
    1106  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1107  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1108  ** 	bx	lr
    1109  */
    1110  /*
    1111  ** test_cde_vcx3qa_muint8x16_tint64x2_tint64x2_t:
    1112  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1113  ** 	(?:vmov\.i32	q[2-7], #0  @ v16qi|vmsr	p0, r0	@ movhi)
    1114  ** 	vpst
    1115  ** 	vcx3at	p0, (q[2-7]), q0, q1, #15
    1116  ** 	vmov	q0, \1([[:space:]]+@ [^\n]*)?
    1117  ** 	bx	lr
    1118  */