gcc-13.2.0/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c
/* { dg-do compile } */
/* { dg-options "-O" } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-mfloat-abi=hard" } */
/* { dg-final { check-function-bodies "**" "" } } */
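
/* Test that 64-bit FP moves between core registers, FP/MVE registers
   and memory pick the expected instructions on Armv8.1-M with MVE
   and the hard-float ABI.  Function names encode the destination and
   source: "r" is a core-register (GPR) pair, "w" is an FP/MVE double
   register, and "m_<index>" is a memory access at that array index
   (in doubles).  Each function pins its value to a named hard
   register with a local register variable, and an empty asm keeps
   the value live, so the body reduces to exactly the instructions
   matched by check-function-bodies above.

   To inspect the assembly by hand, something like the following
   should work (assuming an arm-none-eabi cross compiler; the dg
   directives above add the precise option set):

     arm-none-eabi-gcc -O -mthumb -march=armv8.1-m.main+mve \
       -mfloat-abi=hard -S armv8_1m-fp64-move-1.c

   and this test alone can be run through DejaGnu with something like:

     make check-gcc RUNTESTFLAGS="arm.exp=armv8_1m-fp64-move-1.c"  */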

/*
** r_w:
**	vmov	r0, r1, d0
**	bx	lr
*/
void
r_w (double d0)
{
  register double r0 asm ("r0");
  r0 = d0;
  asm volatile ("" :: "r" (r0));
}

/*
** w_r:
**	vmov	d0, r0, r1
**	bx	lr
*/
double
w_r ()
{
  register double r0 asm ("r0");
  asm volatile ("" : "=r" (r0));
  return r0;
}

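/* The "( ... | ... )" syntax below is check-function-bodies
   alternation: the two single-precision moves are independent, so
   either order is acceptable.  */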
/*
** w_w:
** (
**	vmov.f32	s2, s0
**	vmov.f32	s3, s1
** |
**	vmov.f32	s3, s1
**	vmov.f32	s2, s0
** )
**	bx	lr
*/
void
w_w (double d0)
{
  register double d1 asm ("d1");
  d1 = d0;
  asm volatile ("" :: "w" (d1));
}

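/* Loads into a GPR pair.  Indices -31..127 (byte offsets -248 to
   1016) are expected to fold into the ldrd addressing mode; -32 and
   128 fall outside the range the backend accepts, so the address
   must be computed separately first.  */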
/*
** r_m_m32:
**	sub	(r[0-9]+), r0, #256
**	ldrd	r2, \[\1\]
**	bx	lr
*/
void
r_m_m32 (double *r0)
{
  register double r2 asm ("r2");
  r2 = r0[-32];
  asm volatile ("" :: "r" (r2));
}

/*
** r_m_m31:
**	ldrd	r2, \[r0, #-248\]
**	bx	lr
*/
void
r_m_m31 (double *r0)
{
  register double r2 asm ("r2");
  r2 = r0[-31];
  asm volatile ("" :: "r" (r2));
}

/*
** r_m_m1:
**	ldrd	r2, \[r0, #-8\]
**	bx	lr
*/
void
r_m_m1 (double *r0)
{
  register double r2 asm ("r2");
  r2 = r0[-1];
  asm volatile ("" :: "r" (r2));
}

/*
** r_m_0:
**	ldrd	r2, \[r0\]
**	bx	lr
*/
void
r_m_0 (double *r0)
{
  register double r2 asm ("r2");
  r2 = r0[0];
  asm volatile ("" :: "r" (r2));
}

/*
** r_m_1:
**	ldrd	r2, \[r0, #8\]
**	bx	lr
*/
void
r_m_1 (double *r0)
{
  register double r2 asm ("r2");
  r2 = r0[1];
  asm volatile ("" :: "r" (r2));
}

/*
** r_m_127:
**	ldrd	r2, \[r0, #1016\]
**	bx	lr
*/
void
r_m_127 (double *r0)
{
  register double r2 asm ("r2");
  r2 = r0[127];
  asm volatile ("" :: "r" (r2));
}

/*
** r_m_128:
**	add	(r[0-9]+), r0, #1024
**	ldrd	r2, \[\1\]
**	bx	lr
*/
void
r_m_128 (double *r0)
{
  register double r2 asm ("r2");
  r2 = r0[128];
  asm volatile ("" :: "r" (r2));
}

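/* The same offset series, now loading into an FP/MVE register with
   vldr.64.  The expected in-range window matches the GPR loads
   above.  */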
/* ??? This could be done in one instruction, but without mve.fp,
   it makes more sense for memory_operand to enforce the GPR range.  */
/*
** w_m_m32:
**	sub	(r[0-9]+), r0, #256
**	vldr.64	d0, \[\1\]
**	bx	lr
*/
void
w_m_m32 (double *r0)
{
  register double d0 asm ("d0");
  d0 = r0[-32];
  asm volatile ("" :: "w" (d0));
}

/*
** w_m_m31:
**	vldr.64	d0, \[r0, #-248\]
**	bx	lr
*/
void
w_m_m31 (double *r0)
{
  register double d0 asm ("d0");
  d0 = r0[-31];
  asm volatile ("" :: "w" (d0));
}

/*
** w_m_m1:
**	vldr.64	d0, \[r0, #-8\]
**	bx	lr
*/
void
w_m_m1 (double *r0)
{
  register double d0 asm ("d0");
  d0 = r0[-1];
  asm volatile ("" :: "w" (d0));
}

/*
** w_m_0:
**	vldr.64	d0, \[r0\]
**	bx	lr
*/
void
w_m_0 (double *r0)
{
  register double d0 asm ("d0");
  d0 = r0[0];
  asm volatile ("" :: "w" (d0));
}

/*
** w_m_1:
**	vldr.64	d0, \[r0, #8\]
**	bx	lr
*/
void
w_m_1 (double *r0)
{
  register double d0 asm ("d0");
  d0 = r0[1];
  asm volatile ("" :: "w" (d0));
}

/*
** w_m_127:
**	vldr.64	d0, \[r0, #1016\]
**	bx	lr
*/
void
w_m_127 (double *r0)
{
  register double d0 asm ("d0");
  d0 = r0[127];
  asm volatile ("" :: "w" (d0));
}

/*
** w_m_128:
**	add	(r[0-9]+), r0, #1024
**	vldr.64	d0, \[\1\]
**	bx	lr
*/
void
w_m_128 (double *r0)
{
  register double d0 asm ("d0");
  d0 = r0[128];
  asm volatile ("" :: "w" (d0));
}

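/* The store direction: the same boundary offsets, storing a GPR
   pair with strd.  */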
/*
** m_m32_r:
**	sub	(r[0-9]+), r0, #256
**	strd	r2, \[\1\]
**	bx	lr
*/
void
m_m32_r (double *r0)
{
  register double r2 asm ("r2");
  asm volatile ("" : "=r" (r2));
  r0[-32] = r2;
}

/*
** m_m31_r:
**	strd	r2, \[r0, #-248\]
**	bx	lr
*/
void
m_m31_r (double *r0)
{
  register double r2 asm ("r2");
  asm volatile ("" : "=r" (r2));
  r0[-31] = r2;
}

/*
** m_m1_r:
**	strd	r2, \[r0, #-8\]
**	bx	lr
*/
void
m_m1_r (double *r0)
{
  register double r2 asm ("r2");
  asm volatile ("" : "=r" (r2));
  r0[-1] = r2;
}

/*
** m_0_r:
**	strd	r2, \[r0\]
**	bx	lr
*/
void
m_0_r (double *r0)
{
  register double r2 asm ("r2");
  asm volatile ("" : "=r" (r2));
  r0[0] = r2;
}

/*
** m_1_r:
**	strd	r2, \[r0, #8\]
**	bx	lr
*/
void
m_1_r (double *r0)
{
  register double r2 asm ("r2");
  asm volatile ("" : "=r" (r2));
  r0[1] = r2;
}

/*
** m_127_r:
**	strd	r2, \[r0, #1016\]
**	bx	lr
*/
void
m_127_r (double *r0)
{
  register double r2 asm ("r2");
  asm volatile ("" : "=r" (r2));
  r0[127] = r2;
}

/*
** m_128_r:
**	add	(r[0-9]+), r0, #1024
**	strd	r2, \[\1\]
**	bx	lr
*/
void
m_128_r (double *r0)
{
  register double r2 asm ("r2");
  asm volatile ("" : "=r" (r2));
  r0[128] = r2;
}

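/* Finally, stores from an FP/MVE register with vstr.64, again with
   the same expected offset window.  */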
/* ??? This could be done in one instruction, but without mve.fp,
   it makes more sense for memory_operand to enforce the GPR range.  */
/*
** m_m32_w:
**	sub	(r[0-9]+), r0, #256
**	vstr.64	d0, \[\1\]
**	bx	lr
*/
void
m_m32_w (double *r0)
{
  register double d0 asm ("d0");
  asm volatile ("" : "=w" (d0));
  r0[-32] = d0;
}

/*
** m_m31_w:
**	vstr.64	d0, \[r0, #-248\]
**	bx	lr
*/
void
m_m31_w (double *r0)
{
  register double d0 asm ("d0");
  asm volatile ("" : "=w" (d0));
  r0[-31] = d0;
}

/*
** m_m1_w:
**	vstr.64	d0, \[r0, #-8\]
**	bx	lr
*/
void
m_m1_w (double *r0)
{
  register double d0 asm ("d0");
  asm volatile ("" : "=w" (d0));
  r0[-1] = d0;
}

/*
** m_0_w:
**	vstr.64	d0, \[r0\]
**	bx	lr
*/
void
m_0_w (double *r0)
{
  register double d0 asm ("d0");
  asm volatile ("" : "=w" (d0));
  r0[0] = d0;
}

/*
** m_1_w:
**	vstr.64	d0, \[r0, #8\]
**	bx	lr
*/
void
m_1_w (double *r0)
{
  register double d0 asm ("d0");
  asm volatile ("" : "=w" (d0));
  r0[1] = d0;
}

/*
** m_127_w:
**	vstr.64	d0, \[r0, #1016\]
**	bx	lr
*/
void
m_127_w (double *r0)
{
  register double d0 asm ("d0");
  asm volatile ("" : "=w" (d0));
  r0[127] = d0;
}

/*
** m_128_w:
**	add	(r[0-9]+), r0, #1024
**	vstr.64	d0, \[\1\]
**	bx	lr
*/
void
m_128_w (double *r0)
{
  register double d0 asm ("d0");
  asm volatile ("" : "=w" (d0));
  r0[128] = d0;
}