gcc-13.2.0/gcc/testsuite/gcc.target/arm/mve/mve_load_memory_modes.c
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */
/* { dg-final { check-function-bodies "**" "" } } */

#include "arm_mve.h"
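
/* Check that in-range constant offsets are folded into the immediate field
   of the MVE VLDR{B,H,W} loads: the offset is a 7-bit value scaled by the
   access size, giving +/-127 bytes for byte, +/-254 for halfword and
   +/-508 for word accesses.  Out-of-range or misaligned offsets must fall
   back to plain register addressing, and pointer updates around a load
   should use the pre- or post-indexed (writeback) forms.  */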
/*
**off_load8_0:
**	...
**	vldrb.8	q[0-7], \[r0, #16\]
**	...
*/
int8x16_t off_load8_0 (int8_t * a)
{
  return vld1q_s8 (a + 16);
}

/*
**off_load8_1:
**	...
**	vldrb.u16	q[0-7], \[r0, #1\]
**	...
*/
uint16x8_t off_load8_1 (uint8_t * a)
{
  return vldrbq_u16 (a + 1);
}

/*
**off_load8_2:
**	...
**	vldrb.s32	q[0-7], \[r0, #127\]
**	...
*/
int32x4_t off_load8_2 (int8_t * a)
{
  return vldrbq_s32 (a + 127);
}

/*
**off_load8_3:
**	...
**	vldrb.8	q[0-7], \[r0, #-127\]
**	...
*/
uint8x16_t off_load8_3 (uint8_t * a)
{
  return vldrbq_u8 (a - 127);
}

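/* An offset of 128 bytes is outside VLDRB's +/-127 immediate range, so the
   address has to be computed into a register first.  */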
/*
**not_off_load8_0:
**	...
**	vldrb.8	q[0-7], \[r[0-7]+\]
**	...
*/
int8x16_t not_off_load8_0 (int8_t * a)
{
  return vld1q_s8 (a + 128);
}

/*
**off_loadfp16_0:
**	...
**	vldrh.16	q[0-7], \[r0, #-244\]
**	...
*/
float16x8_t off_loadfp16_0 (float16_t *a)
{
  return vld1q_f16 (a - 122);
}

/*
**off_load16_0:
**	...
**	vldrh.16	q[0-7], \[r0, #-2\]
**	...
*/
uint16x8_t off_load16_0 (uint16_t * a)
{
  return vld1q_u16 (a - 1);
}

/*
**off_load16_1:
**	...
**	vldrh.u32	q[0-7], \[r0, #254\]
**	...
*/
uint32x4_t off_load16_1 (uint16_t * a)
{
  return vldrhq_u32 (a + 127);
}

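/* A 1-byte offset is not a multiple of the halfword access size, so it
   cannot be encoded in the VLDRH immediate.  */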
/*
**not_off_load16_0:
**	...
**	vldrh.16	q[0-7], \[r[0-7]+\]
**	...
*/
int16x8_t not_off_load16_0 (int8_t * a)
{
  return vld1q_s16 ((int16_t *)(a + 1));
}

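/* An offset of -256 bytes falls just outside VLDRH's +/-254 immediate
   range.  */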
/*
**not_off_load16_1:
**	...
**	vldrh.u32	q[0-7], \[r[0-7]+\]
**	...
*/
uint32x4_t not_off_load16_1 (uint16_t * a)
{
  return vldrhq_u32 ((a - 128));
}

/*
**off_loadfp32_0:
**	...
**	vldrw.32	q[0-7], \[r0, #24\]
**	...
*/
float32x4_t off_loadfp32_0 (float32_t *a)
{
  return vld1q_f32 (a + 6);
}

/*
**off_load32_0:
**	...
**	vldrw.32	q[0-7], \[r0, #4\]
**	...
*/
uint32x4_t off_load32_0 (uint32_t * a)
{
  return vld1q_u32 (a + 1);
}

/*
**off_load32_1:
**	...
**	vldrw.32	q[0-7], \[r0, #-508\]
**	...
*/
int32x4_t off_load32_1 (int32_t * a)
{
  return vldrwq_s32 (a - 127);
}
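
/* When the pointer is advanced before the load and the updated pointer is
   also returned, the pre-indexed (writeback) form "[rN, #imm]!" should be
   used so the update and the load become a single instruction.  */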
/*
**pre_load8_0:
**	...
**	vldrb.8	q[0-7], \[r0, #16\]!
**	...
*/
int8_t* pre_load8_0 (int8_t * a, int8x16_t *v)
{
  a += 16;
  *v = vld1q_s8 (a);
  return a;
}

/*
**pre_load8_1:
**	...
**	vldrb.u16	q[0-7], \[r0, #4\]!
**	...
*/
uint8_t* pre_load8_1 (uint8_t * a, uint16x8_t *v)
{
  a += 4;
  *v = vldrbq_u16 (a);
  return a;
}

/*
**pre_loadfp16_0:
**	...
**	vldrh.16	q[0-7], \[r0, #128\]!
**	...
*/
float16_t* pre_loadfp16_0 (float16_t *a, float16x8_t *v)
{
  a += 64;
  *v = vld1q_f16 (a);
  return a;
}

/*
**pre_load16_0:
**	...
**	vldrh.16	q[0-7], \[r0, #-254\]!
**	...
*/
int16_t* pre_load16_0 (int16_t * a, int16x8_t *v)
{
  a -= 127;
  *v = vldrhq_s16 (a);
  return a;
}

/*
**pre_load16_1:
**	...
**	vldrh.s32	q[0-7], \[r0, #52\]!
**	...
*/
int16_t* pre_load16_1 (int16_t * a, int32x4_t *v)
{
  a += 26;
  *v = vldrhq_s32 (a);
  return a;
}

/*
**pre_loadfp32_0:
**	...
**	vldrw.32	q[0-7], \[r0, #-72\]!
**	...
*/
float32_t* pre_loadfp32_0 (float32_t *a, float32x4_t *v)
{
  a -= 18;
  *v = vld1q_f32 (a);
  return a;
}

/*
**pre_load32_0:
**	...
**	vldrw.32	q[0-7], \[r0, #-4\]!
**	...
*/
uint32_t* pre_load32_0 (uint32_t * a, uint32x4_t *v)
{
  a -= 1;
  *v = vld1q_u32 (a);
  return a;
}

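/* When the pointer is advanced after the load, the post-indexed form
   "[rN], #imm" should be used instead.  */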
/*
**post_load8_0:
**	...
**	vldrb.8	q[0-7], \[r0\], #26
**	...
*/
uint8_t* post_load8_0 (uint8_t * a, uint8x16_t *v)
{
  *v = vld1q_u8 (a);
  a += 26;
  return a;
}

/*
**post_load8_1:
**	...
**	vldrb.s16	q[0-7], \[r0\], #-1
**	...
*/
int8_t* post_load8_1 (int8_t * a, int16x8_t *v)
{
  *v = vldrbq_s16 (a);
  a--;
  return a;
}

/*
**post_load8_2:
**	...
**	vldrb.8	q[0-7], \[r0\], #26
**	...
*/
uint8_t* post_load8_2 (uint8_t * a, uint8x16_t *v)
{
  *v = vld1q_u8 (a);
  a += 26;
  return a;
}

/*
**post_load8_3:
**	...
**	vldrb.s16	q[0-7], \[r0\], #-1
**	...
*/
int8_t* post_load8_3 (int8_t * a, int16x8_t *v)
{
  *v = vldrbq_s16 (a);
  a--;
  return a;
}

/*
**post_loadfp16_0:
**	...
**	vldrh.16	q[0-7], \[r0\], #-24
**	...
*/
float16_t* post_loadfp16_0 (float16_t *a, float16x8_t *v)
{
  *v = vld1q_f16 (a);
  a -= 12;
  return a;
}

/*
**post_load16_0:
**	...
**	vldrh.16	q[0-7], \[r0\], #-126
**	...
*/
uint16_t* post_load16_0 (uint16_t * a, uint16x8_t *v)
{
  *v = vldrhq_u16 (a);
  a -= 63;
  return a;
}

/*
**post_load16_1:
**	...
**	vldrh.u32	q[0-7], \[r0\], #16
**	...
*/
uint16_t* post_load16_1 (uint16_t * a, uint32x4_t *v)
{
  *v = vldrhq_u32 (a);
  a += 8;
  return a;
}

/*
**post_loadfp32_0:
**	...
**	vldrw.32	q[0-7], \[r0\], #4
**	...
*/
float32_t* post_loadfp32_0 (float32_t *a, float32x4_t *v)
{
  *v = vld1q_f32 (a);
  a++;
  return a;
}

/*
**post_load32_0:
**	...
**	vldrw.32	q[0-7], \[r0\], #-16
**	...
*/
int32_t* post_load32_0 (int32_t * a, int32x4_t *v)
{
  *v = vld1q_s32 (a);
  a -= 4;
  return a;
}