gcc-13.2.0/gcc/testsuite/gcc.target/riscv/rvv/base/shift_vx_constraint-1.c
/* { dg-do compile } */
/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "riscv_vector.h"

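/* Constant shift amounts 0..31 fit the 5-bit immediate of vsll.vi.
   f1 shifts by 31, the largest value that still fits, so both the
   plain and the tail-undisturbed (tu) forms must keep the immediate
   encoding instead of spilling the amount to a scalar register.  */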
/*
** f1:
**	vsetivli\tzero,4,e32,m1,tu,ma
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsll\.vi\tv[0-9]+,\s*v[0-9]+,31
**	vsll\.vi\tv[0-9]+,\s*v[0-9]+,31
**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f1 (void *in, void *out)
{
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
    vint32m1_t v3 = __riscv_vsll_vx_i32m1 (v2, 31, 4);
    vint32m1_t v4 = __riscv_vsll_vx_i32m1_tu (v3, v2, 31, 4);
    __riscv_vse32_v_i32m1 (out, v4, 4);
}

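/* A shift amount of 32 no longer fits the 5-bit immediate, so the
   constant has to be moved into a scalar register and both the masked
   and unmasked shifts must use vsll.vx.  The empty asm acts as a
   compiler barrier, keeping the mask load (vlm.v) from being reordered
   or merged with the vector loads that follow.  */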
/*
** f2:
**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
**	vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	...
**	vsetivli\tzero,4,e32,m1,ta,ma
**	...
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\),v0\.t
**	vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vsll\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0\.t
**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f2 (void *in, void *out)
{
    vbool32_t mask = *(vbool32_t*)in;
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
    vint32m1_t v3 = __riscv_vsll_vx_i32m1 (v2, 32, 4);
    vint32m1_t v4 = __riscv_vsll_vx_i32m1_m (mask, v3, 32, 4);
    __riscv_vse32_v_i32m1 (out, v4, 4);
}

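/* 17 fits the 5-bit immediate, so vsll.vi is still expected under the
   tail-undisturbed, mask-undisturbed (tumu) policy.  */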
/*
** f3:
**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
**	vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e32,m1,tu,mu
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\),v0\.t
**	vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*17
**	vsll\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*17,\s*v0\.t
**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f3 (void *in, void *out)
{
    vbool32_t mask = *(vbool32_t*)in;
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
    vint32m1_t v3 = __riscv_vsll_vx_i32m1 (v2, 17, 4);
    vint32m1_t v4 = __riscv_vsll_vx_i32m1_tumu (mask, v3, v2, 17, 4);
    __riscv_vse32_v_i32m1 (out, v4, 4);
}

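/* A run-time shift amount can never use the immediate form: both the
   plain and the tu shifts must come out as vsll.vx, here on the
   fractional-LMUL type vint8mf8_t.  */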
/*
** f4:
**	vsetivli\tzero,4,e8,mf8,tu,ma
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f4 (void *in, void *out, size_t x)
{
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
    vint8mf8_t v3 = __riscv_vsll_vx_i8mf8 (v2, x, 4);
    vint8mf8_t v4 = __riscv_vsll_vx_i8mf8_tu (v3, v2, x, 4);
    __riscv_vse8_v_i8mf8 (out, v4, 4);
}

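/* 5 fits the 5-bit immediate, so the masked shift must stay in the
   vsll.vi form rather than falling back to vsll.vx.  */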
/*
** f5:
**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
**	vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e8,mf8,ta,ma
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\),v0\.t
**	vsll\.vi\tv[0-9]+,\s*v[0-9]+,\s*5
**	vsll\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*5,\s*v0\.t
**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f5 (void *in, void *out)
{
    vbool64_t mask = *(vbool64_t*)in;
    asm volatile ("":::"memory");
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
    vint8mf8_t v3 = __riscv_vsll_vx_i8mf8 (v2, 5, 4);
    vint8mf8_t v4 = __riscv_vsll_vx_i8mf8_m (mask, v3, 5, 4);
    __riscv_vse8_v_i8mf8 (out, v4, 4);
}

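/* Run-time shift amount combined with the tumu policy: the unmasked
   and the masked shifts must both use vsll.vx.  */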
/*
** f6:
**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
**	vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e8,mf8,tu,mu
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\),v0\.t
**	vsll\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vsll\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0\.t
**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f6 (void *in, void *out, size_t x)
{
    vbool64_t mask = *(vbool64_t*)in;
    asm volatile ("":::"memory");
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
    vint8mf8_t v3 = __riscv_vsll_vx_i8mf8 (v2, x, 4);
    vint8mf8_t v4 = __riscv_vsll_vx_i8mf8_tumu (mask, v3, v2, x, 4);
    __riscv_vse8_v_i8mf8 (out, v4, 4);
}