/* gcc-13.2.0/gcc/testsuite/gcc.target/riscv/rvv/base/unop_v_constraint-2.c */
/* { dg-do compile } */
/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "riscv_vector.h"

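/* Each fN below pairs an unpredicated vsext.vf2 intrinsic with one of its
   policy variants (_tu, _m, _tumu) and relies on check-function-bodies to
   verify that the expected vsetvli/vsetivli configuration -- SEW/LMUL plus
   the tail/mask policy bits -- is emitted for each combination.  */
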
/*
** f1:
**	vsetivli\tzero,4,e32,m1,tu,ma
**	vle16\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle16\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsext\.vf2\tv[0-9]+,\s*v[0-9]+
**	vsext\.vf2\tv[0-9]+,\s*v[0-9]+
**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
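/* The _tu variants must leave the tail elements of their merge operand
   undisturbed, so a single vsetivli with the "tu" tail policy has to cover
   both the plain and the _tu calls.  */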
void f1 (void * in, void *out)
{
    vint16mf2_t v = __riscv_vle16_v_i16mf2 (in, 4);
    vint16mf2_t v2 = __riscv_vle16_v_i16mf2_tu (v, in, 4);
    vint32m1_t v3 = __riscv_vsext_vf2_i32m1 (v2, 4);
    vint32m1_t v4 = __riscv_vsext_vf2_i32m1_tu (v3, v2, 4);
    __riscv_vse32_v_i32m1 (out, v4, 4);
}

/*
** f2:
**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
**	vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e32,m1,ta,ma
**	vle16\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsext\.vf2\tv[0-9]+,\s*v[0-9]+
**	vsetvli\tzero,zero,e64,m2,ta,ma
**	vsext\.vf2\tv[1-9][0-9]?,\s*v[0-9]+,\s*v0\.t
**	vse64\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
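/* The masked _m extension widens to i64m2, so a second vsetvli switching to
   e64,m2 must appear before the masked vsext.  The mask itself is loaded
   with vlm.v under a separate e8,mf4 configuration, and the asm memory
   clobber keeps that load from being reordered with the vector load.  */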
void f2 (void * in, void *out)
{
    vbool32_t mask = *(vbool32_t*)in;
    asm volatile ("":::"memory");
    vint16mf2_t v = __riscv_vle16_v_i16mf2 (in, 4);
    vint32m1_t v3 = __riscv_vsext_vf2_i32m1 (v, 4);
    vint64m2_t v4 = __riscv_vsext_vf2_i64m2_m (mask, v3, 4);
    __riscv_vse64_v_i64m2 (out, v4, 4);
}

/*
** f3:
**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
**	vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e32,m1,tu,mu
**	vle16\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle16\.v\tv[0-9]+,0\([a-x0-9]+\),v0\.t
**	vsext\.vf2\tv[0-9]+,\s*v[0-9]+
**	vsext\.vf2\tv[1-9][0-9]?,\s*v[0-9]+,\s*v0\.t
**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
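/* The _tumu variants require both tail-undisturbed and mask-undisturbed
   behaviour, so the vsetivli carries the "tu,mu" policy and the masked load
   and masked extension both take the v0.t operand.  */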
void f3 (void * in, void *out)
{
    vbool32_t mask = *(vbool32_t*)in;
    asm volatile ("":::"memory");
    vint16mf2_t v = __riscv_vle16_v_i16mf2 (in, 4);
    vint16mf2_t v2 = __riscv_vle16_v_i16mf2_tumu (mask, v, in, 4);
    vint32m1_t v3 = __riscv_vsext_vf2_i32m1 (v2, 4);
    vint32m1_t v4 = __riscv_vsext_vf2_i32m1_tumu (mask, v3, v2, 4);
    __riscv_vse32_v_i32m1 (out, v4, 4);
}

/*
** f4:
**	vsetivli\tzero,4,e16,mf4,tu,ma
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsext\.vf2\tv[0-9]+,\s*v[0-9]+
**	vsext\.vf2\tv[0-9]+,\s*v[0-9]+
**	vse16\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
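/* Same _tu pairing as f1, but entirely at fractional LMUL: i8mf8 widened
   to i16mf4, hence the single e16,mf4 vsetivli with the "tu" policy.  */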
void f4 (void * in, void *out)
{
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
    vint16mf4_t v3 = __riscv_vsext_vf2_i16mf4 (v2, 4);
    vint16mf4_t v4 = __riscv_vsext_vf2_i16mf4_tu (v3, v2, 4);
    __riscv_vse16_v_i16mf4 (out, v4, 4);
}

/*
** f5:
**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
**	vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e16,mf4,ta,ma
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsext\.vf2\tv[0-9]+,\s*v[0-9]+
**	vsetvli\tzero,zero,e32,mf2,ta,ma
**	vsext\.vf2\tv[1-9][0-9]?,\s*v[0-9]+,\s*v0\.t
**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
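/* Same masked _m widening as f2, but within fractional LMUL:
   i8mf8 -> i16mf4 -> i32mf2, with the vbool64_t mask loaded under an
   e8,mf8 configuration.  */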
void f5 (void * in, void *out)
{
    vbool64_t mask = *(vbool64_t*)in;
    asm volatile ("":::"memory");
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint16mf4_t v3 = __riscv_vsext_vf2_i16mf4 (v, 4);
    vint32mf2_t v4 = __riscv_vsext_vf2_i32mf2_m (mask, v3, 4);
    __riscv_vse32_v_i32mf2 (out, v4, 4);
}

/*
** f6:
**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
**	vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e16,mf4,tu,mu
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\),v0\.t
**	vsext\.vf2\tv[0-9]+,\s*v[0-9]+
**	vsext\.vf2\tv[1-9][0-9]?,\s*v[0-9]+,\s*v0\.t
**	vse16\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
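/* Same _tumu pairing as f3, but at fractional LMUL (i8mf8 -> i16mf4), so
   the "tu,mu" vsetivli and the v0.t operands must still appear.  */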
void f6 (void * in, void *out)
{
    vbool64_t mask = *(vbool64_t*)in;
    asm volatile ("":::"memory");
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
    vint16mf4_t v3 = __riscv_vsext_vf2_i16mf4 (v2, 4);
    vint16mf4_t v4 = __riscv_vsext_vf2_i16mf4_tumu (mask, v3, v2, 4);
    __riscv_vse16_v_i16mf4 (out, v4, 4);
}