gcc-13.2.0/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-17.c
/* { dg-do compile } */
/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "riscv_vector.h"

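/* This test pairs each RVV intrinsic with a policy variant and uses
   check-function-bodies to verify that the expected vsetvli/vsetivli
   policy bits (tu/ta, mu/ma) and register constraints show up in the
   generated assembly.  */

/* f1: unmasked tail-undisturbed (_tu) variants.  The merge operand should
   force "tu,ma" in the vsetivli, and both vmul.vx results stay unmasked.  */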
/*
** f1:
**  ...
**	vsetivli\tzero,4,e32,m1,tu,ma
**  ...
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f1 (void * in, void *out, int32_t x)
{
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, 5, 4);
    vint32m1_t v4 = __riscv_vmul_vx_i32m1_tu (v3, v2, 5, 4);
    __riscv_vse32_v_i32m1 (out, v4, 4);
}

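/* f2: mask-only (_m) variants.  The vbool32_t mask is loaded with vlm.v
   under its own e8,mf4 vsetvli; the masked vle32/vmul then run as "ta,ma",
   and the masked vmul.vx must not write v0 while the mask lives there
   (the v[1-9][0-9]? destination pattern below checks exactly that).  */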
/*
** f2:
**  ...
**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
**  ...
**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vsetivli\tzero,4,e32,m1,ta,ma
**  ...
**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
**  ...
**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f2 (void * in, void *out, int32_t x)
{
    vbool32_t mask = *(vbool32_t*)in;
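    /* Compiler barrier: the "memory" clobber keeps the vector loads below
       from being moved above (or folded into) the mask load.  */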
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, 5, 4);
    vint32m1_t v4 = __riscv_vmul_vx_i32m1_m (mask, v3, 5, 4);
    __riscv_vse32_v_i32m1 (out, v4, 4);
}

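/* f3: tail-undisturbed, mask-undisturbed (_tumu) variants, expected to
   select "tu,mu" and, as in f2, to keep the masked vmul.vx result off v0.  */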
/*
** f3:
**  ...
**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
**  ...
**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vsetivli\tzero,4,e32,m1,tu,mu
**  ...
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
**  ...
**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f3 (void * in, void *out, int32_t x)
{
    vbool32_t mask = *(vbool32_t*)in;
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, 5, 4);
    vint32m1_t v4 = __riscv_vmul_vx_i32m1_tumu (mask, v3, v2, 5, 4);
    __riscv_vse32_v_i32m1 (out, v4, 4);
}

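/* f4: the unmasked _tu pattern of f1, repeated at SEW=8 with the mf8
   fractional LMUL, so the vsetivli should read "e8,mf8,tu,ma".  */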
/*
** f4:
**  ...
**	vsetivli\tzero,4,e8,mf8,tu,ma
**  ...
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f4 (void * in, void *out, int8_t x)
{
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, 5, 4);
    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_tu (v3, v2, 5, 4);
    __riscv_vse8_v_i8mf8 (out, v4, 4);
}

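/* f5: the masked _m pattern of f2, repeated at e8/mf8 with a vbool64_t
   mask (int8 elements at mf8 give SEW/LMUL = 64, hence vbool64_t).  */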
/*
** f5:
**  ...
**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
**  ...
**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vsetivli\tzero,4,e8,mf8,ta,ma
**  ...
**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
**  ...
**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f5 (void * in, void *out, int8_t x)
{
    vbool64_t mask = *(vbool64_t*)in;
    asm volatile ("":::"memory");
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, 5, 4);
    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_m (mask, v3, 5, 4);
    __riscv_vse8_v_i8mf8 (out, v4, 4);
}

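/* f6: the _tumu pattern of f3, repeated at e8/mf8 ("tu,mu" expected).  */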
/*
** f6:
**  ...
**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
**  ...
**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
**  ...
**	vsetivli\tzero,4,e8,mf8,tu,mu
**  ...
**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f6 (void * in, void *out, int8_t x)
{
    vbool64_t mask = *(vbool64_t*)in;
    asm volatile ("":::"memory");
    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, 5, 4);
    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_tumu (mask, v3, v2, 5, 4);
    __riscv_vse8_v_i8mf8 (out, v4, 4);
}