(root)/
gcc-13.2.0/
gcc/
testsuite/
gcc.target/
riscv/
rvv/
base/
binop_vx_constraint-153.c
       1  /* { dg-do compile } */
       2  /* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
       3  /* { dg-final { check-function-bodies "**" "" } } */
       4  #include "riscv_vector.h"
       5  
       6  /*
       7  ** f1:
       8  **	vsetivli\tzero,4,e32,m1,t[au],m[au]
       9  **	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
      10  **	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
      11  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      12  **  vmnot\.m\s+v[0-9]+,\s*v[0-9]+
      13  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      14  **	vmandn\.mm\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+
      15  **	vsm\.v\tv[0-9]+,0\([a-x0-9]+\)
      16  **	ret
      17  */
/* Unmasked vmsge.vx followed by a masked _mu vmsge.vx whose mask operand
   equals its merge (maskedoff) operand.  RVV has no vmsge.vx instruction,
   so each compare is expected to expand to vmslt.vx plus an inversion
   (see the pattern above: vmnot.m for the unmasked one, and the inversion
   of the masked one folded into vmandn.mm).  */
void f1 (void * in, void * in2, void *out, int32_t x)
{
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
    vbool32_t m3 = __riscv_vmsge_vx_i32m1_b32 (v, x, 4);
    /* mask == maskedoff (both m3): exercises the constraint this test is named for.  */
    vbool32_t m4 = __riscv_vmsge_vx_i32m1_b32_mu (m3, m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}
      26  
      27  /*
      28  ** f2:
      29  **	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
      30  **	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
      31  **	vsetivli\tzero,4,e32,m1,t[au],mu
      32  **	vle32.v\tv[0-9]+,0\([a-x0-9]+\)
      33  **	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
      34  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      35  **  vmnot\.m\s+v[0-9]+,\s*v[0-9]+
      36  **	vmslt\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
      37  **	vmxor\.mm\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+
      38  **	vsm.v\tv[0-9]+,0\([a-x0-9]+\)
      39  **	ret
      40  */
/* Like f1, but the _mu compare uses a mask loaded from memory (distinct
   from the merge operand m3), so per the pattern above the masked
   vmsge.vx is expected to expand to a masked vmslt.vx plus vmxor.mm.  */
void f2 (void * in, void *out, int32_t x)
{
    vbool32_t mask = *(vbool32_t*)in;
    /* Compiler barrier: keep the mask load from being combined with the
       vector loads below, so the expected vsetvli/vlm.v prologue is emitted.  */
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
    vbool32_t m3 = __riscv_vmsge_vx_i32m1_b32 (v, x, 4);
    /* mask != maskedoff here, unlike f1.  */
    vbool32_t m4 = __riscv_vmsge_vx_i32m1_b32_mu (mask, m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}
      51  
      52  /*
      53  ** f3:
      54  **	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
      55  **	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
      56  **	vsetivli\tzero,4,e32,m1,t[au],m[au]
      57  **	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
      58  **	vle32\.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
      59  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      60  **  vmnot\.m\s+v[0-9]+,\s*v[0-9]+
      61  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
      62  **	vmxor\.mm\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+
      63  **	vsm.v\tv[0-9]+,0\([a-x0-9]+\)
      64  **	ret
      65  */
/* Like f2, but the second compare uses the plain masked _m variant
   (no explicit merge operand) with m3 as the mask; the pattern above
   still expects masked vmslt.vx plus vmxor.mm for the inversion.  */
void f3 (void * in, void *out, int32_t x)
{
    vbool32_t mask = *(vbool32_t*)in;
    /* Compiler barrier: keep the mask load separate from the vector
       loads so the expected vsetvli/vlm.v prologue is emitted.  */
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
    vbool32_t m3 = __riscv_vmsge_vx_i32m1_b32 (v, x, 4);
    vbool32_t m4 = __riscv_vmsge_vx_i32m1_b32_m (m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}