1  /* { dg-do compile } */
       2  /* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
       3  /* { dg-final { check-function-bodies "**" "" } } */
       4  #include "riscv_vector.h"
       5  
       6  /*
       7  ** f1:
       8  **	vsetivli\tzero,4,e32,m1,t[au],mu
       9  **	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
      10  **	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
      11  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      12  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,v0.t
      13  **	vsm\.v\tv[0-9]+,0\([a-x0-9]+\)
      14  **	ret
      15  */
/* Mask-undisturbed compare where the mask and the maskedoff (merge)
   operand are the same value m3, itself produced by the unmasked
   compare.  The asm template above requires a single vsetivli with the
   "mu" mask policy covering both vmslt.vx instructions.  */
void f1 (void * in, void * in2, void *out, int32_t x)
{
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
    /* Unmasked compare producing m3.  */
    vbool32_t m3 = __riscv_vmslt_vx_i32m1_b32 (v, x, 4);
    /* _mu variant: m3 serves as both the mask and the merge operand.  */
    vbool32_t m4 = __riscv_vmslt_vx_i32m1_b32_mu (m3, m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}
      24  
      25  /*
      26  ** f2:
      27  **	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
      28  **	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
      29  **	vsetivli\tzero,4,e32,m1,t[au],mu
      30  **	vle32.v\tv[0-9]+,0\([a-x0-9]+\)
      31  **	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
      32  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      33  **	vmslt\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
      34  **	vsm.v\tv[0-9]+,0\([a-x0-9]+\)
      35  **	ret
      36  */
/* Same as f1 but the mask comes from memory (vlm.v) rather than from a
   preceding compare, and the maskedoff operand of the final compare is a
   distinct value (m3).  The template above requires the "mu" policy and
   that the second vmslt.vx writes a register other than v0 (its mask).  */
void f2 (void * in, void *out, int32_t x)
{
    vbool32_t mask = *(vbool32_t*)in;
    /* Memory clobber: keeps the compiler from folding the mask load into
       the later vector loads from the same pointer.  */
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    /* Masked load using the in-memory mask.  */
    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
    vbool32_t m3 = __riscv_vmslt_vx_i32m1_b32 (v, x, 4);
    /* _mu variant: mask and maskedoff (m3) are different values here.  */
    vbool32_t m4 = __riscv_vmslt_vx_i32m1_b32_mu (mask, m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}
      47  
      48  /*
      49  ** f3:
      50  **	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
      51  **	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
      52  **	vsetivli\tzero,4,e32,m1,t[au],m[au]
      53  **	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
      54  **	vle32\.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
      55  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      56  **	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
      57  **	vsm.v\tv[0-9]+,0\([a-x0-9]+\)
      58  **	ret
      59  */
/* Like f2, but the final compare uses the plain _m variant (mask only, no
   maskedoff operand), so the inactive elements carry no required value —
   the template above therefore accepts either mask policy (m[au]).  */
void f3 (void * in, void *out, int32_t x)
{
    vbool32_t mask = *(vbool32_t*)in;
    /* Memory clobber: keeps the compiler from folding the mask load into
       the later vector loads from the same pointer.  */
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    /* Masked load using the in-memory mask.  */
    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
    vbool32_t m3 = __riscv_vmslt_vx_i32m1_b32 (v, x, 4);
    /* _m variant masked by m3; no merge operand, unlike f1/f2.  */
    vbool32_t m4 = __riscv_vmslt_vx_i32m1_b32_m (m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}