(root)/
gcc-13.2.0/
gcc/
testsuite/
gcc.target/
riscv/
rvv/
base/
binop_vx_constraint-168.c
       1  /* { dg-do compile } */
       2  /* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
       3  /* { dg-final { check-function-bodies "**" "" } } */
       4  
       5  #include "riscv_vector.h"
       6  
       7  /*
       8  ** f0:
       9  **  ...
      10  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      11  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      12  **  ...
      13  **	ret
      14  */
       15  void f0 (void * in, void *out, int64_t x, int n)
       16  {
           /* Chain two vslide1up.vx ops with scalar constant -16.  vslide1up
              only has a .vx form, so -16 must be materialized in a GPR; the
              pattern above expects two vslide1up.vx and the final
              scan-assembler-not forbids any vmv splat.  The _tu load creates
              a tail-undisturbed dependency chain to stress register
              constraints.  */
       17    vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
       18    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
       19    vint64m1_t v3 = __riscv_vslide1up_vx_i64m1 (v2, -16, 4);
       20    vint64m1_t v4 = __riscv_vslide1up_vx_i64m1 (v3, -16, 4);
       21    __riscv_vse64_v_i64m1 (out + 2, v4, 4);
       22  }
      23  
      24  /*
      25  ** f1:
      26  **  ...
      27  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      28  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      29  **  ...
      30  **	ret
      31  */
       32  void f1 (void * in, void *out, int64_t x, int n)
       33  {
           /* Same shape as f0 but with scalar constant 15; still must be
              emitted as two vslide1up.vx (GPR operand), with no vmv per the
              final scan-assembler-not.  */
       34    vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
       35    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
       36    vint64m1_t v3 = __riscv_vslide1up_vx_i64m1 (v2, 15, 4);
       37    vint64m1_t v4 = __riscv_vslide1up_vx_i64m1 (v3, 15, 4);
       38    __riscv_vse64_v_i64m1 (out + 2, v4, 4);
       39  }
      40  
      41  /*
      42  ** f2:
      43  **  ...
      44  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      45  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      46  **  ...
      47  **	ret
      48  */
       49  void f2 (void * in, void *out, int64_t x, int n)
       50  {
           /* Scalar constant 16: like f1 but one past the typical 5-bit
              signed immediate range; expected codegen is unchanged -- two
              vslide1up.vx with the constant in a GPR and no vmv.  */
       51    vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
       52    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
       53    vint64m1_t v3 = __riscv_vslide1up_vx_i64m1 (v2, 16, 4);
       54    vint64m1_t v4 = __riscv_vslide1up_vx_i64m1 (v3, 16, 4);
       55    __riscv_vse64_v_i64m1 (out + 2, v4, 4);
       56  }
      57  
      58  /*
      59  ** f3:
      60  **  ...
      61  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      62  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      63  **  ...
      64  **	ret
      65  */
       66  void f3 (void * in, void *out, int64_t x, int n)
       67  {
           /* 32-bit constant 0xAAAAAAAA as the scalar operand; must still be
              loaded into a GPR and used by two vslide1up.vx with no vmv.  */
       68    vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
       69    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
       70    vint64m1_t v3 = __riscv_vslide1up_vx_i64m1 (v2, 0xAAAAAAAA, 4);
       71    vint64m1_t v4 = __riscv_vslide1up_vx_i64m1 (v3, 0xAAAAAAAA, 4);
       72    __riscv_vse64_v_i64m1 (out + 2, v4, 4);
       73  }
      74  
      75  /*
      76  ** f4:
      77  **  ...
      78  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      79  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      80  **  ...
      81  **	ret
      82  */
       83  void f4 (void * in, void *out, int64_t x, int n)
       84  {
           /* Full 64-bit constant 0xAAAAAAAAAAAAAAAA as the scalar operand
              (rv64 can hold it in one GPR); expect two vslide1up.vx and no
              vmv, per the patterns above and the final scan.  */
       85    vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
       86    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
       87    vint64m1_t v3 = __riscv_vslide1up_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
       88    vint64m1_t v4 = __riscv_vslide1up_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
       89    __riscv_vse64_v_i64m1 (out + 2, v4, 4);
       90  }
      91  
      92  /*
      93  ** f5:
      94  **  ...
      95  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      96  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
      97  **  ...
      98  **	ret
      99  */
      100  void f5 (void * in, void *out, int64_t x, int n)
      101  {
           /* NOTE(review): this body is byte-identical to f4 (same 64-bit
              constant).  In sibling binop_vx_constraint tests f5 usually
              varies the constant -- presumably intentional duplicate
              coverage here; confirm against the upstream testsuite before
              changing.  */
      102    vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
      103    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
      104    vint64m1_t v3 = __riscv_vslide1up_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
      105    vint64m1_t v4 = __riscv_vslide1up_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
      106    __riscv_vse64_v_i64m1 (out + 2, v4, 4);
      107  }
     108  
     109  /*
     110  ** f6:
     111  **  ...
     112  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
     113  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
     114  **  ...
     115  **	ret
     116  */
      117  void f6 (void * in, void *out, int64_t x, int n)
      118  {
           /* Runtime scalar operand: parameter x is already in a GPR, so the
              two vslide1up.vx should consume it directly with no vmv.  */
      119    vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
      120    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
      121    vint64m1_t v3 = __riscv_vslide1up_vx_i64m1 (v2, x, 4);
      122    vint64m1_t v4 = __riscv_vslide1up_vx_i64m1 (v3, x, 4);
      123    __riscv_vse64_v_i64m1 (out + 2, v4, 4);
      124  }
     125  
     126  /*
     127  ** f7:
     128  **  ...
     129  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*zero
     130  **	vslide1up\.vx\tv[0-9]+,\s*v[0-9]+,\s*zero
     131  **  ...
     132  **	ret
     133  */
      134  void f7 (void * in, void *out, int64_t x, int n)
      135  {
           /* Scalar constant 0: the pattern above requires the hard-wired
              `zero' register as the vslide1up.vx scalar operand (no li, no
              vmv) -- checks the constraint picks x0 for a zero constant.  */
      136    vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
      137    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
      138    vint64m1_t v3 = __riscv_vslide1up_vx_i64m1 (v2, 0, 4);
      139    vint64m1_t v4 = __riscv_vslide1up_vx_i64m1 (v3, 0, 4);
      140    __riscv_vse64_v_i64m1 (out + 2, v4, 4);
      141  }
     142  
     143  /* { dg-final { scan-assembler-not {vmv} } } */