gcc-13.2.0/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/imm_switch-3.c
/* { dg-do compile } */
/* { dg-options "-march=rv32gcv -mabi=ilp32 -fno-schedule-insns -fno-move-loop-invariants" } */

#include "riscv_vector.h"

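/* The loop below mixes loads and stores at many distinct (AVL, SEW, LMUL)
   combinations, presumably to force the vsetvl pass to switch the vector
   configuration between adjacent statements instead of reusing the previous
   one.  The constant AVLs 0, 7, 17 and 27 all fit in vsetivli's 5-bit
   unsigned immediate (0-31); the VLMAX accesses (the vbool64_t mask load
   and the raw vector-pointer copies) should instead use
   "vsetvli <reg>, zero, ...".  */
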
void f (int * restrict in, int * restrict out, void * restrict mask_in, int n)
{
  vbool64_t mask = *(vbool64_t*)mask_in;
  for (int i = 0; i < n; i++)
    {
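      /* AVL 0 at e8/mf8: a plain load/store pair, then tail-undisturbed
         (_tu), masked (_m) and mask-undisturbed (_mu) variants.  A single
         "vsetivli zero, 0, e8, mf8, tu" should cover the whole group,
         since the plain operations do not pin down a tail/mask policy.  */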
      vint8mf8_t v_8mf8_0 = __riscv_vle8_v_i8mf8 ((int8_t *)(in + i), 0);
      __riscv_vse8_v_i8mf8 ((int8_t *)(out + i), v_8mf8_0, 0);

      vint8mf8_t v_8mf8_0_tu = __riscv_vle8_v_i8mf8_tu (v_8mf8_0, (int8_t *)(in + i + 1), 0);
      __riscv_vse8_v_i8mf8_m (mask, (int8_t *)(out + i + 1), v_8mf8_0_tu, 0);

      vint8mf8_t v_8mf8_0_mu = __riscv_vle8_v_i8mf8_mu (mask, v_8mf8_0, (int8_t *)(in + i + 2), 0);
      __riscv_vse8_v_i8mf8 ((int8_t *)(out + i + 2), v_8mf8_0_mu, 0);
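      /* e8/mf8 at AVLs 7, 17 and 27: each immediate should get its own
         vsetivli.  The same three AVLs repeat for every larger e8 LMUL
         below.  */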
      vint8mf8_t v_8mf8_1 = __riscv_vle8_v_i8mf8 ((int8_t *)(in + i + 3), 7);
      __riscv_vse8_v_i8mf8 ((int8_t *)(out + i + 3), v_8mf8_1, 7);

      vint8mf8_t v_8mf8_2 = __riscv_vle8_v_i8mf8 ((int8_t *)(in + i + 4), 17);
      __riscv_vse8_v_i8mf8 ((int8_t *)(out + i + 4), v_8mf8_2, 17);

      vint8mf8_t v_8mf8_3 = __riscv_vle8_v_i8mf8 ((int8_t *)(in + i + 5), 27);
      __riscv_vse8_v_i8mf8 ((int8_t *)(out + i + 5), v_8mf8_3, 27);

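      /* e8, mf4.  */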
      vint8mf4_t v_8mf4_1 = __riscv_vle8_v_i8mf4 ((int8_t *)(in + i + 6), 7);
      __riscv_vse8_v_i8mf4 ((int8_t *)(out + i + 6), v_8mf4_1, 7);

      vint8mf4_t v_8mf4_2 = __riscv_vle8_v_i8mf4 ((int8_t *)(in + i + 7), 17);
      __riscv_vse8_v_i8mf4 ((int8_t *)(out + i + 7), v_8mf4_2, 17);

      vint8mf4_t v_8mf4_3 = __riscv_vle8_v_i8mf4 ((int8_t *)(in + i + 8), 27);
      __riscv_vse8_v_i8mf4 ((int8_t *)(out + i + 8), v_8mf4_3, 27);

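      /* e8, mf2.  */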
      vint8mf2_t v_8mf2_1 = __riscv_vle8_v_i8mf2 ((int8_t *)(in + i + 9), 7);
      __riscv_vse8_v_i8mf2 ((int8_t *)(out + i + 9), v_8mf2_1, 7);

      vint8mf2_t v_8mf2_2 = __riscv_vle8_v_i8mf2 ((int8_t *)(in + i + 10), 17);
      __riscv_vse8_v_i8mf2 ((int8_t *)(out + i + 10), v_8mf2_2, 17);

      vint8mf2_t v_8mf2_3 = __riscv_vle8_v_i8mf2 ((int8_t *)(in + i + 11), 27);
      __riscv_vse8_v_i8mf2 ((int8_t *)(out + i + 11), v_8mf2_3, 27);

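      /* e8, m1.  */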
      vint8m1_t v_8m1_1 = __riscv_vle8_v_i8m1 ((int8_t *)(in + i + 12), 7);
      __riscv_vse8_v_i8m1 ((int8_t *)(out + i + 12), v_8m1_1, 7);

      vint8m1_t v_8m1_2 = __riscv_vle8_v_i8m1 ((int8_t *)(in + i + 13), 17);
      __riscv_vse8_v_i8m1 ((int8_t *)(out + i + 13), v_8m1_2, 17);

      vint8m1_t v_8m1_3 = __riscv_vle8_v_i8m1 ((int8_t *)(in + i + 14), 27);
      __riscv_vse8_v_i8m1 ((int8_t *)(out + i + 14), v_8m1_3, 27);

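      /* e8, m2.  */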
      vint8m2_t v_8m2_1 = __riscv_vle8_v_i8m2 ((int8_t *)(in + i + 15), 7);
      __riscv_vse8_v_i8m2 ((int8_t *)(out + i + 15), v_8m2_1, 7);

      vint8m2_t v_8m2_2 = __riscv_vle8_v_i8m2 ((int8_t *)(in + i + 16), 17);
      __riscv_vse8_v_i8m2 ((int8_t *)(out + i + 16), v_8m2_2, 17);

      vint8m2_t v_8m2_3 = __riscv_vle8_v_i8m2 ((int8_t *)(in + i + 17), 27);
      __riscv_vse8_v_i8m2 ((int8_t *)(out + i + 17), v_8m2_3, 27);

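      /* e8, m4.  */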
      vint8m4_t v_8m4_1 = __riscv_vle8_v_i8m4 ((int8_t *)(in + i + 18), 7);
      __riscv_vse8_v_i8m4 ((int8_t *)(out + i + 18), v_8m4_1, 7);

      vint8m4_t v_8m4_2 = __riscv_vle8_v_i8m4 ((int8_t *)(in + i + 19), 17);
      __riscv_vse8_v_i8m4 ((int8_t *)(out + i + 19), v_8m4_2, 17);

      vint8m4_t v_8m4_3 = __riscv_vle8_v_i8m4 ((int8_t *)(in + i + 20), 27);
      __riscv_vse8_v_i8m4 ((int8_t *)(out + i + 20), v_8m4_3, 27);

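      /* e8, m8.  */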
      vint8m8_t v_8m8_1 = __riscv_vle8_v_i8m8 ((int8_t *)(in + i + 21), 7);
      __riscv_vse8_v_i8m8 ((int8_t *)(out + i + 21), v_8m8_1, 7);

      vint8m8_t v_8m8_2 = __riscv_vle8_v_i8m8 ((int8_t *)(in + i + 22), 17);
      __riscv_vse8_v_i8m8 ((int8_t *)(out + i + 22), v_8m8_2, 17);

      vint8m8_t v_8m8_3 = __riscv_vle8_v_i8m8 ((int8_t *)(in + i + 23), 27);
      __riscv_vse8_v_i8m8 ((int8_t *)(out + i + 23), v_8m8_3, 27);

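      /* Whole-value vector copies through pointer dereferences run at
         VLMAX, so these should come out as "vsetvli <reg>, zero, ..."
         rather than vsetivli, one per (SEW, LMUL) combination.  */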
      vuint16mf4_t v_16mf4_1 = *(vuint16mf4_t*)(in + 24 + i);
      *(vuint16mf4_t*)(out + 24 + i) = v_16mf4_1;

      vuint16mf2_t v_16mf2_1 = *(vuint16mf2_t*)(in + 25 + i);
      *(vuint16mf2_t*)(out + 25 + i) = v_16mf2_1;

      vuint32mf2_t v_32mf2_4 = *(vuint32mf2_t*)(in + 26 + i);
      *(vuint32mf2_t*)(out + 26 + i) = v_32mf2_4;

      vuint8mf2_t v_8mf2_4 = *(vuint8mf2_t*)(in + 27 + i);
      *(vuint8mf2_t*)(out + 27 + i) = v_8mf2_4;

      vuint8mf4_t v_8mf4_4 = *(vuint8mf4_t*)(in + 28 + i);
      *(vuint8mf4_t*)(out + 28 + i) = v_8mf4_4;

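      /* The e32 series, mf2 first, with the same three immediates.  */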
      vint32mf2_t v_32mf2_1 = __riscv_vle32_v_i32mf2 ((int32_t *)(in + i + 29), 7);
      __riscv_vse32_v_i32mf2 ((int32_t *)(out + i + 29), v_32mf2_1, 7);

      vint32mf2_t v_32mf2_2 = __riscv_vle32_v_i32mf2 ((int32_t *)(in + i + 30), 17);
      __riscv_vse32_v_i32mf2 ((int32_t *)(out + i + 30), v_32mf2_2, 17);

      vint32mf2_t v_32mf2_3 = __riscv_vle32_v_i32mf2 ((int32_t *)(in + i + 31), 27);
      __riscv_vse32_v_i32mf2 ((int32_t *)(out + i + 31), v_32mf2_3, 27);

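      /* e32, m1.  */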
      vint32m1_t v_32m1_1 = __riscv_vle32_v_i32m1 ((int32_t *)(in + i + 32), 7);
      __riscv_vse32_v_i32m1 ((int32_t *)(out + i + 32), v_32m1_1, 7);

      vint32m1_t v_32m1_2 = __riscv_vle32_v_i32m1 ((int32_t *)(in + i + 33), 17);
      __riscv_vse32_v_i32m1 ((int32_t *)(out + i + 33), v_32m1_2, 17);

      vint32m1_t v_32m1_3 = __riscv_vle32_v_i32m1 ((int32_t *)(in + i + 34), 27);
      __riscv_vse32_v_i32m1 ((int32_t *)(out + i + 34), v_32m1_3, 27);

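      /* e32, m2.  */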
      vint32m2_t v_32m2_1 = __riscv_vle32_v_i32m2 ((int32_t *)(in + i + 35), 7);
      __riscv_vse32_v_i32m2 ((int32_t *)(out + i + 35), v_32m2_1, 7);

      vint32m2_t v_32m2_2 = __riscv_vle32_v_i32m2 ((int32_t *)(in + i + 36), 17);
      __riscv_vse32_v_i32m2 ((int32_t *)(out + i + 36), v_32m2_2, 17);

      vint32m2_t v_32m2_3 = __riscv_vle32_v_i32m2 ((int32_t *)(in + i + 37), 27);
      __riscv_vse32_v_i32m2 ((int32_t *)(out + i + 37), v_32m2_3, 27);

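      /* e32, m4.  */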
      vint32m4_t v_32m4_1 = __riscv_vle32_v_i32m4 ((int32_t *)(in + i + 38), 7);
      __riscv_vse32_v_i32m4 ((int32_t *)(out + i + 38), v_32m4_1, 7);

      vint32m4_t v_32m4_2 = __riscv_vle32_v_i32m4 ((int32_t *)(in + i + 39), 17);
      __riscv_vse32_v_i32m4 ((int32_t *)(out + i + 39), v_32m4_2, 17);

      vint32m4_t v_32m4_3 = __riscv_vle32_v_i32m4 ((int32_t *)(in + i + 40), 27);
      __riscv_vse32_v_i32m4 ((int32_t *)(out + i + 40), v_32m4_3, 27);

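      /* e32, m8.  */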
      vint32m8_t v_32m8_1 = __riscv_vle32_v_i32m8 ((int32_t *)(in + i + 41), 7);
      __riscv_vse32_v_i32m8 ((int32_t *)(out + i + 41), v_32m8_1, 7);

      vint32m8_t v_32m8_2 = __riscv_vle32_v_i32m8 ((int32_t *)(in + i + 42), 17);
      __riscv_vse32_v_i32m8 ((int32_t *)(out + i + 42), v_32m8_2, 17);

      vint32m8_t v_32m8_3 = __riscv_vle32_v_i32m8 ((int32_t *)(in + i + 43), 27);
      __riscv_vse32_v_i32m8 ((int32_t *)(out + i + 43), v_32m8_3, 27);
    }
}

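/* Each (AVL, SEW, LMUL) combination above is expected to produce exactly
   one vsetvli or vsetivli.  The scans below are skipped when the testsuite
   adds -O0, -g or -funroll-loops, since those would change the emitted
   counts.  */
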
/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*0,\s*e8,\s*mf8,\s*tu,\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e8,\s*mf4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e8,\s*mf4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e8,\s*mf4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e8,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e8,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e8,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e8,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e8,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e8,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e8,\s*m2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e8,\s*m2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e8,\s*m2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e8,\s*m4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e8,\s*m4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e8,\s*m4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e8,\s*m8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e8,\s*m8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e8,\s*m8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e32,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e32,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e32,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e32,\s*m2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e32,\s*m2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e32,\s*m2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e32,\s*m4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e32,\s*m4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e32,\s*m4,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*7,\s*e32,\s*m8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*17,\s*e32,\s*m8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*27,\s*e32,\s*m8,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetvli} 6 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */
/* { dg-final { scan-assembler-times {vsetivli} 37 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */