@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2(<vsc
5151;
5252; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2:
5353; CHECK-64: # %bb.0: # %entry
54- ; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
54+ ; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
5555; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
5656; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
5757; CHECK-64-NEXT: ret
@@ -84,7 +84,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3(<vsc
8484;
8585; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
8686; CHECK-64: # %bb.0: # %entry
87- ; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
87+ ; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
8888; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
8989; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
9090; CHECK-64-NEXT: ret
@@ -117,7 +117,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8(<vsc
117117;
118118; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
119119; CHECK-64: # %bb.0: # %entry
120- ; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
120+ ; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
121121; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
122122; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
123123; CHECK-64-NEXT: ret
@@ -152,7 +152,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9(<vsc
152152;
153153; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
154154; CHECK-64: # %bb.0: # %entry
155- ; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
155+ ; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
156156; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
157157; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
158158; CHECK-64-NEXT: ret
@@ -187,7 +187,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15(<vs
187187;
188188; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
189189; CHECK-64: # %bb.0: # %entry
190- ; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
190+ ; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
191191; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
192192; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
193193; CHECK-64-NEXT: ret
@@ -213,14 +213,14 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16(<vs
213213;
214214; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
215215; CHECK-512: # %bb.0: # %entry
216- ; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, ma
216+ ; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
217217; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
218218; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
219219; CHECK-512-NEXT: ret
220220;
221221; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
222222; CHECK-64: # %bb.0: # %entry
223- ; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
223+ ; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
224224; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
225225; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
226226; CHECK-64-NEXT: ret
@@ -247,14 +247,14 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047(<
247247;
248248; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
249249; CHECK-512: # %bb.0: # %entry
250- ; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, ma
250+ ; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
251251; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
252252; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
253253; CHECK-512-NEXT: ret
254254;
255255; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
256256; CHECK-64: # %bb.0: # %entry
257- ; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
257+ ; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
258258; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
259259; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
260260; CHECK-64-NEXT: ret
@@ -269,12 +269,26 @@ entry:
269269}
270270
271271define <vscale x 1 x i64 > @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048 (<vscale x 1 x i64 > %0 , i64 %1 ) nounwind {
272- ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
273- ; CHECK: # %bb.0: # %entry
274- ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
275- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
276- ; CHECK-NEXT: vslide1down.vx v8, v8, a1
277- ; CHECK-NEXT: ret
272+ ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
273+ ; CHECK-128-65536: # %bb.0: # %entry
274+ ; CHECK-128-65536-NEXT: vsetvli a2, zero, e32, m1, ta, ma
275+ ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
276+ ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
277+ ; CHECK-128-65536-NEXT: ret
278+ ;
279+ ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
280+ ; CHECK-512: # %bb.0: # %entry
281+ ; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
282+ ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
283+ ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
284+ ; CHECK-512-NEXT: ret
285+ ;
286+ ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
287+ ; CHECK-64: # %bb.0: # %entry
288+ ; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
289+ ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
290+ ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
291+ ; CHECK-64-NEXT: ret
278292entry:
279293 %a = call <vscale x 1 x i64 > @llvm.riscv.vslide1down.nxv1i64.i64 (
280294 <vscale x 1 x i64 > undef ,
0 commit comments