[CIR][CIRGen][Builtin][Neon] Lower neon_vabs_v and neon_vabsq_v (#1081)
This now implements the same approach as
[OG](https://github.com/llvm/clangir/blob/7619b20d7461b2d46c17a3154ec4b2f12ca35ea5/clang/lib/CodeGen/CGBuiltin.cpp#L7886):
call an LLVM AArch64 intrinsic that eventually becomes
[an ARM64 instruction](https://developer.arm.com/documentation/ddi0596/2021-03/SIMD-FP-Instructions/ABS--Absolute-value--vector--?lang=en).

However, there is a clear alternative: extend CIR::AbsOp and CIR::FAbsOp to
support vector types and lower them only at the LLVM lowering stage, to either
[LLVM::FAbsOp](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrfabs-llvmfabsop) or
[LLVM::AbsOp](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrabs-llvmabsop),
provided the LLVM dialect does the right thing during target lowering and
eventually translates them to the AArch64 intrinsic.
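
As a rough illustration of that alternative, here is a minimal sketch of a conversion pattern for the integer case in the LLVM lowering pass. The class name, includes, and operand accessors are illustrative assumptions, not existing clangir code:

```cpp
// Hypothetical sketch (not existing clangir code): lower a vector cir.abs to
// the generic llvm.intr.abs and rely on the AArch64 backend to select the
// ABS (vector) instruction during instruction selection.
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Transforms/DialectConversion.h"

class CIRVectorAbsOpLowering : public mlir::OpConversionPattern<cir::AbsOp> {
public:
  using mlir::OpConversionPattern<cir::AbsOp>::OpConversionPattern;

  mlir::LogicalResult
  matchAndRewrite(cir::AbsOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Convert the CIR vector type to the corresponding LLVM vector type.
    mlir::Type resTy = getTypeConverter()->convertType(op.getType());
    // llvm.intr.abs carries an is_int_min_poison flag; vabs is well defined
    // for INT_MIN, so pass false. The floating-point case would map to
    // LLVM::FAbsOp (llvm.intr.fabs) the same way, minus the flag.
    rewriter.replaceOpWithNewOp<mlir::LLVM::AbsOp>(
        op, resTy, adaptor.getOperands()[0], /*is_int_min_poison=*/false);
    return mlir::success();
  }
};
```

Under that scheme the generic llvm.abs/llvm.fabs intrinsics should still be selected to the AArch64 ABS/FABS vector instructions by the backend, so CIRGen would not need a target-specific intrinsic.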

The question is whether that is worth doing.

Anyway, putting this diff up for suggestions and ideas.
ghehg authored Nov 25, 2024
1 parent 9db508a commit 41078e9
Showing 2 changed files with 152 additions and 0 deletions.
9 changes: 9 additions & 0 deletions clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2362,6 +2362,15 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr(
    return emitNeonSplat(builder, getLoc(e->getExprLoc()), ops[0], ops[1],
                         numElements);
  }
  case NEON::BI__builtin_neon_vabs_v:
  case NEON::BI__builtin_neon_vabsq_v: {
    mlir::Location loc = getLoc(e->getExprLoc());
    ops[0] = builder.createBitcast(ops[0], vTy);
    // Emit cir.fabs for floating-point element types, cir.abs for integers.
    if (mlir::isa<cir::SingleType, cir::DoubleType>(vTy.getEltType())) {
      return builder.create<cir::FAbsOp>(loc, ops[0]);
    }
    return builder.create<cir::AbsOp>(loc, ops[0]);
  }
  case NEON::BI__builtin_neon_vmovl_v: {
    cir::VectorType dTy = builder.getExtendedOrTruncatedElementVectorType(
        vTy, false /* truncate */,
143 changes: 143 additions & 0 deletions clang/test/CIR/CodeGen/AArch64/neon-arith.c
@@ -739,3 +739,146 @@ uint64x2_t test_vpaddlq_u32(uint32x4_t a) {
// LLVM: [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> [[A]])
// LLVM: ret <2 x i64> [[VPADDL1_I]]
}

int8x8_t test_vabs_s8(int8x8_t a) {
return vabs_s8(a);

// CIR-LABEL: vabs_s8
// CIR: cir.abs {{%.*}} : !cir.vector<!s8i x 8>

// LLVM: {{.*}}test_vabs_s8(<8 x i8>{{.*}}[[a:%.*]])
// LLVM: [[VABS_I:%.*]] = call <8 x i8> @llvm.abs.v8i8(<8 x i8> [[a]], i1 false)
// LLVM: ret <8 x i8> [[VABS_I]]
}

int8x16_t test_vabsq_s8(int8x16_t a) {
return vabsq_s8(a);

// CIR-LABEL: vabsq_s8
// CIR: cir.abs {{%.*}} : !cir.vector<!s8i x 16>

// LLVM: {{.*}}test_vabsq_s8(<16 x i8>{{.*}}[[a:%.*]])
// LLVM: [[VABS_I:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> [[a]], i1 false)
// LLVM: ret <16 x i8> [[VABS_I]]
}

int16x4_t test_vabs_s16(int16x4_t a) {
return vabs_s16(a);

// CIR-LABEL: vabs_s16
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s16i x 4>
// CIR: cir.abs [[TMP0]] : !cir.vector<!s16i x 4>

// LLVM: {{.*}}test_vabs_s16(<4 x i16>{{.*}}[[a:%.*]])
// LLVM: [[VABS1_I:%.*]] = call <4 x i16> @llvm.abs.v4i16(<4 x i16> [[a]], i1 false)
// LLVM: ret <4 x i16> [[VABS1_I]]
}

int16x8_t test_vabsq_s16(int16x8_t a) {
return vabsq_s16(a);

// CIR-LABEL: vabsq_s16
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s16i x 8>
// CIR: cir.abs [[TMP0]] : !cir.vector<!s16i x 8>

// LLVM: {{.*}}test_vabsq_s16(<8 x i16>{{.*}}[[a:%.*]])
// LLVM: [[VABS1_I:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> [[a]], i1 false)
// LLVM: ret <8 x i16> [[VABS1_I]]
}

int32x2_t test_vabs_s32(int32x2_t a) {
return vabs_s32(a);

// CIR-LABEL: vabs_s32
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s32i x 2>
// CIR: cir.abs [[TMP0]] : !cir.vector<!s32i x 2>

// LLVM: {{.*}}test_vabs_s32(<2 x i32>{{.*}}[[a:%.*]])
// LLVM: [[VABS1_I:%.*]] = call <2 x i32> @llvm.abs.v2i32(<2 x i32> [[a]], i1 false)
// LLVM: ret <2 x i32> [[VABS1_I]]
}

int32x4_t test_vabsq_s32(int32x4_t a) {
return vabsq_s32(a);

// CIR-LABEL: vabsq_s32
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s32i x 4>
// CIR: cir.abs [[TMP0]] : !cir.vector<!s32i x 4>

// LLVM: {{.*}}test_vabsq_s32(<4 x i32>{{.*}}[[a:%.*]])
// LLVM: [[VABS1_I:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[a]], i1 false)
// LLVM: ret <4 x i32> [[VABS1_I]]
}

int64x1_t test_vabs_s64(int64x1_t a) {
return vabs_s64(a);

// CIR-LABEL: vabs_s64
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s64i x 1>
// CIR: cir.abs [[TMP0]] : !cir.vector<!s64i x 1>

// LLVM: {{.*}}test_vabs_s64(<1 x i64>{{.*}}[[a:%.*]])
// LLVM: [[VABS1_I:%.*]] = call <1 x i64> @llvm.abs.v1i64(<1 x i64> [[a]], i1 false)
// LLVM: ret <1 x i64> [[VABS1_I]]
}

int64x2_t test_vabsq_s64(int64x2_t a) {
return vabsq_s64(a);

// CIR-LABEL: vabsq_s64
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s64i x 2>
// CIR: cir.abs [[TMP0]] : !cir.vector<!s64i x 2>

// LLVM: {{.*}}test_vabsq_s64(<2 x i64>{{.*}}[[a:%.*]])
// LLVM: [[VABS1_I:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> [[a]], i1 false)
// LLVM: ret <2 x i64> [[VABS1_I]]
}


float32x2_t test_vabs_f32(float32x2_t a) {
return vabs_f32(a);

// CIR-LABEL: vabs_f32
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!cir.float x 2>
// CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.float x 2>

// LLVM: {{.*}}test_vabs_f32(<2 x float>{{.*}}[[a:%.*]])
// LLVM: [[VABS_F:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[a]])
// LLVM: ret <2 x float> [[VABS_F]]
}

float32x4_t test_vabsq_f32(float32x4_t a) {
return vabsq_f32(a);

// CIR-LABEL: vabsq_f32
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!cir.float x 4>
// CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.float x 4>

// LLVM: {{.*}}test_vabsq_f32(<4 x float>{{.*}}[[a:%.*]])
// LLVM: [[VABS_F:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[a]])
// LLVM: ret <4 x float> [[VABS_F]]
}

float64x1_t test_vabs_f64(float64x1_t a) {
return vabs_f64(a);

// CIR-LABEL: vabs_f64
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!cir.double x 1>
// CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.double x 1>

// LLVM: {{.*}}test_vabs_f64(<1 x double>{{.*}}[[a:%.*]])
// LLVM: [[VABS_F:%.*]] = call <1 x double> @llvm.fabs.v1f64(<1 x double> [[a]])
// LLVM: ret <1 x double> [[VABS_F]]
}

float64x2_t test_vabsq_f64(float64x2_t a) {
return vabsq_f64(a);

// CIR-LABEL: vabsq_f64
// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!cir.double x 2>
// CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.double x 2>

// LLVM: {{.*}}test_vabsq_f64(<2 x double>{{.*}}[[a:%.*]])
// LLVM: [[VABS_F:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[a]])
// LLVM: ret <2 x double> [[VABS_F]]
}
