Diffstat (limited to 'test/CodeGen/AMDGPU/fabs.f64.ll')
-rw-r--r-- test/CodeGen/AMDGPU/fabs.f64.ll | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/test/CodeGen/AMDGPU/fabs.f64.ll b/test/CodeGen/AMDGPU/fabs.f64.ll
index f7780b875ff5..998e02f7bdf8 100644
--- a/test/CodeGen/AMDGPU/fabs.f64.ll
+++ b/test/CodeGen/AMDGPU/fabs.f64.ll
@@ -10,7 +10,7 @@ declare <4 x double> @llvm.fabs.v4f64(<4 x double>) readnone
; FUNC-LABEL: {{^}}v_fabs_f64:
; SI: v_and_b32
; SI: s_endpgm
-define void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
+define amdgpu_kernel void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%tidext = sext i32 %tid to i64
%gep = getelementptr double, double addrspace(1)* %in, i64 %tidext
@@ -24,7 +24,7 @@ define void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
; SI: v_and_b32
; SI-NOT: v_and_b32
; SI: s_endpgm
-define void @fabs_f64(double addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fabs_f64(double addrspace(1)* %out, double %in) {
%fabs = call double @llvm.fabs.f64(double %in)
store double %fabs, double addrspace(1)* %out
ret void
@@ -34,7 +34,7 @@ define void @fabs_f64(double addrspace(1)* %out, double %in) {
; SI: v_and_b32
; SI: v_and_b32
; SI: s_endpgm
-define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
%fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
store <2 x double> %fabs, <2 x double> addrspace(1)* %out
ret void
@@ -46,7 +46,7 @@ define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
; SI: v_and_b32
; SI: v_and_b32
; SI: s_endpgm
-define void @fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
%fabs = call <4 x double> @llvm.fabs.v4f64(<4 x double> %in)
store <4 x double> %fabs, <4 x double> addrspace(1)* %out
ret void
@@ -57,7 +57,7 @@ define void @fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
; SI-NOT: and
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|
; SI: s_endpgm
-define void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
+define amdgpu_kernel void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
%fabs = call double @llvm.fabs.f64(double %in0)
%fmul = fmul double %fabs, %in1
store double %fmul, double addrspace(1)* %out
@@ -69,7 +69,7 @@ define void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1)
; SI-NOT: and
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|
; SI: s_endpgm
-define void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
+define amdgpu_kernel void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
%fabs = call double @fabs(double %in0)
%fmul = fmul double %fabs, %in1
store double %fmul, double addrspace(1)* %out
@@ -79,7 +79,7 @@ define void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in
; FUNC-LABEL: {{^}}fabs_free_f64:
; SI: v_and_b32
; SI: s_endpgm
-define void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
%bc= bitcast i64 %in to double
%fabs = call double @llvm.fabs.f64(double %bc)
store double %fabs, double addrspace(1)* %out
@@ -89,7 +89,7 @@ define void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
; FUNC-LABEL: {{^}}fabs_fn_free_f64:
; SI: v_and_b32
; SI: s_endpgm
-define void @fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
%bc= bitcast i64 %in to double
%fabs = call double @fabs(double %bc)
store double %fabs, double addrspace(1)* %out
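
Every hunk in this diff makes the same mechanical update: each test kernel gains the amdgpu_kernel calling convention on its definition. As a minimal standalone sketch of the updated form (the function name @example_fabs is hypothetical and not part of the test file), a kernel definition after this change reads:

declare double @llvm.fabs.f64(double) readnone

; Hypothetical kernel illustrating the amdgpu_kernel calling convention
; applied throughout the updated test definitions above.
define amdgpu_kernel void @example_fabs(double addrspace(1)* %out, double %in) {
  %fabs = call double @llvm.fabs.f64(double %in)
  store double %fabs, double addrspace(1)* %out
  ret void
}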