Diffstat (limited to 'test/CodeGen/x86_32-xsave.c')
-rw-r--r--  test/CodeGen/x86_32-xsave.c  60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/test/CodeGen/x86_32-xsave.c b/test/CodeGen/x86_32-xsave.c
index b475d63fb357..f5d84e2d920a 100644
--- a/test/CodeGen/x86_32-xsave.c
+++ b/test/CodeGen/x86_32-xsave.c
@@ -15,57 +15,57 @@ void test() {
void* tmp_vp = 0;
#ifdef TEST_XSAVE
-// XSAVE: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVE: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVE: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
-// XSAVE: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVE: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVE: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
(void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVEOPT
-// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEOPT: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEOPT: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEOPT: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVEC
-// XSAVEC: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEC: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEC: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEC: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVES
-// XSAVES: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVES: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVES: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
-// XSAVES: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVES: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVES: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
(void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
#endif
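
The only substantive change above is the character class in the FileCheck variable patterns: [0-9a-zA-z] becomes [0-9a-zA-Z]. The stray lowercase z turned the last range into A-z, which in ASCII order also covers the six punctuation characters [ \ ] ^ _ ` sitting between 'Z' (90) and 'a' (97), so the old pattern could match characters that are not alphanumeric at all. The standalone program below (not part of the test; it assumes a POSIX <regex.h> and the C locale, where bracket ranges follow ASCII order, and uses plain POSIX ERE rather than FileCheck's own regex engine) demonstrates the difference:

#include <regex.h>
#include <stdio.h>

int main(void) {
    regex_t old_re, new_re;
    /* Compile both character classes as POSIX extended regexes. */
    regcomp(&old_re, "^[0-9a-zA-z]+$", REG_EXTENDED); /* buggy: range A-z */
    regcomp(&new_re, "^[0-9a-zA-Z]+$", REG_EXTENDED); /* fixed: range A-Z */

    /* Underscore (ASCII 95) lies between 'Z' (90) and 'a' (97). */
    const char *probe = "_";
    printf("old class matches \"_\": %s\n",
           regexec(&old_re, probe, 0, NULL, 0) == 0 ? "yes" : "no");
    printf("new class matches \"_\": %s\n",
           regexec(&new_re, probe, 0, NULL, 0) == 0 ? "yes" : "no");

    regfree(&old_re);
    regfree(&new_re);
    return 0;
}

Run on a POSIX system, this should print "yes" for the old class and "no" for the corrected one; the same reasoning applies to the other characters in the Z-a gap.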