--- llvm-14.0.4.src/lib/Analysis/TargetLibraryInfo.cpp.old 2022-05-24 22:02:58.000000000 +0000
+++ llvm-14.0.4.src/lib/Analysis/TargetLibraryInfo.cpp 2022-07-06 01:38:22.016862561 +0000
@@ -634,6 +634,13 @@
     TLI.setUnavailable(LibFunc_statvfs64);
     TLI.setUnavailable(LibFunc_tmpfile64);
 
+    // These functions are unavailable on musl.
+    if (T.isMusl()) {
+      TLI.setUnavailable(LibFunc_roundeven);
+      TLI.setUnavailable(LibFunc_roundevenf);
+      TLI.setUnavailable(LibFunc_roundevenl);
+    }
+
     // Relaxed math functions are included in math-finite.h on Linux (GLIBC).
     // Note that math-finite.h is no longer supported by top-of-tree GLIBC,
     // so we keep these functions around just so that they're recognized by
--- llvm-14.0.4.src/test/Transforms/InstCombine/double-float-shrink-2.ll.old 2022-05-24 22:02:58.000000000 +0000
+++ llvm-14.0.4.src/test/Transforms/InstCombine/double-float-shrink-2.ll 2022-07-06 02:03:20.657791057 +0000
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; REQUIRES: x86-registered-target,sparc-registered-target
-; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-linux" | FileCheck %s --check-prefixes=CHECK,DOUBLE-4BYTE-ALIGN
+; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-linux-gnu" | FileCheck %s --check-prefixes=CHECK,DOUBLE-4BYTE-ALIGN
 ; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-win32" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
 ; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-win32" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
--- llvm-14.0.4.src/test/ExecutionEngine/Interpreter/intrinsics.ll.old 2022-05-24 22:02:58.000000000 +0000
+++ llvm-14.0.4.src/test/ExecutionEngine/Interpreter/intrinsics.ll 2022-07-23 06:50:59.336665672 +0000
@@ -13,8 +13,8 @@
 declare double @llvm.trunc.f64(double)
 declare float @llvm.round.f32(float)
 declare double @llvm.round.f64(double)
-declare float @llvm.roundeven.f32(float)
-declare double @llvm.roundeven.f64(double)
+;declare float @llvm.roundeven.f32(float)
+;declare double @llvm.roundeven.f64(double)
 declare float @llvm.copysign.f32(float, float)
 declare double @llvm.copysign.f64(double, double)
 
@@ -31,8 +31,8 @@
   %trunc64 = call double @llvm.trunc.f64(double 0.000000e+00)
   %round32 = call float @llvm.round.f32(float 0.000000e+00)
   %round64 = call double @llvm.round.f64(double 0.000000e+00)
-  %roundeven32 = call float @llvm.roundeven.f32(float 0.000000e+00)
-  %roundeven64 = call double @llvm.roundeven.f64(double 0.000000e+00)
+  ;%roundeven32 = call float @llvm.roundeven.f32(float 0.000000e+00)
+  ;%roundeven64 = call double @llvm.roundeven.f64(double 0.000000e+00)
   %copysign32 = call float @llvm.copysign.f32(float 0.000000e+00, float 0.000000e+00)
   %copysign64 = call double @llvm.copysign.f64(double 0.000000e+00, double 0.000000e+00)
   ret i32 0