From 28e11623bd22fcf933350aae980b28008f113286 Mon Sep 17 00:00:00 2001
From: Nikolas Nyby
Date: Tue, 6 Aug 2019 07:19:19 -0400
Subject: [PATCH] Codechange: math functions - use cpp-style casts

---
 src/core/bitmath_func.hpp | 10 +++++-----
 src/core/math_func.hpp    | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/core/bitmath_func.hpp b/src/core/bitmath_func.hpp
index fd05aa3f59..8fdc7100e8 100644
--- a/src/core/bitmath_func.hpp
+++ b/src/core/bitmath_func.hpp
@@ -367,12 +367,12 @@ static inline T ROR(const T x, const uint8 n)
 	 * (since it will use hardware swapping if available).
 	 * Even though they should return uint16 and uint32, we get
 	 * warnings if we don't cast those (why?) */
-	#define BSWAP32(x) ((uint32)CFSwapInt32(x))
-	#define BSWAP16(x) ((uint16)CFSwapInt16(x))
+#	define BSWAP32(x) (static_cast<uint32>(CFSwapInt32(x)))
+#	define BSWAP16(x) (static_cast<uint16>(CFSwapInt16(x)))
 #elif defined(_MSC_VER)
 	/* MSVC has intrinsics for swapping, resulting in faster code */
-	#define BSWAP32(x) (_byteswap_ulong(x))
-	#define BSWAP16(x) (_byteswap_ushort(x))
+#	define BSWAP32(x) (_byteswap_ulong(x))
+#	define BSWAP16(x) (_byteswap_ushort(x))
 #else
 	/**
 	 * Perform a 32 bits endianness bitswap on x.
@@ -383,7 +383,7 @@ static inline T ROR(const T x, const uint8 n)
 	{
 #if !defined(__ICC) && defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ >= 3))
 		/* GCC >= 4.3 provides a builtin, resulting in faster code */
-		return (uint32)__builtin_bswap32((int32)x);
+		return static_cast<uint32>(__builtin_bswap32(static_cast<int32>(x)));
 #else
 		return ((x >> 24) & 0xFF) | ((x >> 8) & 0xFF00) | ((x << 8) & 0xFF0000) | ((x << 24) & 0xFF000000);
 #endif /* defined(__GNUC__) */
diff --git a/src/core/math_func.hpp b/src/core/math_func.hpp
index 0b51d6bbff..570f54c232 100644
--- a/src/core/math_func.hpp
+++ b/src/core/math_func.hpp
@@ -115,7 +115,7 @@ template <typename T>
 static inline T *AlignPtr(T *x, uint n)
 {
 	assert_compile(sizeof(size_t) == sizeof(void *));
-	return (T *)Align((size_t)x, n);
+	return reinterpret_cast<T *>(Align((size_t)x, n));
 }
 
 /**
@@ -202,7 +202,7 @@ static inline uint ClampU(const uint a, const uint min, const uint max)
  */
 static inline int32 ClampToI32(const int64 a)
 {
-	return (int32)Clamp(a, INT32_MIN, INT32_MAX);
+	return static_cast<int32>(Clamp(a, INT32_MIN, INT32_MAX));
 }
 
 /**
@@ -218,7 +218,7 @@ static inline uint16 ClampToU16(const uint64 a)
 	 * match for min(uint64, uint) than uint64 min(uint64, uint64). As such we
 	 * need to cast the UINT16_MAX to prevent MSVC from displaying its
 	 * infinite loads of warnings. */
-	return (uint16)min(a, (uint64)UINT16_MAX);
+	return static_cast<uint16>(min(a, static_cast<uint64>(UINT16_MAX)));
 }
 
 /**
@@ -339,10 +339,10 @@ static inline int RoundDivSU(int a, uint b)
 {
 	if (a > 0) {
 		/* 0.5 is rounded to 1 */
-		return (a + (int)b / 2) / (int)b;
+		return (a + static_cast<int>(b) / 2) / static_cast<int>(b);
 	} else {
 		/* -0.5 is rounded to 0 */
-		return (a - ((int)b - 1) / 2) / (int)b;
+		return (a - (static_cast<int>(b) - 1) / 2) / static_cast<int>(b);
 	}
 }
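
For reference, a minimal standalone C++ sketch (not part of the patch) of the cast style the change adopts, using the patched RoundDivSU as an example. The test values in main() are illustrative assumptions, not taken from the OpenTTD test suite.

/* Minimal sketch: cpp-style casts in a signed/unsigned rounding division,
 * mirroring the patched RoundDivSU. Illustrative only, not OpenTTD code. */
#include <cassert>

static inline int RoundDivSU(int a, unsigned int b)
{
	if (a > 0) {
		/* 0.5 is rounded to 1 */
		return (a + static_cast<int>(b) / 2) / static_cast<int>(b);
	} else {
		/* -0.5 is rounded to 0 */
		return (a - (static_cast<int>(b) - 1) / 2) / static_cast<int>(b);
	}
}

int main()
{
	assert(RoundDivSU(3, 2)  ==  2); /*  1.5 ->  2 */
	assert(RoundDivSU(1, 2)  ==  1); /*  0.5 ->  1 */
	assert(RoundDivSU(-1, 2) ==  0); /* -0.5 ->  0 */
	assert(RoundDivSU(-3, 2) == -1); /* -1.5 -> -1 */
	return 0;
}

Compared with the old (int)b form, static_cast<int>(b) expresses the same value conversion but is easier to grep for and cannot silently become a reinterpret_cast or const_cast.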