diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6fa5879776..8b0d972160 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -115,25 +115,53 @@ if(DOLPHIN_IS_STABLE)
 else()
 	set(DOLPHIN_VERSION_PATCH ${DOLPHIN_WC_REVISION})
 endif()
+
+# Architecture detection and arch specific settings
 message(${CMAKE_SYSTEM_PROCESSOR})
-if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^arm")
-	set(_M_GENERIC 1)
+
+# Detect 64bit or 32bit
+# CMake doesn't provide a simple way to determine 32bit or 64bit
+# If we ever support an architecture that is 64bit with 32bit pointers then this'll break
+# Of course the chances of that are slim (x32?) so who cares
+if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+	set(_ARCH_64 1)
+	add_definitions(-D_ARCH_64=1)
+else()
+	set(_ARCH_32 1)
+	add_definitions(-D_ARCH_32=1)
+endif()
+
+if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^x86")
+	add_definitions(-msse2)
+	set(_M_X86 1)
+	add_definitions(-D_M_X86=1)
+	if(_ARCH_64)
+		set(_M_X86_64 1)
+		add_definitions(-D_M_X86_64=1)
+	else()
+		set(_M_X86_32 1)
+		add_definitions(-D_M_X86_32=1)
+	endif()
+elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^arm")
+	# This option only applies to 32bit ARM
 	set(_M_ARM 1)
+	set(_M_ARM_32 1)
+	add_definitions(-D_M_ARM=1 -D_M_ARM_32=1)
+	set(_M_GENERIC 1)
+	add_definitions(-D_M_GENERIC=1)
 	if(${ANDROID_NDK_ABI_NAME} MATCHES "armeabi-v7a")
 		add_definitions(-marm -march=armv7-a)
 	endif()
-endif()
-
-if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "mips")
+elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
+	# This option only applies to 64bit ARM
+	set(_M_ARM 1)
+	set(_M_ARM_64 1)
+	add_definitions(-D_M_ARM=1 -D_M_ARM_64=1)
 	set(_M_GENERIC 1)
-endif()
-
-# Set these next two lines to test generic
-#set(_M_GENERIC 1)
-#add_definitions(-D_M_GENERIC=1)
-# Various compile flags
-if(NOT _M_GENERIC)
-	add_definitions(-msse2)
+	add_definitions(-D_M_GENERIC=1)
+else()
+	set(_M_GENERIC 1)
+	add_definitions(-D_M_GENERIC=1)
 endif()
 
 include(CheckCXXCompilerFlag)
diff --git a/Source/Core/Common/CMakeLists.txt b/Source/Core/Common/CMakeLists.txt
index 4614c7d266..02d46e1987 100644
--- a/Source/Core/Common/CMakeLists.txt
+++ b/Source/Core/Common/CMakeLists.txt
@@ -27,15 +27,14 @@ set(SRCS BreakPoints.cpp
 	Crypto/bn.cpp
 	Crypto/ec.cpp)
-if(_M_ARM) #ARM
-	set(SRCS ${SRCS}
-		ArmCPUDetect.cpp
-		ArmEmitter.cpp)
+if(_M_ARM_32) #ARMv7
+	set(SRCS ${SRCS}
+		ArmCPUDetect.cpp
+		ArmEmitter.cpp)
 else()
-	if(NOT _M_GENERIC) #X86
-		set(SRCS ${SRCS}
-			x64FPURoundMode.cpp
-		)
+	if(_M_X86) #X86
+		set(SRCS ${SRCS}
+			x64FPURoundMode.cpp)
 	endif()
 	set(SRCS ${SRCS} x64CPUDetect.cpp)
 endif()
diff --git a/Source/Core/Common/Common.h b/Source/Core/Common/Common.h
index 77307e73e3..6b1a50b8d3 100644
--- a/Source/Core/Common/Common.h
+++ b/Source/Core/Common/Common.h
@@ -101,25 +101,25 @@ private:
 #endif
+
+// Architecture detection for Windows
+// Architecture detection is done in cmake on all other platforms
+// Windows is built on only x86/x86_64
+#if _WIN32 || _WIN64
+#define _M_X86 1
+#if _WIN64
+#define _ARCH_64 1
+#define _M_X86_64 1
+#else
+#define _ARCH_32 1
+#define _M_X86_32 1
+#endif
+#endif
+
 // Windows compatibility
-#define _M_64BIT defined(_LP64) || defined(_WIN64)
 #ifndef _WIN32
 #include <limits.h>
 #define MAX_PATH PATH_MAX
-#ifdef __x86_64__
-#define _M_X64 1
-#endif
-#ifdef __i386__
-#define _M_IX86 1
-#endif
-#ifdef __arm__
-#define _M_ARM 1
-#define _M_GENERIC 1
-#endif
-#ifdef __mips__
-#define _M_MIPS 1
-#define _M_GENERIC 1
-#endif
+
 #define __forceinline inline __attribute__((always_inline))
 #define GC_ALIGNED16(x) __attribute__((aligned(16))) x
 #define GC_ALIGNED32(x)
__attribute__((aligned(32))) x diff --git a/Source/Core/Common/CommonFuncs.h b/Source/Core/Common/CommonFuncs.h index ab683221b3..f46cc47572 100644 --- a/Source/Core/Common/CommonFuncs.h +++ b/Source/Core/Common/CommonFuncs.h @@ -148,7 +148,7 @@ inline u64 _rotr64(u64 x, unsigned int shift){ #define fstat64 _fstat64 #define fileno _fileno - #if _M_IX86 + #if _M_X86_32 #define Crash() {__asm int 3} #else extern "C" { @@ -188,7 +188,7 @@ inline u32 swap24(const u8* _data) {return (_data[0] << 16) | (_data[1] << 8) | inline u16 swap16(u16 _data) {return _byteswap_ushort(_data);} inline u32 swap32(u32 _data) {return _byteswap_ulong (_data);} inline u64 swap64(u64 _data) {return _byteswap_uint64(_data);} -#elif _M_ARM +#elif _M_ARM_32 inline u16 swap16 (u16 _data) { u32 data = _data; __asm__ ("rev16 %0, %1\n" : "=l" (data) : "l" (data)); return (u16)data;} inline u32 swap32 (u32 _data) {__asm__ ("rev %0, %1\n" : "=l" (_data) : "l" (_data)); return _data;} inline u64 swap64(u64 _data) {return ((u64)swap32(_data) << 32) | swap32(_data >> 32);} diff --git a/Source/Core/Common/ExtendedTrace.cpp b/Source/Core/Common/ExtendedTrace.cpp index e3586e4fb7..081d2969d9 100644 --- a/Source/Core/Common/ExtendedTrace.cpp +++ b/Source/Core/Common/ExtendedTrace.cpp @@ -152,7 +152,7 @@ static BOOL GetFunctionInfoFromAddresses( ULONG fnAddress, ULONG stackAddress, L _tcscpy( lpszSymbol, _T("?") ); // Get symbol info for IP -#ifndef _M_X64 +#if _M_X86_32 DWORD dwDisp = 0; if ( SymGetSymFromAddr( GetCurrentProcess(), (ULONG)fnAddress, &dwDisp, pSym ) ) #else @@ -313,7 +313,7 @@ void StackTrace( HANDLE hThread, const char* lpszMessage, FILE *file ) } ::ZeroMemory( &callStack, sizeof(callStack) ); -#ifndef _M_X64 +#if _M_X86_32 callStack.AddrPC.Offset = context.Eip; callStack.AddrStack.Offset = context.Esp; callStack.AddrFrame.Offset = context.Ebp; diff --git a/Source/Core/Common/Hash.cpp b/Source/Core/Common/Hash.cpp index df1010c419..f9a6d6fd4d 100644 --- a/Source/Core/Common/Hash.cpp +++ b/Source/Core/Common/Hash.cpp @@ -99,7 +99,7 @@ u32 HashEctor(const u8* ptr, int length) } -#ifdef _M_X64 +#if _ARCH_64 //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only diff --git a/Source/Core/Common/MathUtil.h b/Source/Core/Common/MathUtil.h index 14b9309fc6..0febed3014 100644 --- a/Source/Core/Common/MathUtil.h +++ b/Source/Core/Common/MathUtil.h @@ -155,7 +155,7 @@ inline int Log2(u64 val) #if defined(__GNUC__) return 63 - __builtin_clzll(val); -#elif defined(_MSC_VER) && defined(_M_X64) +#elif defined(_MSC_VER) && _ARCH_64 unsigned long result = -1; _BitScanReverse64(&result, val); return result; diff --git a/Source/Core/Common/MemArena.cpp b/Source/Core/Common/MemArena.cpp index 63ccd63df1..ac55dc0371 100644 --- a/Source/Core/Common/MemArena.cpp +++ b/Source/Core/Common/MemArena.cpp @@ -129,7 +129,7 @@ void MemArena::ReleaseView(void* view, size_t size) u8* MemArena::Find4GBBase() { -#ifdef _M_X64 +#if _ARCH_64 #ifdef _WIN32 // 64 bit u8* base = (u8*)VirtualAlloc(0, 0xE1000000, MEM_RESERVE, PAGE_READWRITE); @@ -206,7 +206,7 @@ static bool Memory_TryBase(u8 *base, const MemoryView *views, int num_views, u32 if (!*views[i].out_ptr_low) goto bail; } -#ifdef _M_X64 +#if _ARCH_64 *views[i].out_ptr = (u8*)arena->CreateView( position, views[i].size, base + views[i].virtual_address); #else @@ -247,7 +247,7 @@ u8 *MemoryMap_Setup(const MemoryView *views, int num_views, u32 flags, MemArena arena->GrabLowMemSpace(total_mem); // Now, 
create views in high memory where there's plenty of space. -#ifdef _M_X64 +#if _ARCH_64 u8 *base = MemArena::Find4GBBase(); // This really shouldn't fail - in 64-bit, there will always be enough // address space. diff --git a/Source/Core/Common/MemoryUtil.cpp b/Source/Core/Common/MemoryUtil.cpp index cfdc3ad730..441ba2f10b 100644 --- a/Source/Core/Common/MemoryUtil.cpp +++ b/Source/Core/Common/MemoryUtil.cpp @@ -75,7 +75,7 @@ void* AllocateExecutableMemory(size_t size, bool low) } #endif -#if defined(_M_X64) +#if _ARCH_64 if ((u64)ptr >= 0x80000000 && low == true) PanicAlert("Executable memory ended up above 2GB!"); #endif diff --git a/Source/Core/Common/StdConditionVariable.h b/Source/Core/Common/StdConditionVariable.h index 810b15ce74..05b753df73 100644 --- a/Source/Core/Common/StdConditionVariable.h +++ b/Source/Core/Common/StdConditionVariable.h @@ -40,7 +40,7 @@ #define USE_RVALUE_REFERENCES #endif -#if defined(_WIN32) && defined(_M_X64) +#if defined(_WIN32) && _M_X86_64 #define USE_CONDITION_VARIABLES #elif defined(_WIN32) #define USE_EVENTS diff --git a/Source/Core/Common/StdMutex.h b/Source/Core/Common/StdMutex.h index 904743db80..0559ee8fc9 100644 --- a/Source/Core/Common/StdMutex.h +++ b/Source/Core/Common/StdMutex.h @@ -44,7 +44,7 @@ #define USE_RVALUE_REFERENCES #endif -#if defined(_WIN32) && defined(_M_X64) +#if defined(_WIN32) && _M_X86_64 #define USE_SRWLOCKS #endif diff --git a/Source/Core/Common/Version.cpp b/Source/Core/Common/Version.cpp index 1223bf373d..0e282ad7e8 100644 --- a/Source/Core/Common/Version.cpp +++ b/Source/Core/Common/Version.cpp @@ -24,14 +24,16 @@ const char *scm_rev_str = "Dolphin " BUILD_TYPE_STR SCM_DESC_STR; #endif -#ifdef _M_X64 +#if _M_X86_64 #define NP_ARCH "x64" -#else -#ifdef _M_ARM -#define NP_ARCH "ARM" -#else +#elif _M_ARM_32 +#define NP_ARCH "ARM32" +#elif _M_ARM_64 +#define NP_ARCH "ARM64" +#elif _M_X86_32 #define NP_ARCH "x86" -#endif +#else +#define NP_ARCH "Unk" #endif #ifdef _WIN32 diff --git a/Source/Core/Common/x64ABI.cpp b/Source/Core/Common/x64ABI.cpp index ecbbfa0bb2..6f423eaa6a 100644 --- a/Source/Core/Common/x64ABI.cpp +++ b/Source/Core/Common/x64ABI.cpp @@ -20,7 +20,7 @@ unsigned int XEmitter::ABI_GetAlignedFrameSize(unsigned int frameSize, bool noPr // for Win64) into this rather than having a separate prolog. // On Windows 32-bit, the required alignment is only 4 bytes, so we just // ensure that the frame size isn't misaligned. -#ifdef _M_X64 +#if _M_X86_64 // expect frameSize == 0 frameSize = noProlog ? 
0x28 : 0; #elif defined(_WIN32) @@ -38,7 +38,7 @@ void XEmitter::ABI_AlignStack(unsigned int frameSize, bool noProlog) { unsigned int fillSize = ABI_GetAlignedFrameSize(frameSize, noProlog) - frameSize; if (fillSize != 0) { -#ifdef _M_X64 +#if _M_X86_64 SUB(64, R(RSP), Imm8(fillSize)); #else SUB(32, R(ESP), Imm8(fillSize)); @@ -49,7 +49,7 @@ void XEmitter::ABI_AlignStack(unsigned int frameSize, bool noProlog) { void XEmitter::ABI_RestoreStack(unsigned int frameSize, bool noProlog) { unsigned int alignedSize = ABI_GetAlignedFrameSize(frameSize, noProlog); if (alignedSize != 0) { -#ifdef _M_X64 +#if _M_X86_64 ADD(64, R(RSP), Imm8(alignedSize)); #else ADD(32, R(ESP), Imm8(alignedSize)); @@ -60,13 +60,13 @@ void XEmitter::ABI_RestoreStack(unsigned int frameSize, bool noProlog) { void XEmitter::ABI_PushRegistersAndAdjustStack(u32 mask, bool noProlog) { int regSize = -#ifdef _M_X64 +#if _M_X86_64 8; #else 4; #endif int shadow = 0; -#if defined(_WIN32) && defined(_M_X64) +#if defined(_WIN32) && _M_X86_64 shadow = 0x20; #endif int count = 0; @@ -101,13 +101,13 @@ void XEmitter::ABI_PushRegistersAndAdjustStack(u32 mask, bool noProlog) void XEmitter::ABI_PopRegistersAndAdjustStack(u32 mask, bool noProlog) { int regSize = -#ifdef _M_X64 +#if _M_X86_64 8; #else 4; #endif int size = 0; -#if defined(_WIN32) && defined(_M_X64) +#if defined(_WIN32) && _M_X86_64 size += 0x20; #endif for (int x = 0; x < 16; x++) @@ -137,7 +137,7 @@ void XEmitter::ABI_PopRegistersAndAdjustStack(u32 mask, bool noProlog) } } -#ifdef _M_IX86 // All32 +#if _M_X86_32 // All32 // Shared code between Win32 and Unix32 void XEmitter::ABI_CallFunction(void *func) { diff --git a/Source/Core/Common/x64ABI.h b/Source/Core/Common/x64ABI.h index ed9841919f..aa5a3582d3 100644 --- a/Source/Core/Common/x64ABI.h +++ b/Source/Core/Common/x64ABI.h @@ -31,7 +31,7 @@ // Callee-save: RBX RBP R12 R13 R14 R15 // Parameters: RDI RSI RDX RCX R8 R9 -#ifdef _M_IX86 // 32 bit calling convention, shared by all +#if _M_X86_32 // 32 bit calling convention, shared by all // 32-bit don't pass parameters in regs, but these are convenient to have anyway when we have to // choose regs to put stuff in. diff --git a/Source/Core/Common/x64CPUDetect.cpp b/Source/Core/Common/x64CPUDetect.cpp index 5ce1775843..64ded2df3f 100644 --- a/Source/Core/Common/x64CPUDetect.cpp +++ b/Source/Core/Common/x64CPUDetect.cpp @@ -98,16 +98,16 @@ CPUInfo::CPUInfo() { void CPUInfo::Detect() { memset(this, 0, sizeof(*this)); -#ifdef _M_IX86 +#if _M_X86_32 Mode64bit = false; -#elif defined (_M_X64) +#elif _M_X86_64 Mode64bit = true; OS64bit = true; #endif num_cores = 1; #ifdef _WIN32 -#ifdef _M_IX86 +#if _M_X86_32 BOOL f64 = false; IsWow64Process(GetCurrentProcess(), &f64); OS64bit = (f64 == TRUE) ? 
true : false; @@ -170,9 +170,9 @@ void CPUInfo::Detect() GC_ALIGNED16(u8 fx_state[512]); memset(fx_state, 0, sizeof(fx_state)); #ifdef _WIN32 -#ifdef _M_IX86 +#if _M_X86_32 _fxsave(fx_state); -#elif defined (_M_X64) +#elif _M_X86_64 _fxsave64(fx_state); #endif #else diff --git a/Source/Core/Common/x64Emitter.cpp b/Source/Core/Common/x64Emitter.cpp index c361d9e625..6ba5e16c0f 100644 --- a/Source/Core/Common/x64Emitter.cpp +++ b/Source/Core/Common/x64Emitter.cpp @@ -124,7 +124,7 @@ void XEmitter::WriteSIB(int scale, int index, int base) void OpArg::WriteRex(XEmitter *emit, int opBits, int bits, int customOp) const { if (customOp == -1) customOp = operandReg; -#ifdef _M_X64 +#if _M_X86_64 u8 op = 0x40; if (opBits == 64) op |= 8; if (customOp & 8) op |= 4; @@ -205,7 +205,7 @@ void OpArg::WriteRest(XEmitter *emit, int extraBytes, X64Reg _operandReg, _offsetOrBaseReg = 5; emit->WriteModRM(0, _operandReg, _offsetOrBaseReg); //TODO : add some checks -#ifdef _M_X64 +#if _M_X86_64 u64 ripAddr = (u64)emit->GetCodePtr() + 4 + extraBytes; s64 distance = (s64)offset - (s64)ripAddr; _assert_msg_(DYNA_REC, (distance < 0x80000000LL @@ -1191,7 +1191,7 @@ void XEmitter::MOVD_xmm(X64Reg dest, const OpArg &arg) {WriteSSEOp(64, 0x6E, tru void XEmitter::MOVD_xmm(const OpArg &arg, X64Reg src) {WriteSSEOp(64, 0x7E, true, src, arg, 0);} void XEmitter::MOVQ_xmm(X64Reg dest, OpArg arg) { -#ifdef _M_X64 +#if _M_X86_64 // Alternate encoding // This does not display correctly in MSVC's debugger, it thinks it's a MOVD arg.operandReg = dest; @@ -1551,7 +1551,7 @@ void XEmitter::RTDSC() { Write8(0x0F); Write8(0x31); } void XEmitter::CallCdeclFunction3(void* fnptr, u32 arg0, u32 arg1, u32 arg2) { using namespace Gen; -#ifdef _M_X64 +#if _M_X86_64 #ifdef _MSC_VER MOV(32, R(RCX), Imm32(arg0)); @@ -1582,7 +1582,7 @@ void XEmitter::CallCdeclFunction3(void* fnptr, u32 arg0, u32 arg1, u32 arg2) void XEmitter::CallCdeclFunction4(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3) { using namespace Gen; -#ifdef _M_X64 +#if _M_X86_64 #ifdef _MSC_VER MOV(32, R(RCX), Imm32(arg0)); @@ -1616,7 +1616,7 @@ void XEmitter::CallCdeclFunction4(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 void XEmitter::CallCdeclFunction5(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4) { using namespace Gen; -#ifdef _M_X64 +#if _M_X86_64 #ifdef _MSC_VER MOV(32, R(RCX), Imm32(arg0)); @@ -1653,7 +1653,7 @@ void XEmitter::CallCdeclFunction5(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 void XEmitter::CallCdeclFunction6(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 arg5) { using namespace Gen; -#ifdef _M_X64 +#if _M_X86_64 #ifdef _MSC_VER MOV(32, R(RCX), Imm32(arg0)); @@ -1690,7 +1690,7 @@ void XEmitter::CallCdeclFunction6(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 #endif } -#ifdef _M_X64 +#if _M_X86_64 // See header void XEmitter::___CallCdeclImport3(void* impptr, u32 arg0, u32 arg1, u32 arg2) { diff --git a/Source/Core/Common/x64Emitter.h b/Source/Core/Common/x64Emitter.h index 368939ed05..f8acb6fe95 100644 --- a/Source/Core/Common/x64Emitter.h +++ b/Source/Core/Common/x64Emitter.h @@ -193,13 +193,13 @@ inline OpArg Imm8 (u8 imm) {return OpArg(imm, SCALE_IMM8);} inline OpArg Imm16(u16 imm) {return OpArg(imm, SCALE_IMM16);} //rarely used inline OpArg Imm32(u32 imm) {return OpArg(imm, SCALE_IMM32);} inline OpArg Imm64(u64 imm) {return OpArg(imm, SCALE_IMM64);} -#ifdef _M_X64 +#ifdef _ARCH_64 inline OpArg ImmPtr(void* imm) {return Imm64((u64)imm);} #else inline OpArg ImmPtr(void* imm) {return Imm32((u32)imm);} #endif 
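
The x64Emitter.h, MemArena, MemoryUtil and Hash hunks above capture the core distinction of this rename: checks that only depend on pointer width (ImmPtr, Find4GBBase, the 64-bit block reads) now key off _ARCH_64/_ARCH_32, while checks that actually need the x86-64 instruction set (the emitter and JIT paths) key off _M_X86_64. A minimal sketch of that split, assuming the macros are supplied by CMake or Common.h exactly as in this patch; the helper names are illustrative only, not part of the diff:

    // Sketch only: the two kinds of checks this rename separates.
    // _ARCH_64/_ARCH_32 answer "how wide is a pointer?", while _M_X86_64
    // answers "can we emit x86-64 code here?".
    #include <cstdint>

    // Pointer-width question, e.g. whether an immediate pointer needs 64 bits.
    inline bool PointerNeeds64BitImmediate(const void* ptr)
    {
    #if _ARCH_64
        return reinterpret_cast<std::uint64_t>(ptr) > 0xFFFFFFFFull;
    #else
        return false; // 32-bit address space, a 32-bit immediate always fits
    #endif
    }

    // ISA question, e.g. whether the x86-64 emitter/JIT paths are available.
    inline bool HaveX64Emitter()
    {
    #if _M_X86_64
        return true;
    #else
        return false;
    #endif
    }

On a future 64-bit ARM build the first check stays meaningful while the second becomes false, which is exactly the case the old overloaded _M_X64 macro could not express.
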
inline u32 PtrOffset(void* ptr, void* base) { -#ifdef _M_X64 +#ifdef _ARCH_64 s64 distance = (s64)ptr-(s64)base; if (distance >= 0x80000000LL || distance < -0x80000000LL) { @@ -695,7 +695,7 @@ public: void ABI_AlignStack(unsigned int frameSize, bool noProlog = false); void ABI_RestoreStack(unsigned int frameSize, bool noProlog = false); - #ifdef _M_IX86 + #if _M_X86_32 inline int ABI_GetNumXMMRegs() { return 8; } #else inline int ABI_GetNumXMMRegs() { return 16; } @@ -707,7 +707,7 @@ public: void CallCdeclFunction5(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); void CallCdeclFunction6(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 arg5); -#if defined(_M_IX86) +#if _M_X86_32 #define CallCdeclFunction3_I(a,b,c,d) CallCdeclFunction3((void *)(a), (b), (c), (d)) #define CallCdeclFunction4_I(a,b,c,d,e) CallCdeclFunction4((void *)(a), (b), (c), (d), (e)) diff --git a/Source/Core/Common/x64FPURoundMode.cpp b/Source/Core/Common/x64FPURoundMode.cpp index 5876ffbd0e..b3dc669523 100644 --- a/Source/Core/Common/x64FPURoundMode.cpp +++ b/Source/Core/Common/x64FPURoundMode.cpp @@ -21,7 +21,7 @@ namespace FPURoundMode void SetRoundMode(RoundModes mode) { // Set FPU rounding mode to mimic the PowerPC's - #ifdef _M_IX86 + #ifdef _M_X86_32 // This shouldn't really be needed anymore since we use SSE #ifdef _WIN32 const int table[4] = @@ -51,7 +51,7 @@ namespace FPURoundMode void SetPrecisionMode(PrecisionModes mode) { - #ifdef _M_IX86 + #ifdef _M_X86_32 // sets the floating-point lib to 53-bit // PowerPC has a 53bit floating pipeline only // eg: sscanf is very sensitive diff --git a/Source/Core/Core/CMakeLists.txt b/Source/Core/Core/CMakeLists.txt index 71f9a8eb7b..726a056865 100644 --- a/Source/Core/Core/CMakeLists.txt +++ b/Source/Core/Core/CMakeLists.txt @@ -178,7 +178,7 @@ set(SRCS ActionReplay.cpp PowerPC/JitILCommon/JitILBase_Integer.cpp ) -if(NOT _M_GENERIC) +if(_M_X86) set(SRCS ${SRCS} x64MemTools.cpp PowerPC/Jit64IL/IR_X86.cpp @@ -201,7 +201,7 @@ if(NOT _M_GENERIC) PowerPC/JitCommon/JitAsmCommon.cpp PowerPC/JitCommon/Jit_Util.cpp) endif() -if(_M_ARM) +if(_M_ARM_32) set(SRCS ${SRCS} ArmMemTools.cpp PowerPC/JitArm32/Jit.cpp diff --git a/Source/Core/Core/ConfigManager.cpp b/Source/Core/Core/ConfigManager.cpp index 8a58f1d1e6..5698a21dc7 100644 --- a/Source/Core/Core/ConfigManager.cpp +++ b/Source/Core/Core/ConfigManager.cpp @@ -377,10 +377,12 @@ void SConfig::LoadSettings() // Core ini.Get("Core", "HLE_BS2", &m_LocalCoreStartupParameter.bHLE_BS2, false); -#ifdef _M_ARM +#ifdef _M_X86 + ini.Get("Core", "CPUCore", &m_LocalCoreStartupParameter.iCPUCore, 1); +#elif _M_ARM_32 ini.Get("Core", "CPUCore", &m_LocalCoreStartupParameter.iCPUCore, 3); #else - ini.Get("Core", "CPUCore", &m_LocalCoreStartupParameter.iCPUCore, 1); + ini.Get("Core", "CPUCore", &m_LocalCoreStartupParameter.iCPUCore, 0); #endif ini.Get("Core", "Fastmem", &m_LocalCoreStartupParameter.bFastmem, true); ini.Get("Core", "DSPThread", &m_LocalCoreStartupParameter.bDSPThread, false); diff --git a/Source/Core/Core/Core.cpp b/Source/Core/Core/Core.cpp index ad3cbd6f8f..529951f624 100644 --- a/Source/Core/Core/Core.cpp +++ b/Source/Core/Core/Core.cpp @@ -297,7 +297,7 @@ void CpuThread() g_video_backend->Video_Prepare(); } - #if defined(_M_X64) || _M_ARM + #if _M_X86_64 || _M_ARM_32 if (_CoreParameter.bFastmem) EMM::InstallExceptionHandler(); // Let's run under memory watch #endif diff --git a/Source/Core/Core/DSP/DSPEmitter.cpp b/Source/Core/Core/DSP/DSPEmitter.cpp index 7ef3169991..82f93a3252 100644 --- 
a/Source/Core/Core/DSP/DSPEmitter.cpp +++ b/Source/Core/Core/DSP/DSPEmitter.cpp @@ -401,7 +401,7 @@ void DSPEmitter::CompileDispatcher() // Execute block. Cycles executed returned in EAX. -#ifdef _M_IX86 +#if _M_X86_32 MOVZX(32, 16, ECX, M(&g_dsp.pc)); MOV(32, R(EBX), ImmPtr(blocks)); JMPptr(MComplex(EBX, ECX, SCALE_4, 0)); diff --git a/Source/Core/Core/DSP/Jit/DSPJitArithmetic.cpp b/Source/Core/Core/DSP/Jit/DSPJitArithmetic.cpp index f39057c1a6..72eb1aa689 100644 --- a/Source/Core/Core/DSP/Jit/DSPJitArithmetic.cpp +++ b/Source/Core/Core/DSP/Jit/DSPJitArithmetic.cpp @@ -18,7 +18,7 @@ using namespace Gen; // flags out: --10 0100 void DSPEmitter::clr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 reg = (opc >> 11) & 0x1; // dsp_set_long_acc(reg, 0); MOV(64, R(RAX), Imm64(0)); @@ -40,7 +40,7 @@ void DSPEmitter::clr(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::clrl(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 reg = (opc >> 8) & 0x1; // s64 acc = dsp_round_long_acc(dsp_get_long_acc(reg)); get_long_acc(reg); @@ -68,7 +68,7 @@ void DSPEmitter::clrl(const UDSPInstruction opc) // flags out: -x-- ---- void DSPEmitter::andcf(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { u8 reg = (opc >> 8) & 0x1; @@ -108,7 +108,7 @@ void DSPEmitter::andcf(const UDSPInstruction opc) // flags out: -x-- ---- void DSPEmitter::andf(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { u8 reg = (opc >> 8) & 0x1; @@ -146,7 +146,7 @@ void DSPEmitter::andf(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::tst(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { u8 reg = (opc >> 11) & 0x1; @@ -167,7 +167,7 @@ void DSPEmitter::tst(const UDSPInstruction opc) // flags out: --x0 xx00 void DSPEmitter::tstaxh(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { u8 reg = (opc >> 8) & 0x1; @@ -190,7 +190,7 @@ void DSPEmitter::tstaxh(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::cmp(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { X64Reg tmp1; @@ -220,7 +220,7 @@ void DSPEmitter::cmp(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::cmpar(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { u8 rreg = ((opc >> 12) & 0x1); @@ -256,7 +256,7 @@ void DSPEmitter::cmpar(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::cmpi(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { u8 reg = (opc >> 8) & 0x1; @@ -289,7 +289,7 @@ void DSPEmitter::cmpi(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::cmpis(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { u8 areg = (opc >> 8) & 0x1; @@ -324,7 +324,7 @@ void DSPEmitter::cmpis(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::xorr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; // u16 accm = g_dsp.r.acm[dreg] ^ g_dsp.r.axh[sreg]; @@ -353,7 +353,7 @@ void DSPEmitter::xorr(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::andr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; // u16 accm = g_dsp.r.acm[dreg] & g_dsp.r.axh[sreg]; @@ -382,7 +382,7 @@ void DSPEmitter::andr(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::orr(const UDSPInstruction opc) { 
-#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; // u16 accm = g_dsp.r.acm[dreg] | g_dsp.r.axh[sreg]; @@ -411,7 +411,7 @@ void DSPEmitter::orr(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::andc(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // u16 accm = g_dsp.r.acm[dreg] & g_dsp.r.acm[1 - dreg]; get_acc_m(dreg, RAX); @@ -439,7 +439,7 @@ void DSPEmitter::andc(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::orc(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // u16 accm = g_dsp.r.acm[dreg] | g_dsp.r.acm[1 - dreg]; get_acc_m(dreg, RAX); @@ -466,7 +466,7 @@ void DSPEmitter::orc(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::xorc(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // u16 accm = g_dsp.r.acm[dreg] ^ g_dsp.r.acm[1 - dreg]; get_acc_m(dreg, RAX); @@ -493,7 +493,7 @@ void DSPEmitter::xorc(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::notc(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // u16 accm = g_dsp.r.acm[dreg] ^ 0xffff; get_acc_m(dreg, RAX); @@ -520,7 +520,7 @@ void DSPEmitter::notc(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::xori(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 reg = (opc >> 8) & 0x1; // u16 imm = dsp_fetch_code(); u16 imm = dsp_imem_read(compilePC+1); @@ -547,7 +547,7 @@ void DSPEmitter::xori(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::andi(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 reg = (opc >> 8) & 0x1; // u16 imm = dsp_fetch_code(); u16 imm = dsp_imem_read(compilePC+1); @@ -574,7 +574,7 @@ void DSPEmitter::andi(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::ori(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 reg = (opc >> 8) & 0x1; // u16 imm = dsp_fetch_code(); u16 imm = dsp_imem_read(compilePC+1); @@ -602,7 +602,7 @@ void DSPEmitter::ori(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::addr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = ((opc >> 9) & 0x3) + DSP_REG_AXL0; @@ -642,7 +642,7 @@ void DSPEmitter::addr(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::addax(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -681,7 +681,7 @@ void DSPEmitter::addax(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::add(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; X64Reg tmp1; @@ -719,7 +719,7 @@ void DSPEmitter::add(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::addp(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; X64Reg tmp1; @@ -758,7 +758,7 @@ void DSPEmitter::addp(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::addaxl(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 sreg = (opc >> 9) & 0x1; u8 dreg = (opc >> 8) & 0x1; @@ -799,7 +799,7 @@ void DSPEmitter::addaxl(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::addi(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 areg = (opc >> 8) & 0x1; X64Reg tmp1; gpr.getFreeXReg(tmp1); @@ -840,7 +840,7 @@ void DSPEmitter::addi(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::addis(const 
UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; X64Reg tmp1; @@ -881,7 +881,7 @@ void DSPEmitter::addis(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::incm(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; s64 subtract = 0x10000; X64Reg tmp1; @@ -918,7 +918,7 @@ void DSPEmitter::incm(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::inc(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; X64Reg tmp1; gpr.getFreeXReg(tmp1); @@ -956,7 +956,7 @@ void DSPEmitter::inc(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::subr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = ((opc >> 9) & 0x3) + DSP_REG_AXL0; @@ -998,7 +998,7 @@ void DSPEmitter::subr(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::subax(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -1038,7 +1038,7 @@ void DSPEmitter::subax(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::sub(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; X64Reg tmp1; gpr.getFreeXReg(tmp1); @@ -1076,7 +1076,7 @@ void DSPEmitter::sub(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::subp(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; X64Reg tmp1; gpr.getFreeXReg(tmp1); @@ -1114,7 +1114,7 @@ void DSPEmitter::subp(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::decm(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x01; s64 subtract = 0x10000; X64Reg tmp1; @@ -1151,7 +1151,7 @@ void DSPEmitter::decm(const UDSPInstruction opc) // flags out: x-xx xxxx void DSPEmitter::dec(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x01; X64Reg tmp1; gpr.getFreeXReg(tmp1); @@ -1189,7 +1189,7 @@ void DSPEmitter::dec(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::neg(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // s64 acc = dsp_get_long_acc(dreg); get_long_acc(dreg); @@ -1214,7 +1214,7 @@ void DSPEmitter::neg(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::abs(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 11) & 0x1; // s64 acc = dsp_get_long_acc(dreg); @@ -1245,7 +1245,7 @@ void DSPEmitter::abs(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::movr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 areg = (opc >> 8) & 0x1; u8 sreg = ((opc >> 9) & 0x3) + DSP_REG_AXL0; @@ -1272,7 +1272,7 @@ void DSPEmitter::movr(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::movax(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -1297,7 +1297,7 @@ void DSPEmitter::movax(const UDSPInstruction opc) // flags out: --x0 xx00 void DSPEmitter::mov(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // u64 acc = dsp_get_long_acc(1 - dreg); get_long_acc(1 - dreg); @@ -1322,7 +1322,7 @@ void DSPEmitter::mov(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::lsl16(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 areg = (opc >> 8) & 0x1; // s64 acc = dsp_get_long_acc(areg); get_long_acc(areg); @@ -1347,7 +1347,7 @@ void 
DSPEmitter::lsl16(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::lsr16(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 areg = (opc >> 8) & 0x1; // u64 acc = dsp_get_long_acc(areg); @@ -1375,7 +1375,7 @@ void DSPEmitter::lsr16(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::asr16(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 areg = (opc >> 11) & 0x1; // s64 acc = dsp_get_long_acc(areg); @@ -1401,7 +1401,7 @@ void DSPEmitter::asr16(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::lsl(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x01; u16 shift = opc & 0x3f; // u64 acc = dsp_get_long_acc(rreg); @@ -1430,7 +1430,7 @@ void DSPEmitter::lsl(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::lsr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x01; u16 shift; // u64 acc = dsp_get_long_acc(rreg); @@ -1468,7 +1468,7 @@ void DSPEmitter::lsr(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::asl(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x01; u16 shift = opc & 0x3f; // u64 acc = dsp_get_long_acc(rreg); @@ -1495,7 +1495,7 @@ void DSPEmitter::asl(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::asr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x01; u16 shift; @@ -1530,7 +1530,7 @@ void DSPEmitter::asr(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::lsrn(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 // s16 shift; // u16 accm = (u16)dsp_get_acc_m(1); get_acc_m(1); @@ -1593,7 +1593,7 @@ void DSPEmitter::lsrn(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::asrn(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 // s16 shift; // u16 accm = (u16)dsp_get_acc_m(1); get_acc_m(1); @@ -1651,7 +1651,7 @@ void DSPEmitter::asrn(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::lsrnrx(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -1715,7 +1715,7 @@ void DSPEmitter::lsrnrx(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::asrnrx(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -1776,7 +1776,7 @@ void DSPEmitter::asrnrx(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::lsrnr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // s16 shift; @@ -1838,7 +1838,7 @@ void DSPEmitter::lsrnr(const UDSPInstruction opc) // flags out: --xx xx00 void DSPEmitter::asrnr(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // s16 shift; diff --git a/Source/Core/Core/DSP/Jit/DSPJitCCUtil.cpp b/Source/Core/Core/DSP/Jit/DSPJitCCUtil.cpp index fdaa67b3a5..dcff664724 100644 --- a/Source/Core/Core/DSP/Jit/DSPJitCCUtil.cpp +++ b/Source/Core/Core/DSP/Jit/DSPJitCCUtil.cpp @@ -13,7 +13,7 @@ using namespace Gen; // Clobbers RDX void DSPEmitter::Update_SR_Register(Gen::X64Reg val) { -#ifdef _M_X64 +#if _M_X86_64 OpArg sr_reg; gpr.getReg(DSP_REG_SR,sr_reg); // // 0x04 @@ -57,7 +57,7 @@ void DSPEmitter::Update_SR_Register(Gen::X64Reg val) // Clobbers RDX void DSPEmitter::Update_SR_Register64(Gen::X64Reg val) { -#ifdef _M_X64 +#if _M_X86_64 // g_dsp.r[DSP_REG_SR] &= ~SR_CMP_MASK; OpArg sr_reg; gpr.getReg(DSP_REG_SR,sr_reg); @@ -72,7 +72,7 @@ 
void DSPEmitter::Update_SR_Register64(Gen::X64Reg val) // Clobbers RDX void DSPEmitter::Update_SR_Register64_Carry(X64Reg val, X64Reg carry_ovfl) { -#ifdef _M_X64 +#if _M_X86_64 OpArg sr_reg; gpr.getReg(DSP_REG_SR,sr_reg); // g_dsp.r[DSP_REG_SR] &= ~SR_CMP_MASK; @@ -109,7 +109,7 @@ void DSPEmitter::Update_SR_Register64_Carry(X64Reg val, X64Reg carry_ovfl) // Clobbers RDX void DSPEmitter::Update_SR_Register64_Carry2(X64Reg val, X64Reg carry_ovfl) { -#ifdef _M_X64 +#if _M_X86_64 OpArg sr_reg; gpr.getReg(DSP_REG_SR,sr_reg); // g_dsp.r[DSP_REG_SR] &= ~SR_CMP_MASK; @@ -155,7 +155,7 @@ void DSPEmitter::Update_SR_Register64_Carry2(X64Reg val, X64Reg carry_ovfl) // Clobbers RDX void DSPEmitter::Update_SR_Register16(X64Reg val) { -#ifdef _M_X64 +#if _M_X86_64 OpArg sr_reg; gpr.getReg(DSP_REG_SR,sr_reg); AND(16, sr_reg, Imm16(~SR_CMP_MASK)); @@ -197,7 +197,7 @@ void DSPEmitter::Update_SR_Register16(X64Reg val) // Clobbers RDX void DSPEmitter::Update_SR_Register16_OverS32(Gen::X64Reg val) { -#ifdef _M_X64 +#if _M_X86_64 OpArg sr_reg; gpr.getReg(DSP_REG_SR,sr_reg); AND(16, sr_reg, Imm16(~SR_CMP_MASK)); diff --git a/Source/Core/Core/DSP/Jit/DSPJitMultiplier.cpp b/Source/Core/Core/DSP/Jit/DSPJitMultiplier.cpp index 234226aab3..0e9d5cee5d 100644 --- a/Source/Core/Core/DSP/Jit/DSPJitMultiplier.cpp +++ b/Source/Core/Core/DSP/Jit/DSPJitMultiplier.cpp @@ -17,7 +17,7 @@ using namespace Gen; // In: RCX = s16 a, RAX = s16 b void DSPEmitter::multiply() { -#ifdef _M_X64 +#if _M_X86_64 // prod = (s16)a * (s16)b; //signed IMUL(64, R(ECX)); @@ -39,7 +39,7 @@ void DSPEmitter::multiply() // Clobbers RDX void DSPEmitter::multiply_add() { -#ifdef _M_X64 +#if _M_X86_64 // s64 prod = dsp_get_long_prod() + dsp_get_multiply_prod(a, b, sign); multiply(); MOV(64, R(RDX), R(RAX)); @@ -53,7 +53,7 @@ void DSPEmitter::multiply_add() // Clobbers RDX void DSPEmitter::multiply_sub() { -#ifdef _M_X64 +#if _M_X86_64 // s64 prod = dsp_get_long_prod() - dsp_get_multiply_prod(a, b, sign); multiply(); MOV(64, R(RDX), R(RAX)); @@ -69,7 +69,7 @@ void DSPEmitter::multiply_sub() // Returns s64 in RAX void DSPEmitter::multiply_mulx(u8 axh0, u8 axh1) { -#ifdef _M_X64 +#if _M_X86_64 // s64 result; // if ((axh0==0) && (axh1==0)) @@ -169,7 +169,7 @@ void DSPEmitter::clrp(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::tstprod(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 if (FlagsNeeded()) { // s64 prod = dsp_get_long_prod(); @@ -191,7 +191,7 @@ void DSPEmitter::tstprod(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::movp(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // s64 acc = dsp_get_long_prod(); @@ -216,7 +216,7 @@ void DSPEmitter::movp(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::movnp(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; // s64 acc = -dsp_get_long_prod(); @@ -242,7 +242,7 @@ void DSPEmitter::movnp(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::movpz(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x01; // s64 acc = dsp_get_long_prod_round_prodl(); @@ -267,7 +267,7 @@ void DSPEmitter::movpz(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::addpaxz(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 dreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -311,7 +311,7 @@ void DSPEmitter::addpaxz(const UDSPInstruction opc) // Multiply $ax0.h by $ax0.h void DSPEmitter::mulaxh(const UDSPInstruction 
opc) { -#ifdef _M_X64 +#if _M_X86_64 // s64 prod = dsp_multiply(dsp_get_ax_h(0), dsp_get_ax_h(0)); dsp_op_read_reg(DSP_REG_AXH0, RCX, SIGN); MOV(64, R(RAX), R(RCX)); @@ -331,7 +331,7 @@ void DSPEmitter::mulaxh(const UDSPInstruction opc) // $axS.h of secondary accumulator $axS (treat them both as signed). void DSPEmitter::mul(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 sreg = (opc >> 11) & 0x1; // u16 axl = dsp_get_ax_l(sreg); @@ -356,7 +356,7 @@ void DSPEmitter::mul(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulac(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x1; u8 sreg = (opc >> 11) & 0x1; @@ -396,7 +396,7 @@ void DSPEmitter::mulac(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulmv(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x1; // s64 acc = dsp_get_long_prod(); @@ -426,7 +426,7 @@ void DSPEmitter::mulmv(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulmvz(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x1; // s64 acc = dsp_get_long_prod_round_prodl(); @@ -452,7 +452,7 @@ void DSPEmitter::mulmvz(const UDSPInstruction opc) // Part is selected by S and T bits. Zero selects low part, one selects high part. void DSPEmitter::mulx(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 treg = ((opc >> 11) & 0x1); u8 sreg = ((opc >> 12) & 0x1); @@ -478,7 +478,7 @@ void DSPEmitter::mulx(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulxac(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x1; u8 treg = (opc >> 11) & 0x1; u8 sreg = (opc >> 12) & 0x1; @@ -520,7 +520,7 @@ void DSPEmitter::mulxac(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulxmv(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = ((opc >> 8) & 0x1); u8 treg = (opc >> 11) & 0x1; u8 sreg = (opc >> 12) & 0x1; @@ -561,7 +561,7 @@ void DSPEmitter::mulxmv(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulxmvz(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x1; u8 treg = (opc >> 11) & 0x1; u8 sreg = (opc >> 12) & 0x1; @@ -600,7 +600,7 @@ void DSPEmitter::mulxmvz(const UDSPInstruction opc) // secondary accumulator $axS (treat them both as signed). void DSPEmitter::mulc(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 treg = (opc >> 11) & 0x1; u8 sreg = (opc >> 12) & 0x1; @@ -626,7 +626,7 @@ void DSPEmitter::mulc(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulcac(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x1; u8 treg = (opc >> 11) & 0x1; u8 sreg = (opc >> 12) & 0x1; @@ -668,7 +668,7 @@ void DSPEmitter::mulcac(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulcmv(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x1; u8 treg = (opc >> 11) & 0x1; u8 sreg = (opc >> 12) & 0x1; @@ -708,7 +708,7 @@ void DSPEmitter::mulcmv(const UDSPInstruction opc) // flags out: --xx xx0x void DSPEmitter::mulcmvz(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 rreg = (opc >> 8) & 0x1; u8 treg = (opc >> 11) & 0x1; u8 sreg = (opc >> 12) & 0x1; @@ -746,7 +746,7 @@ void DSPEmitter::mulcmvz(const UDSPInstruction opc) // signed) and add result to product register. 
void DSPEmitter::maddx(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 treg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -770,7 +770,7 @@ void DSPEmitter::maddx(const UDSPInstruction opc) // signed) and subtract result from product register. void DSPEmitter::msubx(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 treg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -794,7 +794,7 @@ void DSPEmitter::msubx(const UDSPInstruction opc) // register. void DSPEmitter::maddc(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 treg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -818,7 +818,7 @@ void DSPEmitter::maddc(const UDSPInstruction opc) // product register. void DSPEmitter::msubc(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 treg = (opc >> 8) & 0x1; u8 sreg = (opc >> 9) & 0x1; @@ -842,7 +842,7 @@ void DSPEmitter::msubc(const UDSPInstruction opc) // result to product register. void DSPEmitter::madd(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 sreg = (opc >> 8) & 0x1; // u16 axl = dsp_get_ax_l(sreg); @@ -865,7 +865,7 @@ void DSPEmitter::madd(const UDSPInstruction opc) // subtract result from product register. void DSPEmitter::msub(const UDSPInstruction opc) { -#ifdef _M_X64 +#if _M_X86_64 u8 sreg = (opc >> 8) & 0x1; // u16 axl = dsp_get_ax_l(sreg); diff --git a/Source/Core/Core/DSP/Jit/DSPJitRegCache.cpp b/Source/Core/Core/DSP/Jit/DSPJitRegCache.cpp index 53127469a9..1fb86db251 100644 --- a/Source/Core/Core/DSP/Jit/DSPJitRegCache.cpp +++ b/Source/Core/Core/DSP/Jit/DSPJitRegCache.cpp @@ -62,7 +62,7 @@ static void *reg_ptr(int reg) case DSP_REG_AX0_32: case DSP_REG_AX1_32: return &g_dsp.r.ax[reg - DSP_REG_AX0_32].val; -#ifdef _M_X64 +#if _M_X86_64 case DSP_REG_ACC0_64: case DSP_REG_ACC1_64: return &g_dsp.r.ac[reg - DSP_REG_ACC0_64].val; @@ -101,7 +101,7 @@ DSPJitRegCache::DSPJitRegCache(DSPEmitter &_emitter) xregs[RSI].guest_reg = DSP_REG_NONE; xregs[RDI].guest_reg = DSP_REG_NONE; -#ifdef _M_X64 +#if _M_X86_64 #ifdef STATIC_REG_ACCS xregs[R8].guest_reg = DSP_REG_STATIC; //acc0 xregs[R9].guest_reg = DSP_REG_STATIC; //acc1 @@ -135,7 +135,7 @@ DSPJitRegCache::DSPJitRegCache(DSPEmitter &_emitter) regs[i].size = 2; } //special composite registers -#ifdef _M_X64 +#if _M_X86_64 #ifdef STATIC_REG_ACCS regs[DSP_REG_ACC0_64].host_reg = R8; regs[DSP_REG_ACC1_64].host_reg = R9; @@ -375,7 +375,7 @@ void DSPJitRegCache::flushRegs() _assert_msg_(DSPLLE, xregs[RDI].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", RDI); -#ifdef _M_X64 +#if _M_X86_64 #ifdef STATIC_REG_ACCS _assert_msg_(DSPLLE, xregs[R8].guest_reg == DSP_REG_STATIC, @@ -428,7 +428,7 @@ void DSPJitRegCache::loadRegs(bool emit) if (emit) { -#ifdef _M_X64 +#if _M_X86_64 emitter.MOV(64, M(&ebp_store), R(RBP)); #else emitter.MOV(32, M(&ebp_store), R(EBP)); @@ -455,7 +455,7 @@ void DSPJitRegCache::saveRegs() "register %x is still a simple reg", i); } -#ifdef _M_X64 +#if _M_X86_64 emitter.MOV(64, R(RBP), M(&ebp_store)); #else emitter.MOV(32, R(EBP), M(&ebp_store)); @@ -482,7 +482,7 @@ void DSPJitRegCache::pushRegs() } //hardcoding alignment to 16 bytes -#ifdef _M_X64 +#if _M_X86_64 if (push_count & 1) { emitter.SUB(64,R(RSP),Imm32(8)); @@ -519,7 +519,7 @@ void DSPJitRegCache::pushRegs() "register %x is still used", i); } -#ifdef _M_X64 +#if _M_X86_64 emitter.MOV(64, R(RBP), M(&ebp_store)); #else emitter.MOV(32, R(EBP), M(&ebp_store)); @@ -527,7 +527,7 @@ void DSPJitRegCache::pushRegs() } void DSPJitRegCache::popRegs() { -#ifdef _M_X64 +#if _M_X86_64 emitter.MOV(64, 
M(&ebp_store), R(RBP)); #else emitter.MOV(32, M(&ebp_store), R(EBP)); @@ -552,7 +552,7 @@ void DSPJitRegCache::popRegs() { } //hardcoding alignment to 16 bytes -#ifdef _M_X64 +#if _M_X86_64 if (push_count & 1) { emitter.ADD(64,R(RSP),Imm32(8)); @@ -589,7 +589,7 @@ X64Reg DSPJitRegCache::makeABICallSafe(X64Reg reg) emitter.INT3(); } xregs[RBP].guest_reg = rbp_guest; -#ifdef _M_X64 +#if _M_X86_64 emitter.MOV(64,R(safe),R(reg)); #else emitter.MOV(32,R(safe),R(reg)); @@ -626,7 +626,7 @@ void DSPJitRegCache::movToHostReg(int reg, X64Reg host_reg, bool load) case 4: emitter.MOV(32, R(host_reg), regs[reg].loc); break; -#ifdef _M_X64 +#if _M_X86_64 case 8: emitter.MOV(64, R(host_reg), regs[reg].loc); break; @@ -698,7 +698,7 @@ void DSPJitRegCache::rotateHostReg(int reg, int shift, bool emit) case 4: emitter.ROR(32, regs[reg].loc, Imm8(shift - regs[reg].shift)); break; -#ifdef _M_X64 +#if _M_X86_64 case 8: emitter.ROR(64, regs[reg].loc, Imm8(shift - regs[reg].shift)); break; @@ -715,7 +715,7 @@ void DSPJitRegCache::rotateHostReg(int reg, int shift, bool emit) case 4: emitter.ROL(32, regs[reg].loc, Imm8(regs[reg].shift - shift)); break; -#ifdef _M_X64 +#if _M_X86_64 case 8: emitter.ROL(64, regs[reg].loc, Imm8(regs[reg].shift - shift)); break; @@ -770,7 +770,7 @@ void DSPJitRegCache::movToMemory(int reg) case 4: emitter.MOV(32, tmp, regs[reg].loc); break; -#ifdef _M_X64 +#if _M_X86_64 case 8: emitter.MOV(64, tmp, regs[reg].loc); break; @@ -837,7 +837,7 @@ void DSPJitRegCache::getReg(int reg, OpArg &oparg, bool load) //do some register specific fixup switch(reg) { -#ifdef _M_X64 +#if _M_X86_64 case DSP_REG_ACC0_64: case DSP_REG_ACC1_64: if (load) @@ -876,7 +876,7 @@ void DSPJitRegCache::putReg(int reg, bool dirty) // (if at all) // sign extend from the bottom 8 bits. 
-#ifndef _M_X64 +#if _M_X86_32 // cannot use movsx with SPL, BPL, SIL or DIL // on 32 bit if (oparg.GetSimpleReg() == RSP || @@ -910,7 +910,7 @@ void DSPJitRegCache::putReg(int reg, bool dirty) } } break; -#ifdef _M_X64 +#if _M_X86_64 case DSP_REG_ACC0_64: case DSP_REG_ACC1_64: if (dirty) @@ -944,7 +944,7 @@ void DSPJitRegCache::readReg(int sreg, X64Reg host_dreg, DSPJitSignExtend extend case 2: switch(extend) { -#ifdef _M_X64 +#if _M_X86_64 case SIGN: emitter.MOVSX(64, 16, host_dreg, reg); break; @@ -965,7 +965,7 @@ void DSPJitRegCache::readReg(int sreg, X64Reg host_dreg, DSPJitSignExtend extend } break; case 4: -#ifdef _M_X64 +#if _M_X86_64 switch(extend) { case SIGN: @@ -982,7 +982,7 @@ void DSPJitRegCache::readReg(int sreg, X64Reg host_dreg, DSPJitSignExtend extend emitter.MOV(32, R(host_dreg), reg); #endif break; -#ifdef _M_X64 +#if _M_X86_64 case 8: emitter.MOV(64, R(host_dreg), reg); break; @@ -1008,7 +1008,7 @@ void DSPJitRegCache::writeReg(int dreg, OpArg arg) case 4: emitter.MOV(32, reg, Imm32((u32) arg.offset)); break; -#ifdef _M_X64 +#if _M_X86_64 case 8: if ((u32) arg.offset == arg.offset) { @@ -1035,7 +1035,7 @@ void DSPJitRegCache::writeReg(int dreg, OpArg arg) case 4: emitter.MOV(32, reg, arg); break; -#ifdef _M_X64 +#if _M_X86_64 case 8: emitter.MOV(64, reg, arg); break; @@ -1051,7 +1051,7 @@ void DSPJitRegCache::writeReg(int dreg, OpArg arg) //ordered in order of prefered use //not all of these are actually available static X64Reg alloc_order[] = { -#ifdef _M_X64 +#if _M_X86_64 R8,R9,R10,R11,R12,R13,R14,R15,RSI,RDI,RBX,RCX,RDX,RAX,RBP #else ESI,EDI,EBX,ECX,EDX,EAX,EBP diff --git a/Source/Core/Core/DSP/Jit/DSPJitRegCache.h b/Source/Core/Core/DSP/Jit/DSPJitRegCache.h index 9603964660..41fa16fc6c 100644 --- a/Source/Core/Core/DSP/Jit/DSPJitRegCache.h +++ b/Source/Core/Core/DSP/Jit/DSPJitRegCache.h @@ -12,7 +12,7 @@ enum DSPJitRegSpecial { DSP_REG_AX0_32 =32, DSP_REG_AX1_32 =33, -#ifdef _M_X64 +#if _M_X86_64 DSP_REG_ACC0_64 =34, DSP_REG_ACC1_64 =35, DSP_REG_PROD_64 =36, @@ -33,7 +33,7 @@ enum DSPJitSignExtend NONE }; -#ifdef _M_X64 +#if _M_X86_64 #define NUMXREGS 16 #else #define NUMXREGS 8 diff --git a/Source/Core/Core/DSP/Jit/DSPJitUtil.cpp b/Source/Core/Core/DSP/Jit/DSPJitUtil.cpp index f32d0e5175..da951be040 100644 --- a/Source/Core/Core/DSP/Jit/DSPJitUtil.cpp +++ b/Source/Core/Core/DSP/Jit/DSPJitUtil.cpp @@ -25,7 +25,7 @@ void DSPEmitter::dsp_reg_stack_push(int stack_reg) gpr.getFreeXReg(tmp1); //g_dsp.reg_stack[stack_reg][g_dsp.reg_stack_ptr[stack_reg]] = g_dsp.r[DSP_REG_ST0 + stack_reg]; MOV(16, R(tmp1), M(&g_dsp.r.st[stack_reg])); -#ifdef _M_X64 +#if _M_X86_64 MOVZX(64, 8, RAX, R(AL)); #else MOVZX(32, 8, EAX, R(AL)); @@ -44,7 +44,7 @@ void DSPEmitter::dsp_reg_stack_pop(int stack_reg) MOV(8, R(AL), M(&g_dsp.reg_stack_ptr[stack_reg])); X64Reg tmp1; gpr.getFreeXReg(tmp1); -#ifdef _M_X64 +#if _M_X86_64 MOVZX(64, 8, RAX, R(AL)); #else MOVZX(32, 8, EAX, R(AL)); @@ -207,14 +207,14 @@ void DSPEmitter::dsp_op_read_reg_dont_saturate(int reg, Gen::X64Reg host_dreg, D switch(extend) { case SIGN: -#ifdef _M_X64 +#if _M_X86_64 MOVSX(64, 16, host_dreg, R(host_dreg)); #else MOVSX(32, 16, host_dreg, R(host_dreg)); #endif break; case ZERO: -#ifdef _M_X64 +#if _M_X86_64 MOVZX(64, 16, host_dreg, R(host_dreg)); #else MOVZX(32, 16, host_dreg, R(host_dreg)); @@ -243,14 +243,14 @@ void DSPEmitter::dsp_op_read_reg(int reg, Gen::X64Reg host_dreg, DSPJitSignExten switch(extend) { case SIGN: -#ifdef _M_X64 +#if _M_X86_64 MOVSX(64, 16, host_dreg, R(host_dreg)); #else MOVSX(32, 16, host_dreg, 
R(host_dreg)); #endif break; case ZERO: -#ifdef _M_X64 +#if _M_X86_64 MOVZX(64, 16, host_dreg, R(host_dreg)); #else MOVZX(32, 16, host_dreg, R(host_dreg)); @@ -265,7 +265,7 @@ void DSPEmitter::dsp_op_read_reg(int reg, Gen::X64Reg host_dreg, DSPJitSignExten case DSP_REG_ACM1: { //we already know this is ACCM0 or ACCM1 -#ifdef _M_X64 +#if _M_X86_64 OpArg acc_reg; gpr.getReg(reg-DSP_REG_ACM0+DSP_REG_ACC0_64, acc_reg); #else @@ -279,7 +279,7 @@ void DSPEmitter::dsp_op_read_reg(int reg, Gen::X64Reg host_dreg, DSPJitSignExten FixupBranch not_40bit = J_CC(CC_Z, true); -#ifdef _M_X64 +#if _M_X86_64 MOVSX(64,32,host_dreg,acc_reg); CMP(64,R(host_dreg),acc_reg); FixupBranch no_saturate = J_CC(CC_Z); @@ -585,7 +585,7 @@ void DSPEmitter::dmem_write(X64Reg value) // g_dsp.dram[addr & DSP_DRAM_MASK] = val; AND(16, R(EAX), Imm16(DSP_DRAM_MASK)); -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(ECX), ImmPtr(g_dsp.dram)); #else MOV(32, R(ECX), ImmPtr(g_dsp.dram)); @@ -610,7 +610,7 @@ void DSPEmitter::dmem_write_imm(u16 address, X64Reg value) switch (address >> 12) { case 0x0: // 0xxx DRAM -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(RDX), ImmPtr(g_dsp.dram)); MOV(16, MDisp(RDX, (address & DSP_DRAM_MASK)*2), R(value)); #else @@ -644,7 +644,7 @@ void DSPEmitter::imem_read(X64Reg address) FixupBranch irom = J_CC(CC_A); // return g_dsp.iram[addr & DSP_IRAM_MASK]; AND(16, R(address), Imm16(DSP_IRAM_MASK)); -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(ECX), ImmPtr(g_dsp.iram)); #else MOV(32, R(ECX), ImmPtr(g_dsp.iram)); @@ -656,7 +656,7 @@ void DSPEmitter::imem_read(X64Reg address) // else if (addr == 0x8) // return g_dsp.irom[addr & DSP_IROM_MASK]; AND(16, R(address), Imm16(DSP_IROM_MASK)); -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(ECX), ImmPtr(g_dsp.irom)); #else MOV(32, R(ECX), ImmPtr(g_dsp.irom)); @@ -676,7 +676,7 @@ void DSPEmitter::dmem_read(X64Reg address) FixupBranch dram = J_CC(CC_A); // return g_dsp.dram[addr & DSP_DRAM_MASK]; AND(32, R(address), Imm32(DSP_DRAM_MASK)); -#ifdef _M_X64 +#if _M_X86_64 MOVZX(64, 16, address, R(address)); MOV(64, R(ECX), ImmPtr(g_dsp.dram)); #else @@ -691,7 +691,7 @@ void DSPEmitter::dmem_read(X64Reg address) FixupBranch ifx = J_CC(CC_A); // return g_dsp.coef[addr & DSP_COEF_MASK]; AND(32, R(address), Imm32(DSP_COEF_MASK)); -#ifdef _M_X64 +#if _M_X86_64 MOVZX(64, 16, address, R(address)); MOV(64, R(ECX), ImmPtr(g_dsp.coef)); #else @@ -718,7 +718,7 @@ void DSPEmitter::dmem_read_imm(u16 address) switch (address >> 12) { case 0x0: // 0xxx DRAM -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(RDX), ImmPtr(g_dsp.dram)); MOV(16, R(EAX), MDisp(RDX, (address & DSP_DRAM_MASK)*2)); #else @@ -727,7 +727,7 @@ void DSPEmitter::dmem_read_imm(u16 address) break; case 0x1: // 1xxx COEF -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(RDX), ImmPtr(g_dsp.coef)); MOV(16, R(EAX), MDisp(RDX, (address & DSP_COEF_MASK)*2)); #else @@ -751,7 +751,7 @@ void DSPEmitter::dmem_read_imm(u16 address) // Returns s64 in RAX void DSPEmitter::get_long_prod(X64Reg long_prod) { -#ifdef _M_X64 +#if _M_X86_64 //s64 val = (s8)(u8)g_dsp.r[DSP_REG_PRODH]; OpArg prod_reg; gpr.getReg(DSP_REG_PROD_64, prod_reg); @@ -775,7 +775,7 @@ void DSPEmitter::get_long_prod(X64Reg long_prod) // Clobbers RCX void DSPEmitter::get_long_prod_round_prodl(X64Reg long_prod) { -#ifdef _M_X64 +#if _M_X86_64 //s64 prod = dsp_get_long_prod(); get_long_prod(long_prod); @@ -804,7 +804,7 @@ void DSPEmitter::get_long_prod_round_prodl(X64Reg long_prod) // In: RAX = s64 val void DSPEmitter::set_long_prod() { -#ifdef _M_X64 +#if _M_X86_64 X64Reg tmp; gpr.getFreeXReg(tmp); @@ -824,7 
+824,7 @@ void DSPEmitter::set_long_prod() // Clobbers RCX void DSPEmitter::round_long_acc(X64Reg long_acc) { -#ifdef _M_X64 +#if _M_X86_64 //if (prod & 0x10000) prod = (prod + 0x8000) & ~0xffff; TEST(32, R(long_acc), Imm32(0x10000)); FixupBranch jump = J_CC(CC_Z); @@ -845,7 +845,7 @@ void DSPEmitter::round_long_acc(X64Reg long_acc) // Returns s64 in acc void DSPEmitter::get_long_acc(int _reg, X64Reg acc) { -#ifdef _M_X64 +#if _M_X86_64 OpArg reg; gpr.getReg(DSP_REG_ACC0_64+_reg, reg); MOV(64, R(acc), reg); @@ -856,7 +856,7 @@ void DSPEmitter::get_long_acc(int _reg, X64Reg acc) // In: acc = s64 val void DSPEmitter::set_long_acc(int _reg, X64Reg acc) { -#ifdef _M_X64 +#if _M_X86_64 OpArg reg; gpr.getReg(DSP_REG_ACC0_64+_reg, reg, false); MOV(64, reg, R(acc)); diff --git a/Source/Core/Core/HW/Memmap.cpp b/Source/Core/Core/HW/Memmap.cpp index ca57d24f44..d61507f205 100644 --- a/Source/Core/Core/HW/Memmap.cpp +++ b/Source/Core/Core/HW/Memmap.cpp @@ -126,7 +126,7 @@ static const MemoryView views[] = // Don't map any memory for the EFB. We want all access to this area to go // through the hardware access handlers. -#ifndef _M_X64 +#if _ARCH_32 // {&m_pEFB, &m_pVirtualEFB, 0xC8000000, EFB_SIZE, 0}, #endif {&m_pL1Cache, &m_pVirtualL1Cache, 0xE0000000, L1_CACHE_SIZE, 0}, diff --git a/Source/Core/Core/HW/Memmap.h b/Source/Core/Core/HW/Memmap.h index 85c2a363d4..8398627c82 100644 --- a/Source/Core/Core/HW/Memmap.h +++ b/Source/Core/Core/HW/Memmap.h @@ -69,7 +69,7 @@ enum ADDR_MASK_HW_ACCESS = 0x0c000000, ADDR_MASK_MEM1 = 0x20000000, -#ifndef _M_X64 +#if _ARCH_32 MEMVIEW32_MASK = 0x3FFFFFFF, #endif }; @@ -99,10 +99,10 @@ inline u8* GetCachePtr() {return m_pL1Cache;} inline u8* GetMainRAMPtr() {return m_pRAM;} inline u32 ReadFast32(const u32 _Address) { -#if defined(_M_X64) - return Common::swap32(*(u32 *)(base + _Address)); -#else +#if _ARCH_32 return Common::swap32(*(u32 *)(base + (_Address & MEMVIEW32_MASK))); // ReadUnchecked_U32(_Address); +#else + return Common::swap32(*(u32 *)(base + _Address)); #endif } diff --git a/Source/Core/Core/PowerPC/Interpreter/Interpreter.cpp b/Source/Core/Core/PowerPC/Interpreter/Interpreter.cpp index 1eb74300bf..6ee978631d 100644 --- a/Source/Core/Core/PowerPC/Interpreter/Interpreter.cpp +++ b/Source/Core/Core/PowerPC/Interpreter/Interpreter.cpp @@ -327,7 +327,7 @@ void Interpreter::ClearCache() const char *Interpreter::GetName() { -#ifdef _M_X64 +#ifdef _ARCH_64 return "Interpreter64"; #else return "Interpreter32"; diff --git a/Source/Core/Core/PowerPC/Jit64/Jit.cpp b/Source/Core/Core/PowerPC/Jit64/Jit.cpp index 05796de9de..b43699fae9 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit.cpp @@ -246,7 +246,7 @@ static void ImHere() { if (!f) { -#ifdef _M_X64 +#if _M_X86_64 f.Open("log64.txt", "w"); #else f.Open("log32.txt", "w"); @@ -696,7 +696,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBloc OR(32, M((void *)&PowerPC::ppcState.Exceptions), Imm32(EXCEPTION_ISI)); // Remove the invalid instruction from the icache, forcing a recompile -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, M(jit->GetBlockCache()->GetICachePtr(js.compilerPC)), Imm32(JIT_ICACHE_INVALID_WORD)); #else MOV(64, R(RAX), ImmPtr(jit->GetBlockCache()->GetICachePtr(js.compilerPC))); @@ -726,7 +726,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBloc u32 Jit64::RegistersInUse() { -#ifdef _M_X64 +#if _M_X86_64 u32 result = 0; for (int i = 0; i < NUMXREGS; i++) { diff --git 
a/Source/Core/Core/PowerPC/Jit64/Jit.h b/Source/Core/Core/PowerPC/Jit64/Jit.h index b1c9d0ef45..80a5e55097 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit.h +++ b/Source/Core/Core/PowerPC/Jit64/Jit.h @@ -76,7 +76,7 @@ public: } const char *GetName() override { -#ifdef _M_X64 +#if _M_X86_64 return "JIT64"; #else return "JIT32"; diff --git a/Source/Core/Core/PowerPC/Jit64/JitAsm.cpp b/Source/Core/Core/PowerPC/Jit64/JitAsm.cpp index 667a6d0816..e690d08974 100644 --- a/Source/Core/Core/PowerPC/Jit64/JitAsm.cpp +++ b/Source/Core/Core/PowerPC/Jit64/JitAsm.cpp @@ -39,7 +39,7 @@ void Jit64AsmRoutineManager::Generate() { enterCode = AlignCode16(); ABI_PushAllCalleeSavedRegsAndAdjustStack(); -#ifndef _M_IX86 +#if _M_X86_64 // Two statically allocated registers. MOV(64, R(RBX), Imm64((u64)Memory::base)); MOV(64, R(R15), Imm64((u64)jit->GetBlockCache()->GetCodePointers())); //It's below 2GB so 32 bits are good enough @@ -87,7 +87,7 @@ void Jit64AsmRoutineManager::Generate() no_mem = J_CC(CC_NZ); } AND(32, R(EAX), Imm32(JIT_ICACHE_MASK)); -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCache)); #else MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCache)); @@ -103,7 +103,7 @@ void Jit64AsmRoutineManager::Generate() TEST(32, R(EAX), Imm32(JIT_ICACHE_VMEM_BIT)); FixupBranch no_vmem = J_CC(CC_Z); AND(32, R(EAX), Imm32(JIT_ICACHE_MASK)); -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCacheVMEM)); #else MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCacheVMEM)); @@ -117,7 +117,7 @@ void Jit64AsmRoutineManager::Generate() TEST(32, R(EAX), Imm32(JIT_ICACHE_EXRAM_BIT)); FixupBranch no_exram = J_CC(CC_Z); AND(32, R(EAX), Imm32(JIT_ICACHEEX_MASK)); -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCacheEx)); #else MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCacheEx)); @@ -138,7 +138,7 @@ void Jit64AsmRoutineManager::Generate() ADD(32, M(&PowerPC::ppcState.DebugCount), Imm8(1)); } //grab from list and jump to it -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, R(EDX), ImmPtr(jit->GetBlockCache()->GetCodePointers())); JMPptr(MComplex(EDX, EAX, 4, 0)); #else @@ -147,7 +147,7 @@ void Jit64AsmRoutineManager::Generate() SetJumpTarget(notfound); //Ok, no block, let's jit -#ifdef _M_IX86 +#if _M_X86_32 ABI_AlignStack(4); PUSH(32, M(&PowerPC::ppcState.pc)); CALL(reinterpret_cast(&Jit)); diff --git a/Source/Core/Core/PowerPC/Jit64/JitRegCache.cpp b/Source/Core/Core/PowerPC/Jit64/JitRegCache.cpp index 05417bfcb2..cbbc60dd6a 100644 --- a/Source/Core/Core/PowerPC/Jit64/JitRegCache.cpp +++ b/Source/Core/Core/PowerPC/Jit64/JitRegCache.cpp @@ -200,13 +200,13 @@ const int *GPRRegCache::GetAllocationOrder(int &count) static const int allocationOrder[] = { // R12, when used as base register, for example in a LEA, can generate bad code! Need to look into this. 
-#ifdef _M_X64 +#if _M_X86_64 #ifdef _WIN32 RSI, RDI, R13, R14, R8, R9, R10, R11, R12, //, RCX #else RBP, R13, R14, R8, R9, R10, R11, R12, //, RCX #endif -#elif _M_IX86 +#elif _M_X86_32 ESI, EDI, EBX, EBP, EDX, ECX, #endif }; @@ -218,9 +218,9 @@ const int *FPURegCache::GetAllocationOrder(int &count) { static const int allocationOrder[] = { -#ifdef _M_X64 +#if _M_X86_64 XMM6, XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5 -#elif _M_IX86 +#elif _M_X86_32 XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, #endif }; diff --git a/Source/Core/Core/PowerPC/Jit64/JitRegCache.h b/Source/Core/Core/PowerPC/Jit64/JitRegCache.h index c27f46680a..b687f17d23 100644 --- a/Source/Core/Core/PowerPC/Jit64/JitRegCache.h +++ b/Source/Core/Core/PowerPC/Jit64/JitRegCache.h @@ -35,9 +35,9 @@ struct X64CachedReg typedef int XReg; typedef int PReg; -#ifdef _M_X64 +#if _M_X86_64 #define NUMXREGS 16 -#elif _M_IX86 +#elif _M_X86_32 #define NUMXREGS 8 #endif diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp index a288efc44e..090ae84014 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp @@ -1298,7 +1298,7 @@ void Jit64::divwux(UGeckoInstruction inst) if (((u64)(magic+1) * (max_quotient*divisor-1)) >> (shift + 32) != max_quotient-1) { // If failed, use slower round-down method -#ifdef _M_X64 +#if _M_X86_64 gpr.Lock(a, b, d); gpr.BindToRegister(d, d == a, true); MOV(32, R(EAX), Imm32(magic)); @@ -1324,7 +1324,7 @@ void Jit64::divwux(UGeckoInstruction inst) else { // If success, use faster round-up method -#ifdef _M_X64 +#if _M_X86_64 gpr.Lock(a, b, d); gpr.BindToRegister(a, true, false); gpr.BindToRegister(d, false, true); @@ -1920,7 +1920,7 @@ void Jit64::srwx(UGeckoInstruction inst) } else { -#ifdef _M_X64 +#if _M_X86_64 gpr.FlushLockX(ECX); gpr.Lock(a, b, s); gpr.BindToRegister(a, (a == b || a == s), true); @@ -1976,7 +1976,7 @@ void Jit64::slwx(UGeckoInstruction inst) } else { -#ifdef _M_X64 +#if _M_X86_64 gpr.FlushLockX(ECX); gpr.Lock(a, b, s); gpr.BindToRegister(a, (a == b || a == s), true); @@ -2030,7 +2030,7 @@ void Jit64::srawx(UGeckoInstruction inst) int a = inst.RA; int b = inst.RB; int s = inst.RS; -#ifdef _M_X64 +#if _M_X86_64 gpr.Lock(a, s, b); gpr.FlushLockX(ECX); gpr.BindToRegister(a, (a == s || a == b), true); diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp index 7c5078a816..7014160a30 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp @@ -242,7 +242,7 @@ void Jit64::dcbz(UGeckoInstruction inst) ADD(32, R(EAX), gpr.R(inst.RA)); AND(32, R(EAX), Imm32(~31)); XORPD(XMM0, R(XMM0)); -#ifdef _M_X64 +#if _M_X86_64 MOVAPS(MComplex(EBX, EAX, SCALE_1, 0), XMM0); MOVAPS(MComplex(EBX, EAX, SCALE_1, 16), XMM0); #else @@ -333,7 +333,7 @@ void Jit64::stX(UGeckoInstruction inst) MOV(32, R(ABI_PARAM1), gpr.R(a)); MOV(32, R(EAX), gpr.R(s)); BSWAP(32, EAX); -#ifdef _M_X64 +#if _M_X86_64 MOV(accessSize, MComplex(RBX, ABI_PARAM1, SCALE_1, (u32)offset), R(EAX)); #else AND(32, R(ABI_PARAM1), Imm32(Memory::MEMVIEW32_MASK)); @@ -351,7 +351,7 @@ void Jit64::stX(UGeckoInstruction inst) } /* // TODO - figure out why Beyond Good and Evil hates this - #if defined(_WIN32) && defined(_M_X64) + #if defined(_WIN32) && _M_X86_64 if (accessSize == 32 && !update) { // Fast and daring - requires 64-bit @@ -437,7 +437,7 @@ void Jit64::lmw(UGeckoInstruction inst) 
INSTRUCTION_START JITDISABLE(bJITLoadStoreOff) -#ifdef _M_X64 +#if _M_X86_64 gpr.FlushLockX(ECX); MOV(32, R(EAX), Imm32((u32)(s32)inst.SIMM_16)); if (inst.RA) @@ -460,7 +460,7 @@ void Jit64::stmw(UGeckoInstruction inst) INSTRUCTION_START JITDISABLE(bJITLoadStoreOff) -#ifdef _M_X64 +#if _M_X86_64 gpr.FlushLockX(ECX); MOV(32, R(EAX), Imm32((u32)(s32)inst.SIMM_16)); if (inst.RA) diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStoreFloating.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStoreFloating.cpp index 6d8879a722..49868d8a7f 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStoreFloating.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStoreFloating.cpp @@ -80,7 +80,7 @@ void Jit64::lfd(UGeckoInstruction inst) if (cpu_info.bSSSE3) { -#ifdef _M_X64 +#if _M_X86_64 MOVQ_xmm(XMM0, MComplex(RBX, ABI_PARAM1, SCALE_1, offset)); #else AND(32, R(ABI_PARAM1), Imm32(Memory::MEMVIEW32_MASK)); @@ -89,7 +89,7 @@ void Jit64::lfd(UGeckoInstruction inst) PSHUFB(XMM0, M((void *)bswapShuffle1x8Dupe)); MOVSD(xd, R(XMM0)); } else { -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(EAX), MComplex(RBX, ABI_PARAM1, SCALE_1, offset)); BSWAP(64, EAX); MOV(64, M(&temp64), R(EAX)); @@ -165,7 +165,7 @@ void Jit64::stfd(UGeckoInstruction inst) if (cpu_info.bSSSE3) { MOVAPD(XMM0, fpr.R(s)); PSHUFB(XMM0, M((void*)bswapShuffle1x8)); -#ifdef _M_X64 +#if _M_X86_64 MOVQ_xmm(MComplex(RBX, ABI_PARAM1, SCALE_1, 0), XMM0); #else AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK)); @@ -307,7 +307,7 @@ void Jit64::lfsx(UGeckoInstruction inst) fpr.BindToRegister(inst.RS, false); X64Reg s = fpr.RX(inst.RS); if (cpu_info.bSSSE3 && !js.memcheck) { -#ifdef _M_IX86 +#if _M_X86_32 AND(32, R(EAX), Imm32(Memory::MEMVIEW32_MASK)); MOVD_xmm(XMM0, MDisp(EAX, (u32)Memory::base)); #else diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStorePaired.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStorePaired.cpp index 14c5c6164a..0b830d88aa 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStorePaired.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStorePaired.cpp @@ -47,7 +47,7 @@ void Jit64::psq_st(UGeckoInstruction inst) MOVZX(32, 16, EAX, M(&PowerPC::ppcState.spr[SPR_GQR0 + inst.I])); MOVZX(32, 8, EDX, R(AL)); // FIXME: Fix ModR/M encoding to allow [EDX*4+disp32] without a base register! -#ifdef _M_IX86 +#if _M_X86_32 int addr_scale = SCALE_4; #else int addr_scale = SCALE_8; @@ -96,7 +96,7 @@ void Jit64::psq_l(UGeckoInstruction inst) MOVZX(32, 8, EDX, R(AL)); if (inst.W) OR(32, R(EDX), Imm8(8)); -#ifdef _M_IX86 +#if _M_X86_32 int addr_scale = SCALE_4; #else int addr_scale = SCALE_8; diff --git a/Source/Core/Core/PowerPC/Jit64IL/IR_X86.cpp b/Source/Core/Core/PowerPC/Jit64IL/IR_X86.cpp index 8a0843eb67..d88d749aa4 100644 --- a/Source/Core/Core/PowerPC/Jit64IL/IR_X86.cpp +++ b/Source/Core/Core/PowerPC/Jit64IL/IR_X86.cpp @@ -61,7 +61,7 @@ struct RegInfo { }; static u32 regsInUse(RegInfo& R) { -#ifdef _M_X64 +#if _M_X86_64 u32 result = 0; for (unsigned i = 0; i < MAX_NUMBER_OF_REGS; i++) { @@ -140,7 +140,7 @@ static void fregSpill(RegInfo& RI, X64Reg reg) { } // ECX is scratch, so we don't allocate it -#ifdef _M_X64 +#if _M_X86_64 // 64-bit - calling conventions differ between linux & windows, so... 
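Aside: the lfd hunks above keep two code paths, and the non-SSSE3 one loads the 64-bit guest value, byte-swaps it, and reinterprets it as a double. A rough host-side sketch of that computation; LoadBigEndianDouble, swap64 and guest_ram are stand-in names, and a little-endian host is assumed (the only case these JIT paths target).

	// be_double.cpp - illustrative only, not part of the patch
	#include <cstdint>
	#include <cstdio>
	#include <cstring>

	static inline uint64_t swap64(uint64_t v)
	{
		return __builtin_bswap64(v);   // GCC/Clang builtin; MSVC would use _byteswap_uint64
	}

	static double LoadBigEndianDouble(const uint8_t* base, uint32_t address)
	{
		uint64_t raw;
		std::memcpy(&raw, base + address, sizeof(raw));  // guest memory is big-endian
		raw = swap64(raw);                               // host assumed little-endian
		double result;
		std::memcpy(&result, &raw, sizeof(result));      // bit-cast, no conversion
		return result;
	}

	int main()
	{
		// 1.0 as a big-endian IEEE754 double: 3F F0 00 00 00 00 00 00
		const uint8_t guest_ram[8] = {0x3F, 0xF0, 0, 0, 0, 0, 0, 0};
		std::printf("%f\n", LoadBigEndianDouble(guest_ram, 0));
		return 0;
	}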
#ifdef _WIN32 @@ -259,7 +259,7 @@ static X64Reg fregEnsureInReg(RegInfo& RI, InstLoc I) { } static void regSpillCallerSaved(RegInfo& RI) { -#ifdef _M_IX86 +#if _M_X86_32 // 32-bit regSpill(RI, EDX); regSpill(RI, ECX); @@ -1134,7 +1134,7 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress) { if (cpu_info.bSSSE3) { static const u32 GC_ALIGNED16(maskSwapa64_1[4]) = {0x04050607L, 0x00010203L, 0xFFFFFFFFL, 0xFFFFFFFFL}; -#ifdef _M_X64 +#if _M_X86_64 // TODO: Remove regEnsureInReg() and use ECX X64Reg address = regEnsureInReg(RI, getOp1(I)); Jit->MOVQ_xmm(reg, MComplex(RBX, address, SCALE_1, 0)); @@ -1170,7 +1170,7 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress) { Jit->MOVZX(32, 16, EAX, M(((char *)&GQR(quantreg)) + 2)); Jit->MOVZX(32, 8, EDX, R(AL)); Jit->OR(32, R(EDX), Imm8(w << 3)); -#ifdef _M_IX86 +#if _M_X86_32 int addr_scale = SCALE_4; #else int addr_scale = SCALE_8; @@ -1223,7 +1223,7 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress) { X64Reg value = fregBinLHSRegWithMov(RI, I); Jit->PSHUFB(value, M((void*)maskSwapa64_1)); Jit->MOV(32, R(ECX), regLocForInst(RI, getOp2(I))); -#ifdef _M_X64 +#if _M_X86_64 Jit->MOVQ_xmm(MComplex(RBX, ECX, SCALE_1, 0), value); #else Jit->AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK)); @@ -1274,7 +1274,7 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress) { u32 quantreg = *I >> 24; Jit->MOVZX(32, 16, EAX, M(&PowerPC::ppcState.spr[SPR_GQR0 + quantreg])); Jit->MOVZX(32, 8, EDX, R(AL)); -#ifdef _M_IX86 +#if _M_X86_32 int addr_scale = SCALE_4; #else int addr_scale = SCALE_8; @@ -1763,7 +1763,7 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress) { Jit->OR(32, M((void *)&PowerPC::ppcState.Exceptions), Imm32(EXCEPTION_ISI)); // Remove the invalid instruction from the icache, forcing a recompile -#ifdef _M_IX86 +#if _M_X86_32 Jit->MOV(32, M(jit->GetBlockCache()->GetICachePtr(InstLoc)), Imm32(JIT_ICACHE_INVALID_WORD)); #else Jit->MOV(64, R(RAX), ImmPtr(jit->GetBlockCache()->GetICachePtr(InstLoc))); diff --git a/Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp b/Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp index 6f71dd3ae8..3bceaf2fc8 100644 --- a/Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp +++ b/Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp @@ -346,7 +346,7 @@ static void ImHere() { if (!f) { -#ifdef _M_X64 +#if _M_X86_64 f.Open("log64.txt", "w"); #else f.Open("log32.txt", "w"); diff --git a/Source/Core/Core/PowerPC/Jit64IL/JitIL.h b/Source/Core/Core/PowerPC/Jit64IL/JitIL.h index 146f905f73..b3178cd2e7 100644 --- a/Source/Core/Core/PowerPC/Jit64IL/JitIL.h +++ b/Source/Core/Core/PowerPC/Jit64IL/JitIL.h @@ -36,7 +36,7 @@ #include "Core/PowerPC/JitILCommon/IR.h" #include "Core/PowerPC/JitILCommon/JitILBase.h" -#ifdef _M_X64 +#if _M_X86_64 #define DISABLE64 \ {Default(inst); return;} #else @@ -82,7 +82,7 @@ public: } const char *GetName() override { -#ifdef _M_X64 +#if _M_X86_64 return "JIT64IL"; #else return "JIT32IL"; diff --git a/Source/Core/Core/PowerPC/Jit64IL/JitILAsm.cpp b/Source/Core/Core/PowerPC/Jit64IL/JitILAsm.cpp index 79ea71a811..030ffdc817 100644 --- a/Source/Core/Core/PowerPC/Jit64IL/JitILAsm.cpp +++ b/Source/Core/Core/PowerPC/Jit64IL/JitILAsm.cpp @@ -39,7 +39,7 @@ void JitILAsmRoutineManager::Generate() { enterCode = AlignCode16(); ABI_PushAllCalleeSavedRegsAndAdjustStack(); -#ifndef _M_IX86 +#if _M_X86_64 // Two statically allocated registers. 
MOV(64, R(RBX), Imm64((u64)Memory::base)); MOV(64, R(R15), Imm64((u64)jit->GetBlockCache()->GetCodePointers())); //It's below 2GB so 32 bits are good enough @@ -89,7 +89,7 @@ void JitILAsmRoutineManager::Generate() no_mem = J_CC(CC_NZ); } AND(32, R(EAX), Imm32(JIT_ICACHE_MASK)); -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCache)); #else MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCache)); @@ -105,7 +105,7 @@ void JitILAsmRoutineManager::Generate() TEST(32, R(EAX), Imm32(JIT_ICACHE_VMEM_BIT)); FixupBranch no_vmem = J_CC(CC_Z); AND(32, R(EAX), Imm32(JIT_ICACHE_MASK)); -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCacheVMEM)); #else MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCacheVMEM)); @@ -119,7 +119,7 @@ void JitILAsmRoutineManager::Generate() TEST(32, R(EAX), Imm32(JIT_ICACHE_EXRAM_BIT)); FixupBranch no_exram = J_CC(CC_Z); AND(32, R(EAX), Imm32(JIT_ICACHEEX_MASK)); -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCacheEx)); #else MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCacheEx)); @@ -140,7 +140,7 @@ void JitILAsmRoutineManager::Generate() ADD(32, M(&PowerPC::ppcState.DebugCount), Imm8(1)); } //grab from list and jump to it -#ifdef _M_IX86 +#if _M_X86_32 MOV(32, R(EDX), ImmPtr(jit->GetBlockCache()->GetCodePointers())); JMPptr(MComplex(EDX, EAX, 4, 0)); #else @@ -149,7 +149,7 @@ void JitILAsmRoutineManager::Generate() SetJumpTarget(notfound); //Ok, no block, let's jit -#ifdef _M_IX86 +#if _M_X86_32 ABI_AlignStack(4); PUSH(32, M(&PowerPC::ppcState.pc)); CALL(reinterpret_cast(&Jit)); diff --git a/Source/Core/Core/PowerPC/JitCommon/JitAsmCommon.cpp b/Source/Core/Core/PowerPC/JitCommon/JitAsmCommon.cpp index a448aac6a7..2856feec2a 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitAsmCommon.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/JitAsmCommon.cpp @@ -144,7 +144,7 @@ void CommonAsmRoutines::GenQuantizedStores() UD2(); const u8* storePairedFloat = AlignCode4(); -#ifdef _M_X64 +#if _M_X86_64 SHUFPS(XMM0, R(XMM0), 1); MOVQ_xmm(M(&psTemp[0]), XMM0); TEST(32, R(ECX), Imm32(0x0C000000)); @@ -362,7 +362,7 @@ void CommonAsmRoutines::GenQuantizedLoads() const u8* loadPairedFloatTwo = AlignCode4(); if (cpu_info.bSSSE3) { -#ifdef _M_X64 +#if _M_X86_64 MOVQ_xmm(XMM0, MComplex(RBX, RCX, 1, 0)); #else AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK)); @@ -370,7 +370,7 @@ void CommonAsmRoutines::GenQuantizedLoads() #endif PSHUFB(XMM0, M((void *)pbswapShuffle2x4)); } else { -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(RCX), MComplex(RBX, RCX, 1, 0)); BSWAP(64, RCX); ROL(64, R(RCX), Imm8(32)); @@ -401,7 +401,7 @@ void CommonAsmRoutines::GenQuantizedLoads() const u8* loadPairedFloatOne = AlignCode4(); if (cpu_info.bSSSE3) { -#ifdef _M_X64 +#if _M_X86_64 MOVD_xmm(XMM0, MComplex(RBX, RCX, 1, 0)); #else AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK)); @@ -410,7 +410,7 @@ void CommonAsmRoutines::GenQuantizedLoads() PSHUFB(XMM0, M((void *)pbswapShuffle1x4)); UNPCKLPS(XMM0, M((void*)m_one)); } else { -#ifdef _M_X64 +#if _M_X86_64 MOV(32, R(RCX), MComplex(RBX, RCX, 1, 0)); BSWAP(32, RCX); MOVD_xmm(XMM0, R(RCX)); diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp index e65d614187..6cde1d388b 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp @@ -21,13 +21,13 @@ using namespace Gen; extern u8 *trampolineCodePtr; -#ifdef _M_X64 +#if _M_X86_64 static 
void BackPatchError(const std::string &text, u8 *codePtr, u32 emAddress) { u64 code_addr = (u64)codePtr; disassembler disasm; char disbuf[256]; memset(disbuf, 0, 256); -#ifdef _M_IX86 +#if _M_X86_32 disasm.disasm32(0, code_addr, codePtr, disbuf); #else disasm.disasm64(0, code_addr, codePtr, disbuf); @@ -57,7 +57,7 @@ const u8 *TrampolineCache::GetReadTrampoline(const InstructionInfo &info, u32 re PanicAlert("Trampoline cache full"); const u8 *trampoline = GetCodePtr(); -#ifdef _M_X64 +#if _M_X86_64 X64Reg addrReg = (X64Reg)info.scaledReg; X64Reg dataReg = (X64Reg)info.regOperandReg; @@ -105,7 +105,7 @@ const u8 *TrampolineCache::GetWriteTrampoline(const InstructionInfo &info, u32 r const u8 *trampoline = GetCodePtr(); -#ifdef _M_X64 +#if _M_X86_64 X64Reg dataReg = (X64Reg)info.regOperandReg; X64Reg addrReg = (X64Reg)info.scaledReg; @@ -167,7 +167,7 @@ const u8 *TrampolineCache::GetWriteTrampoline(const InstructionInfo &info, u32 r // that many of them in a typical program/game. const u8 *Jitx86Base::BackPatch(u8 *codePtr, u32 emAddress, void *ctx_void) { -#ifdef _M_X64 +#if _M_X86_64 SContext *ctx = (SContext *)ctx_void; if (!jit->IsInCodeSpace(codePtr)) diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.h b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.h index 44cdfa00d0..3147f32d31 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.h +++ b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.h @@ -12,7 +12,7 @@ #if defined(_WIN32) #include typedef CONTEXT SContext; - #if defined(_M_X64) + #if _M_X86_64 #define CTX_RAX Rax #define CTX_RBX Rbx #define CTX_RCX Rcx @@ -30,7 +30,7 @@ #define CTX_R14 R14 #define CTX_R15 R15 #define CTX_RIP Rip - #elif defined(_M_IX86) + #elif _M_X86_32 #define CTX_EAX Eax #define CTX_EBX Ebx #define CTX_ECX Ecx @@ -46,7 +46,7 @@ #elif defined(__APPLE__) #include #include - #if defined(_M_X64) + #if _M_X86_64 typedef x86_thread_state64_t SContext; #define CTX_RAX __rax #define CTX_RBX __rbx @@ -65,7 +65,7 @@ #define CTX_R14 __r14 #define CTX_R15 __r15 #define CTX_RIP __rip - #elif defined(_M_IX86) + #elif _M_X86_32 typedef x86_thread_state32_t SContext; #define CTX_EAX __eax #define CTX_EBX __ebx @@ -76,16 +76,12 @@ #define CTX_EBP __ebp #define CTX_ESP __esp #define CTX_EIP __eip - #elif defined(_M_ARM) - typedef arm_thread_state_t SContext; - // Add others if required. - #define CTX_PC __pc #else #error No context definition for OS #endif #elif defined(__linux__) #include - #if defined(_M_X64) + #if _M_X86_64 #include typedef mcontext_t SContext; #define CTX_RAX gregs[REG_RAX] @@ -105,7 +101,7 @@ #define CTX_R14 gregs[REG_R14] #define CTX_R15 gregs[REG_R15] #define CTX_RIP gregs[REG_RIP] - #elif defined(_M_IX86) + #elif _M_X86_32 #ifdef ANDROID #include typedef sigcontext SContext; @@ -131,7 +127,7 @@ #define CTX_ESP gregs[REG_ESP] #define CTX_EIP gregs[REG_EIP] #endif - #elif defined(_M_ARM) + #elif _M_ARM_32 // Add others if required. 
typedef struct sigcontext SContext; #define CTX_PC arm_pc @@ -141,7 +137,7 @@ #elif defined(__NetBSD__) #include typedef mcontext_t SContext; - #if defined(_M_X64) + #if _M_X86_64 #define CTX_RAX __gregs[_REG_RAX] #define CTX_RBX __gregs[_REG_RBX] #define CTX_RCX __gregs[_REG_RCX] @@ -159,7 +155,7 @@ #define CTX_R14 __gregs[_REG_R14] #define CTX_R15 __gregs[_REG_R15] #define CTX_RIP __gregs[_REG_RIP] - #elif defined(_M_IX86) + #elif _M_X86_32 #define CTX_EAX __gregs[__REG_EAX] #define CTX_EBX __gregs[__REG_EBX] #define CTX_ECX __gregs[__REG_ECX] @@ -175,7 +171,7 @@ #elif defined(__FreeBSD__) #include typedef mcontext_t SContext; - #if defined(_M_X64) + #if _M_X86_64 #define CTX_RAX mc_rax #define CTX_RBX mc_rbx #define CTX_RCX mc_rcx @@ -193,7 +189,7 @@ #define CTX_R14 mc_r14 #define CTX_R15 mc_r15 #define CTX_RIP mc_rip - #elif defined(_M_IX86) + #elif _M_X86_32 #define CTX_EAX mc_eax #define CTX_EBX mc_ebx #define CTX_ECX mc_ecx @@ -208,7 +204,7 @@ #endif #endif -#if defined(_M_X64) +#if _M_X86_64 #define CTX_PC CTX_RIP #include static inline u64 *ContextRN(SContext* ctx, int n) @@ -234,7 +230,7 @@ static inline u64 *ContextRN(SContext* ctx, int n) }; return (u64 *) ((char *) ctx + offsets[n]); } -#elif defined(_M_IX86) +#elif _M_X86_32 #define CTX_PC CTX_EIP #endif diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp b/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp index dc49c89b52..39798ef0f0 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp @@ -47,7 +47,7 @@ void LogGeneratedX86(int size, PPCAnalyst::CodeBuffer *code_buffer, const u8 *no while ((u8*)disasmPtr < end) { char sptr[1000] = ""; -#ifdef _M_X64 +#if _ARCH_64 disasmPtr += x64disasm.disasm64(disasmPtr, disasmPtr, (u8*)disasmPtr, sptr); #else disasmPtr += x64disasm.disasm32(disasmPtr, disasmPtr, (u8*)disasmPtr, sptr); diff --git a/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp b/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp index d9f33e535a..8140c235eb 100644 --- a/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp @@ -16,7 +16,7 @@ static u32 GC_ALIGNED16(float_buffer); void EmuCodeBlock::UnsafeLoadRegToReg(X64Reg reg_addr, X64Reg reg_value, int accessSize, s32 offset, bool signExtend) { -#ifdef _M_X64 +#if _M_X86_64 MOVZX(32, accessSize, reg_value, MComplex(RBX, reg_addr, SCALE_1, offset)); #else AND(32, R(reg_addr), Imm32(Memory::MEMVIEW32_MASK)); @@ -43,7 +43,7 @@ void EmuCodeBlock::UnsafeLoadRegToReg(X64Reg reg_addr, X64Reg reg_value, int acc void EmuCodeBlock::UnsafeLoadRegToRegNoSwap(X64Reg reg_addr, X64Reg reg_value, int accessSize, s32 offset) { -#ifdef _M_X64 +#if _M_X86_64 MOVZX(32, accessSize, reg_value, MComplex(RBX, reg_addr, SCALE_1, offset)); #else AND(32, R(reg_addr), Imm32(Memory::MEMVIEW32_MASK)); @@ -54,7 +54,7 @@ void EmuCodeBlock::UnsafeLoadRegToRegNoSwap(X64Reg reg_addr, X64Reg reg_value, i u8 *EmuCodeBlock::UnsafeLoadToReg(X64Reg reg_value, Gen::OpArg opAddress, int accessSize, s32 offset, bool signExtend) { u8 *result; -#ifdef _M_X64 +#if _M_X86_64 if (opAddress.IsSimpleReg()) { // Deal with potential wraparound. 
(This is just a heuristic, and it would @@ -124,7 +124,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress, { registersInUse &= ~(1 << RAX | 1 << reg_value); } -#if defined(_M_X64) +#if _M_X86_64 if (!Core::g_CoreStartupParameter.bMMU && Core::g_CoreStartupParameter.bFastmem && !opAddress.IsImm() && @@ -268,7 +268,7 @@ u8 *EmuCodeBlock::UnsafeWriteRegToReg(X64Reg reg_value, X64Reg reg_addr, int acc PanicAlert("WARNING: likely incorrect use of UnsafeWriteRegToReg!"); } if (swap) BSWAP(accessSize, reg_value); -#ifdef _M_X64 +#if _M_X86_64 result = GetWritableCodePtr(); MOV(accessSize, MComplex(RBX, reg_addr, SCALE_1, offset), R(reg_value)); #else @@ -283,7 +283,7 @@ u8 *EmuCodeBlock::UnsafeWriteRegToReg(X64Reg reg_value, X64Reg reg_addr, int acc void EmuCodeBlock::SafeWriteRegToReg(X64Reg reg_value, X64Reg reg_addr, int accessSize, s32 offset, u32 registersInUse, int flags) { registersInUse &= ~(1 << RAX); -#if defined(_M_X64) +#if _M_X86_64 if (!Core::g_CoreStartupParameter.bMMU && Core::g_CoreStartupParameter.bFastmem && !(flags & (SAFE_LOADSTORE_NO_SWAP | SAFE_LOADSTORE_NO_FASTMEM)) @@ -366,7 +366,7 @@ void EmuCodeBlock::SafeWriteFloatToReg(X64Reg xmm_value, X64Reg reg_addr, u32 re FixupBranch arg2 = J(); SetJumpTarget(argh); PSHUFB(xmm_value, M((void *)pbswapShuffle1x4)); -#ifdef _M_X64 +#if _M_X86_64 MOVD_xmm(MComplex(RBX, reg_addr, SCALE_1, 0), xmm_value); #else AND(32, R(reg_addr), Imm32(Memory::MEMVIEW32_MASK)); @@ -382,7 +382,7 @@ void EmuCodeBlock::SafeWriteFloatToReg(X64Reg xmm_value, X64Reg reg_addr, u32 re void EmuCodeBlock::WriteToConstRamAddress(int accessSize, const Gen::OpArg& arg, u32 address) { -#ifdef _M_X64 +#if _M_X86_64 MOV(accessSize, MDisp(RBX, address & 0x3FFFFFFF), arg); #else MOV(accessSize, M((void*)(Memory::base + (address & Memory::MEMVIEW32_MASK))), arg); @@ -391,7 +391,7 @@ void EmuCodeBlock::WriteToConstRamAddress(int accessSize, const Gen::OpArg& arg, void EmuCodeBlock::WriteFloatToConstRamAddress(const Gen::X64Reg& xmm_reg, u32 address) { -#ifdef _M_X64 +#if _M_X86_64 MOV(32, R(RAX), Imm32(address)); MOVSS(MComplex(RBX, RAX, 1, 0), xmm_reg); #else @@ -420,7 +420,7 @@ void EmuCodeBlock::ForceSinglePrecisionP(X64Reg xmm) { static u32 GC_ALIGNED16(temp32); static u64 GC_ALIGNED16(temp64); -#ifdef _M_X64 +#if _M_X86_64 static const __m128i GC_ALIGNED16(single_qnan_bit) = _mm_set_epi64x(0, 0x0000000000400000); static const __m128i GC_ALIGNED16(single_exponent) = _mm_set_epi64x(0, 0x000000007f800000); static const __m128i GC_ALIGNED16(double_qnan_bit) = _mm_set_epi64x(0, 0x0008000000000000); @@ -445,7 +445,7 @@ static const __m128i GC_ALIGNED16(double_exponent) = _mm_set_epi32(0, 0, 0x7ff00 //#define MORE_ACCURATE_DOUBLETOSINGLE #ifdef MORE_ACCURATE_DOUBLETOSINGLE -#ifdef _M_X64 +#if _M_X86_64 static const __m128i GC_ALIGNED16(double_fraction) = _mm_set_epi64x(0, 0x000fffffffffffff); static const __m128i GC_ALIGNED16(double_sign_bit) = _mm_set_epi64x(0, 0x8000000000000000); static const __m128i GC_ALIGNED16(double_explicit_top_bit) = _mm_set_epi64x(0, 0x0010000000000000); diff --git a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp index 67a140c2e6..5f3787eb1b 100644 --- a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp @@ -118,7 +118,7 @@ void JitILBase::dcbz(UGeckoInstruction inst) ADD(32, R(EAX), gpr.R(inst.RA)); AND(32, R(EAX), Imm32(~31)); XORPD(XMM0, R(XMM0)); -#ifdef 
_M_X64 +#if _M_X86_64 MOVAPS(MComplex(EBX, EAX, SCALE_1, 0), XMM0); MOVAPS(MComplex(EBX, EAX, SCALE_1, 16), XMM0); #else diff --git a/Source/Core/Core/PowerPC/JitInterface.cpp b/Source/Core/Core/PowerPC/JitInterface.cpp index 8ee54c60b8..6cfd96f65e 100644 --- a/Source/Core/Core/PowerPC/JitInterface.cpp +++ b/Source/Core/Core/PowerPC/JitInterface.cpp @@ -16,14 +16,14 @@ #include "Core/PowerPC/Profiler.h" #include "Core/PowerPC/JitCommon/JitBase.h" -#ifndef _M_GENERIC +#if _M_X86 #include "Core/PowerPC/Jit64/Jit.h" #include "Core/PowerPC/Jit64/Jit64_Tables.h" #include "Core/PowerPC/Jit64IL/JitIL.h" #include "Core/PowerPC/Jit64IL/JitIL_Tables.h" #endif -#ifdef _M_ARM +#if _M_ARM_32 #include "Core/PowerPC/JitArm32/Jit.h" #include "Core/PowerPC/JitArm32/JitArm_Tables.h" #include "Core/PowerPC/JitArmIL/JitIL.h" @@ -48,7 +48,7 @@ namespace JitInterface CPUCoreBase *ptr = NULL; switch(core) { - #ifndef _M_GENERIC + #if _M_X86 case 1: { ptr = new Jit64(); @@ -60,7 +60,7 @@ namespace JitInterface break; } #endif - #ifdef _M_ARM + #if _M_ARM_32 case 3: { ptr = new JitArm(); @@ -87,7 +87,7 @@ namespace JitInterface { switch(core) { - #ifndef _M_GENERIC + #if _M_X86 case 1: { Jit64Tables::InitTables(); @@ -99,7 +99,7 @@ namespace JitInterface break; } #endif - #ifdef _M_ARM + #if _M_ARM_32 case 3: { JitArmTables::InitTables(); @@ -126,7 +126,7 @@ namespace JitInterface void WriteProfileResults(const char *filename) { // Can't really do this with no jit core available - #ifndef _M_GENERIC + #if _M_X86 std::vector stats; stats.reserve(jit->GetBlockCache()->GetNumBlocks()); diff --git a/Source/Core/Core/PowerPC/Profiler.h b/Source/Core/Core/PowerPC/Profiler.h index 771dc37a1e..24da8b8e9f 100644 --- a/Source/Core/Core/PowerPC/Profiler.h +++ b/Source/Core/Core/PowerPC/Profiler.h @@ -7,7 +7,7 @@ #ifdef _WIN32 -#ifdef _M_IX86 +#if _M_X86_32 #define PROFILER_QUERY_PERFORMANCE_COUNTER(pt) \ LEA(32, EAX, M(pt)); PUSH(EAX); \ CALL(QueryPerformanceCounter) diff --git a/Source/Core/Core/x64MemTools.cpp b/Source/Core/Core/x64MemTools.cpp index 241cc44ace..9554164262 100644 --- a/Source/Core/Core/x64MemTools.cpp +++ b/Source/Core/Core/x64MemTools.cpp @@ -51,7 +51,7 @@ bool DoFault(u64 bad_address, SContext *ctx) u64 memspace_bottom = (u64)Memory::base; u64 memspace_top = memspace_bottom + -#ifdef _M_X64 +#if _ARCH_64 0x100000000ULL; #else 0x40000000; @@ -131,7 +131,7 @@ LONG NTAPI Handler(PEXCEPTION_POINTERS pPtrs) void InstallExceptionHandler() { -#ifdef _M_X64 +#if _M_X86_64 // Make sure this is only called once per process execution // Instead, could make a Uninstall function, but whatever.. 
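Aside: the x64MemTools and JitBackpatch changes above all revolve around reading the faulting program counter out of an OS-specific context structure through the CTX_* aliases. A minimal sketch of that mechanism, assuming Linux/x86-64 with glibc; the handler, names, and the bare report-and-exit behavior are stand-ins, not Dolphin's actual backpatching logic.

	// segv_pc.cpp - illustrative only, not part of the patch
	#ifndef _GNU_SOURCE
	#define _GNU_SOURCE            // for REG_RIP in <sys/ucontext.h>
	#endif
	#include <csignal>
	#include <cstdio>
	#include <cstdlib>
	#include <ucontext.h>

	static void segv_handler(int, siginfo_t* info, void* raw_context)
	{
		ucontext_t* context = static_cast<ucontext_t*>(raw_context);
		// On this platform CTX_PC resolves to gregs[REG_RIP].
		void* fault_pc = reinterpret_cast<void*>(context->uc_mcontext.gregs[REG_RIP]);
		// Dolphin would check whether fault_pc lies in JIT code space and patch
		// the emitted access; this sketch just reports and exits.
		std::fprintf(stderr, "SIGSEGV at pc=%p, address=%p\n", fault_pc, info->si_addr);
		std::_Exit(1);
	}

	int main()
	{
		struct sigaction sa = {};
		sa.sa_sigaction = segv_handler;
		sa.sa_flags = SA_SIGINFO;
		sigemptyset(&sa.sa_mask);
		sigaction(SIGSEGV, &sa, nullptr);

		volatile int* bad = nullptr;
		return *bad;               // provoke the fault so the handler runs
	}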
static bool handlerInstalled = false; @@ -153,7 +153,7 @@ void CheckKR(const char* name, kern_return_t kr) } } -#ifdef _M_X64 +#if _M_X86_64 void ExceptionThread(mach_port_t port) { Common::SetCurrentThreadName("Mach exception thread"); @@ -248,7 +248,7 @@ void ExceptionThread(mach_port_t port) void InstallExceptionHandler() { -#ifdef _M_IX86 +#if _M_X86_32 PanicAlertT("InstallExceptionHandler called, but this platform does not yet support it."); #else mach_port_t port; @@ -299,7 +299,7 @@ void sigsegv_handler(int sig, siginfo_t *info, void *raw_context) void InstallExceptionHandler() { -#ifdef _M_IX86 +#if _M_X86_32 PanicAlertT("InstallExceptionHandler called, but this platform does not yet support it."); #else struct sigaction sa; diff --git a/Source/Core/DolphinWX/Debugger/JitWindow.cpp b/Source/Core/DolphinWX/Debugger/JitWindow.cpp index 61810a032a..8aa3bcdfc0 100644 --- a/Source/Core/DolphinWX/Debugger/JitWindow.cpp +++ b/Source/Core/DolphinWX/Debugger/JitWindow.cpp @@ -136,7 +136,7 @@ void CJitWindow::Compare(u32 em_address) int num_x86_instructions = 0; while ((u8*)disasmPtr < end) { -#ifdef _M_X64 +#if _M_X86_64 disasmPtr += x64disasm.disasm64(disasmPtr, disasmPtr, (u8*)disasmPtr, sptr); #else disasmPtr += x64disasm.disasm32(disasmPtr, disasmPtr, (u8*)disasmPtr, sptr); diff --git a/Source/Core/DolphinWX/Main.cpp b/Source/Core/DolphinWX/Main.cpp index f86a9db5e5..8b29432b72 100644 --- a/Source/Core/DolphinWX/Main.cpp +++ b/Source/Core/DolphinWX/Main.cpp @@ -118,7 +118,7 @@ LONG WINAPI MyUnhandledExceptionFilter(LPEXCEPTION_POINTERS e) { //dumpCurrentDate(file); etfprintf(file.GetHandle(), "Unhandled Exception\n Code: 0x%08X\n", e->ExceptionRecord->ExceptionCode); -#ifndef _M_X64 +#if _M_X86_32 STACKTRACE2(file.GetHandle(), e->ContextRecord->Eip, e->ContextRecord->Esp, e->ContextRecord->Ebp); #else STACKTRACE2(file.GetHandle(), e->ContextRecord->Rip, e->ContextRecord->Rsp, e->ContextRecord->Rbp); diff --git a/Source/Core/DolphinWX/stdafx.h b/Source/Core/DolphinWX/stdafx.h index c66951d4f0..8adaf25e66 100644 --- a/Source/Core/DolphinWX/stdafx.h +++ b/Source/Core/DolphinWX/stdafx.h @@ -15,7 +15,7 @@ #define WIN32_LEAN_AND_MEAN #include // wxWidgets -#if defined _M_IX86 +#if _M_X86_32 #pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='x86' publicKeyToken='6595b64144ccf1df' language='*'\"") @@ -23,7 +23,7 @@ #pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='ia64' publicKeyToken='6595b64144ccf1df' language='*'\"") -#elif defined _M_X64 +#elif _M_X86_64 #pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='amd64' publicKeyToken='6595b64144ccf1df' language='*'\"") diff --git a/Source/Core/VideoBackends/Software/Rasterizer.cpp b/Source/Core/VideoBackends/Software/Rasterizer.cpp index 8c59455531..13f1d89642 100644 --- a/Source/Core/VideoBackends/Software/Rasterizer.cpp +++ b/Source/Core/VideoBackends/Software/Rasterizer.cpp @@ -86,17 +86,9 @@ inline int iround(float x) { int t; -#if defined(_WIN32) && !defined(_M_X64) - __asm - { - fld x - fistp t - } -#else t = (int)x; if((x - t) >= 0.5) return t + 1; -#endif return t; } diff --git a/Source/Core/VideoCommon/VertexLoader.cpp b/Source/Core/VideoCommon/VertexLoader.cpp index 29abdb227c..dac42c0b4d 100644 --- a/Source/Core/VideoCommon/VertexLoader.cpp +++ 
b/Source/Core/VideoCommon/VertexLoader.cpp @@ -768,7 +768,7 @@ void VertexLoader::CompileVertexTranslator() #ifdef USE_VERTEX_LOADER_JIT // End loop here -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(RAX), Imm64((u64)&loop_counter)); SUB(32, MatR(RAX), Imm8(1)); #else @@ -787,7 +787,7 @@ void VertexLoader::CompileVertexTranslator() void VertexLoader::WriteCall(TPipelineFunction func) { #ifdef USE_VERTEX_LOADER_JIT -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(RAX), Imm64((u64)func)); CALLptr(R(RAX)); #else @@ -802,7 +802,7 @@ void VertexLoader::WriteCall(TPipelineFunction func) void VertexLoader::WriteGetVariable(int bits, OpArg dest, void *address) { #ifdef USE_VERTEX_LOADER_JIT -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(RAX), Imm64((u64)address)); MOV(bits, dest, MatR(RAX)); #else @@ -814,7 +814,7 @@ void VertexLoader::WriteGetVariable(int bits, OpArg dest, void *address) void VertexLoader::WriteSetVariable(int bits, void *address, OpArg value) { #ifdef USE_VERTEX_LOADER_JIT -#ifdef _M_X64 +#if _M_X86_64 MOV(64, R(RAX), Imm64((u64)address)); MOV(bits, MatR(RAX), value); #else diff --git a/Source/Core/VideoCommon/VideoCommon.h b/Source/Core/VideoCommon/VideoCommon.h index b18afce482..12b1863871 100644 --- a/Source/Core/VideoCommon/VideoCommon.h +++ b/Source/Core/VideoCommon/VideoCommon.h @@ -13,7 +13,7 @@ #include "Common/MathUtil.h" #include "VideoCommon/VideoBackendBase.h" -#if defined(_MSC_VER) && !defined(__x86_64__) && !defined(_M_X64) +#if defined(_MSC_VER) && _M_X86_32 void * memcpy_amd(void *dest, const void *src, size_t n); unsigned char memcmp_mmx(const void* src1, const void* src2, int cmpsize); #define memcpy_gc memcpy_amd diff --git a/Source/Core/VideoCommon/memcpy_amd.cpp b/Source/Core/VideoCommon/memcpy_amd.cpp index a563b80886..a0ef5b7881 100644 --- a/Source/Core/VideoCommon/memcpy_amd.cpp +++ b/Source/Core/VideoCommon/memcpy_amd.cpp @@ -75,7 +75,7 @@ MEMCPY_AMD.CPP #include #endif -#if defined(_MSC_VER) && !defined(__x86_64__) && !defined(_M_X64) +#if defined(_MSC_VER) && _M_X86_32 void * memcpy_amd(void *dest, const void *src, size_t n) {
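Aside: the VertexLoader WriteCall/WriteGetVariable paths above materialize the target address in RAX and then do an indirect call or memory access on x86-64, because a rel32 call/disp32 operand only reaches +/-2 GiB and an arbitrary 64-bit pointer may lie outside that range. A quick, self-contained way to check that assumption on a given host (illustrative only):

	// rel32_reach.cpp - illustrative only, not part of the patch
	#include <cstdint>
	#include <cstdio>

	int main()
	{
		uintptr_t addr = reinterpret_cast<uintptr_t>(&std::printf);
		std::printf("printf lives at %p; fits in 32 bits: %s\n",
		            reinterpret_cast<void*>(addr),
		            addr <= UINT32_MAX ? "yes" : "no");
		return 0;
	}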