From d791e5d3a8e89d54608b1145412d954518c17e4d Mon Sep 17 00:00:00 2001
From: Markus Wick
Date: Fri, 11 Aug 2017 23:52:45 +0200
Subject: [PATCH] JitArm64: Use the updated wrappers.

They are faster, no need to use the slow path in the CPU.
---
 Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp    | 4 ++--
 Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp    | 4 ++--
 .../Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp | 6 +++---
 Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp                | 2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp
index 64906d7804..8b6993eceb 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp
@@ -196,14 +196,14 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, bool fastmem, bool do_farcode, AR
     {
       m_float_emit.FCVTN(32, D0, RS);
       m_float_emit.UMOV(64, X0, D0, 0);
-      ORR(X0, SP, X0, ArithOption(X0, ST_ROR, 32));
+      ROR(X0, X0, 32);
       MOVP2R(X30, &PowerPC::Write_U64);
       BLR(X30);
     }
     else if (flags & BackPatchInfo::FLAG_SIZE_F32X2I)
     {
       m_float_emit.UMOV(64, X0, RS, 0);
-      ORR(X0, SP, X0, ArithOption(X0, ST_ROR, 32));
+      ROR(X0, X0, 32);
       MOVP2R(X30, &PowerPC::Write_U64);
       BLR(X30);
     }
diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp
index c1e08bdf29..b4e057753e 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp
@@ -499,8 +499,8 @@ void JitArm64::lmw(UGeckoInstruction inst)
       LDP(INDEX_POST, EncodeRegTo64(RX1), EncodeRegTo64(RX3), XA, 16);
       REV32(EncodeRegTo64(RX1), EncodeRegTo64(RX1));
       REV32(EncodeRegTo64(RX3), EncodeRegTo64(RX3));
-      ORR(EncodeRegTo64(RX2), ZR, EncodeRegTo64(RX1), ArithOption(EncodeRegTo64(RX1), ST_LSR, 32));
-      ORR(EncodeRegTo64(RX4), ZR, EncodeRegTo64(RX3), ArithOption(EncodeRegTo64(RX3), ST_LSR, 32));
+      LSR(EncodeRegTo64(RX2), EncodeRegTo64(RX1), 32);
+      LSR(EncodeRegTo64(RX4), EncodeRegTo64(RX3), 32);
       i += 3;
     }
     else if (remaining >= 2)
diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp
index 0a06ac51a1..f5071e71bc 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp
@@ -300,12 +300,12 @@ void JitArm64::mfspr(UGeckoInstruction inst)
       if (iIndex == SPR_TL)
         MOV(gpr.R(d), Wresult);
       else
-        ORR(EncodeRegTo64(gpr.R(d)), ZR, Xresult, ArithOption(Xresult, ST_LSR, 32));
+        LSR(EncodeRegTo64(gpr.R(d)), Xresult, 32);

       if (nextIndex == SPR_TL)
         MOV(gpr.R(n), Wresult);
       else
-        ORR(EncodeRegTo64(gpr.R(n)), ZR, Xresult, ArithOption(Xresult, ST_LSR, 32));
+        LSR(EncodeRegTo64(gpr.R(n)), Xresult, 32);

       gpr.Unlock(Wg, Wresult, WA, WB);
       fpr.Unlock(VC, VD);
@@ -314,7 +314,7 @@
     }
     gpr.BindToRegister(d, false);
     if (iIndex == SPR_TU)
-      ORR(EncodeRegTo64(gpr.R(d)), ZR, Xresult, ArithOption(Xresult, ST_LSR, 32));
+      LSR(EncodeRegTo64(gpr.R(d)), Xresult, 32);
     else
       MOV(gpr.R(d), Wresult);

diff --git a/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp b/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp
index 603c36d38c..316336b667 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp
@@ -374,7 +374,7 @@ void JitArm64::GenerateCommonAsm()
     storePairedFloatSlow = GetCodePtr();
     float_emit.UMOV(64, X0, Q0, 0);
-    ORR(X0, SP, X0, ArithOption(X0, ST_ROR, 32));
+    ROR(X0, X0, 32);
     MOVP2R(X2, &PowerPC::Write_U64);
     BR(X2);
   }
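
A sketch of the equivalence the patch relies on, for reviewers: the mnemonic
expansions below follow the standard A64 alias definitions, and the "slow
path" in the commit message presumably refers to the shifted-operand forms of
the logical instructions (exact timings vary by core):

    // Old: rotate through a logical op with a shifted register operand.
    // Register 31 encodes the zero register in this operand position, so
    // this assembles to ORR X0, XZR, X0, ROR #32, which swaps the two
    // 32-bit halves of X0.
    ORR(X0, SP, X0, ArithOption(X0, ST_ROR, 32));

    // New: the ROR wrapper emits the alias EXTR X0, X0, X0, #32.
    // Same result, but without a shifted operand.
    ROR(X0, X0, 32);

    // Likewise for the shifts: ORR Xd, XZR, Xn, LSR #32 computes
    // Xd = Xn >> 32, and the LSR wrapper emits the alias
    // UBFM Xd, Xn, #32, #63 instead.
    LSR(EncodeRegTo64(RX2), EncodeRegTo64(RX1), 32);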