author | Michael Smith <mikesmiffy128@gmail.com> | 2024-01-21 21:59:23 +0000
---|---|---
committer | Michael Smith <mikesmiffy128@gmail.com> | 2024-01-21 22:02:14 +0000
commit | 670488716dde7ba7813dd281f24403a0b24d8690 (patch) |
tree | 9b138c009e4131e32ed7bf4645e3eba7f8a5b18d |
parent | 506f095bcb528468f25a637977efcc408590ae67 (diff) |
Rethink mem_loadoffset, and consequently, kill it
Suggested by bill. Having something semantically pointer-sized that's
only ever used for stuff that's always 32-bit doesn't really make sense.
Note that I intentionally did not add a copyright line for myself in
hud.c because, I mean, come on. I'll just say I waive any claim to that
tiny trivial change.
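For context (not part of the commit): every call site touched below decodes an x86 rel32 displacement, e.g. the immediate of a 5-byte E8 CALL, which is a signed 32-bit value relative to the end of the instruction regardless of the build's pointer width. A minimal sketch of that decode step follows; the helper names are illustrative stand-ins, not the project's actual mem.h definitions:

```c
#include <stdint.h>
#include <string.h>

/* Stand-ins for the mem.h helpers: read a possibly-unaligned 32-bit value,
 * then reinterpret it as signed so it can act as a relative displacement. */
static inline uint32_t loadu32(const void *p) {
	uint32_t v; memcpy(&v, p, 4); return v;
}
static inline int32_t loads32(const void *p) { return (int32_t)loadu32(p); }

enum { X86_CALL = 0xE8 }; /* opcode of "call rel32" */

/* Resolve the target of a relative CALL at p. The displacement is signed and
 * taken relative to the next instruction (p + 5), so backward calls work too. */
static inline const unsigned char *call_target(const unsigned char *p) {
	return p[0] == X86_CALL ? p + 5 + loads32(p + 1) : 0;
}
```

A pointer-sized load only happened to do the same thing on 32-bit targets; naming the load by its actual width and signedness, as this commit does, makes the intent explicit.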
-rw-r--r-- | src/ac.c | 6
-rw-r--r-- | src/build/mkentprops.c | 4
-rw-r--r-- | src/democustom.c | 4
-rw-r--r-- | src/demorec.c | 10
-rw-r--r-- | src/ent.c | 6
-rw-r--r-- | src/fastfwd.c | 10
-rw-r--r-- | src/fov.c | 4
-rw-r--r-- | src/hook.c | 6
-rw-r--r-- | src/hud.c | 2
-rw-r--r-- | src/l4dreset.c | 14
-rw-r--r-- | src/mem.h | 35
11 files changed, 58 insertions, 43 deletions
diff --git a/src/ac.c b/src/ac.c
--- a/src/ac.c
+++ b/src/ac.c
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  * Copyright © 2023 Willian Henrique <wsimanbrazil@yahoo.com.br>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
@@ -327,7 +327,7 @@ static bool find_Key_Event(void) {
 ok:	insns = (const uchar *)VFUNC(cgame, DispatchAllStoredGameMessages);
 	for (const uchar *p = insns; p - insns < 128;) {
 		if (p[0] == X86_CALL) {
-			orig_Key_Event = (Key_Event_func)(p + 5 + mem_loadoffset(p + 1));
+			orig_Key_Event = (Key_Event_func)(p + 5 + mem_loads32(p + 1));
 			goto ok2;
 		}
 		NEXT_INSN(p, "DispatchInputEvent/Key_Event function");
@@ -342,7 +342,7 @@ ok2:
 	// DispatchInputEvent and this CALL points to Key_Event.
 	for (const uchar *p = insns; p - insns < 32;) {
 		if (p[0] == X86_CALL) {
-			orig_Key_Event = (Key_Event_func)(p + 5 + mem_loadoffset(p + 1));
+			orig_Key_Event = (Key_Event_func)(p + 5 + mem_loads32(p + 1));
 			break;
 		}
 		NEXT_INSN(p, "Key_Event function");
diff --git a/src/build/mkentprops.c b/src/build/mkentprops.c
index 9fb2a50..0781f25 100644
--- a/src/build/mkentprops.c
+++ b/src/build/mkentprops.c
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2022 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -157,7 +157,7 @@ _( "	const char *varname = mem_loadptr(mem_offset(p, off_SP_varname));")
 			pp - c->props.data < c->props.sz; ++pp) {
 F( "		%sif (!strcmp(varname, \"%s\")) {", else2, (*pp)->propname)
 F( "			has_%s = true;", (*pp)->varname)
-F( "			%s = mem_load32(mem_offset(p, off_SP_offset));",
+F( "			%s = mem_loads32(mem_offset(p, off_SP_offset));",
 		(*pp)->varname)
 _( "			if (!--needprops) break;")
 _( "		}")
diff --git a/src/democustom.c b/src/democustom.c
index 5dcbe01..4c1baf2 100644
--- a/src/democustom.c
+++ b/src/democustom.c
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -91,7 +91,7 @@ static bool find_WriteMessages(void) {
 	// RecordPacket calls WriteMessages right away, so just look for a call
 	for (const uchar *p = insns; p - insns < 32;) {
 		if (*p == X86_CALL) {
-			WriteMessages = (WriteMessages_func)(p + 5 + mem_loadoffset(p + 1));
+			WriteMessages = (WriteMessages_func)(p + 5 + mem_loads32(p + 1));
 			return true;
 		}
 		NEXT_INSN(p, "WriteMessages function");
diff --git a/src/demorec.c b/src/demorec.c
index 6e3b2ec..8abba77 100644
--- a/src/demorec.c
+++ b/src/demorec.c
@@ -1,6 +1,6 @@
 /*
  * Copyright © 2021 Willian Henrique <wsimanbrazil@yahoo.com.br>
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -193,12 +193,12 @@ static inline bool find_recmembers(void *StopRecording) {
 		// m_nDemoNumber = 0 -> mov dword ptr [<reg> + off], 0
 		// XXX: might end up wanting constants for the MRM field masks?
 		if (p[0] == X86_MOVMIW && (p[1] & 0xC0) == 0x80 &&
-				mem_load32(p + 6) == 0) {
-			demonum = mem_offset(demorecorder, mem_load32(p + 2));
+				mem_loads32(p + 6) == 0) {
+			demonum = mem_offset(demorecorder, mem_loads32(p + 2));
 		}
 		// m_bRecording = false -> mov byte ptr [<reg> + off], 0
 		else if (p[0] == X86_MOVMI8 && (p[1] & 0xC0) == 0x80 && p[6] == 0) {
-			recording = mem_offset(demorecorder, mem_load32(p + 2));
+			recording = mem_offset(demorecorder, mem_loads32(p + 2));
 		}
 		if (recording && demonum) return true; // blegh
 		NEXT_INSN(p, "recording state variables");
@@ -217,7 +217,7 @@ static inline bool find_demoname(void *StartRecording) {
 		// `this` - look for a LEA some time *before* the first call instruction
 		if (p[0] == X86_CALL) return false;
 		if (p[0] == X86_LEA && (p[1] & 0xC0) == 0x80) {
-			demorec_basename = mem_offset(demorecorder, mem_load32(p + 2));
+			demorec_basename = mem_offset(demorecorder, mem_loads32(p + 2));
 			return true;
 		}
 		NEXT_INSN(p, "demo basename variable");
diff --git a/src/ent.c b/src/ent.c
--- a/src/ent.c
+++ b/src/ent.c
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -113,11 +113,11 @@ static inline ctor_func findctor(const struct CEntityFactory *factory,
 	else {
 		if (p[0] == X86_MOVRMW && (p[1] & 0xF8) == 0xC8 &&
 				p[2] == X86_CALL) {
-			return (ctor_func)(p + 7 + mem_loadoffset(p + 3));
+			return (ctor_func)(p + 7 + mem_loads32(p + 3));
 		}
 		if (p[0] == X86_RET || p[0] == X86_RETI16) {
 			if (seencall && --depth) {
-				p = seencall + 5 + mem_loadoffset(seencall + 1); insns = p;
+				p = seencall + 5 + mem_loads32(seencall + 1); insns = p;
 				seencall = 0;
 				continue;
 			}
diff --git a/src/fastfwd.c b/src/fastfwd.c
index b5b7e6a..fb16e13 100644
--- a/src/fastfwd.c
+++ b/src/fastfwd.c
@@ -1,6 +1,6 @@
 /*
  * Copyright © 2023 Matthew Wozniak <sirtomato999@gmail.com>
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  * Copyright © 2023 Willian Henrique <wsimanbrazil@yahoo.com.br>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
@@ -101,7 +101,7 @@ static inline void *find_HostState_Frame(void *Frame) {
 		NEXT_INSN(p, "HostState_Frame");
 	while (p - insns < 640) {
 		if (p[0] == X86_CALL) {
-			return (uchar *)p + 5 + mem_loadoffset(p + 1);
+			return (uchar *)p + 5 + mem_loads32(p + 1);
 		}
 		NEXT_INSN(p, "HostState_Frame");
 	}
@@ -120,7 +120,7 @@ static inline void *find_FrameUpdate(void *HostState_Frame) {
 	// HostState_Frame() calls another non-virtual member function (FrameUpdate)
 	const uchar *insns = (const uchar *)HostState_Frame;
 	for (const uchar *p = insns; p - insns < 384;) {
-		if (p[0] == X86_CALL) return (uchar *)p + 5 + mem_loadoffset(p + 1);
+		if (p[0] == X86_CALL) return (uchar *)p + 5 + mem_loads32(p + 1);
 		NEXT_INSN(p, "CHostState::FrameUpdate");
 	}
 #else
@@ -138,7 +138,7 @@ static inline bool find_Host_AccumulateTime(void *_Host_RunFrame) {
 	while (p - insns < 384) {
 		if (p[0] == X86_CALL) {
 			orig_Host_AccumulateTime = (Host_AccumulateTime_func)(
-					p + 5 + mem_loadoffset(p + 1));
+					p + 5 + mem_loads32(p + 1));
 			return true;
 		}
 		NEXT_INSN(p, "Host_AccumulateTime");
@@ -173,7 +173,7 @@ static void *find_floatcall(void *func, int fldcnt, const char *name) {
 		NEXT_INSN(p, name);
 	while (p - insns < 384) {
 		if (p[0] == X86_CALL) {
-			if (!--fldcnt) return (uchar *)p + 5 + mem_loadoffset(p + 1);
+			if (!--fldcnt) return (uchar *)p + 5 + mem_loads32(p + 1);
 			goto next;
 		}
 		NEXT_INSN(p, name);
diff --git a/src/fov.c b/src/fov.c
--- a/src/fov.c
+++ b/src/fov.c
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  * Copyright © 2022 Willian Henrique <wsimanbrazil@yahoo.com.br>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
@@ -56,7 +56,7 @@ static bool find_SetDefaultFOV(struct con_cmd *fov) {
 		// direct calls, SetDefaultFOV() is the third.
 		if (p[0] == X86_CALL && ++callcnt == 3) {
 			orig_SetDefaultFOV = (SetDefaultFOV_func)(p + 5 +
-					mem_loadoffset(p + 1));
+					mem_loads32(p + 1));
 			return true;
 		}
 		NEXT_INSN(p, "SetDefaultFOV function");
diff --git a/src/hook.c b/src/hook.c
--- a/src/hook.c
+++ b/src/hook.c
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  * Copyright © 2022 Willian Henrique <wsimanbrazil@yahoo.com.br>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
@@ -59,7 +59,7 @@ void *hook_inline(void *func_, void *target) {
 	uchar *func = func_;
 	// dumb hack: if we hit some thunk that immediately jumps elsewhere (which
 	// seems common for win32 API functions), hook the underlying thing instead.
-	while (*func == X86_JMPIW) func += mem_loadoffset(func + 1) + 5;
+	while (*func == X86_JMPIW) func += mem_loads32(func + 1) + 5;
 	if (!os_mprot(func, 5, PAGE_EXECUTE_READWRITE)) return false;
 	int len = 0;
 	for (;;) {
@@ -105,7 +105,7 @@ void *hook_inline(void *func_, void *target) {
 void unhook_inline(void *orig) {
 	uchar *p = orig;
 	int len = p[-1];
-	int off = mem_load32(p + len + 1);
+	int off = mem_loads32(p + len + 1);
 	uchar *q = p + off + 5;
 	memcpy(q, p, 5); // XXX: not atomic atm! (does any of it even need to be?)
 	iflush(q, 5);
diff --git a/src/hud.c b/src/hud.c
--- a/src/hud.c
+++ b/src/hud.c
@@ -144,7 +144,7 @@ static bool find_toolspanel(void *enginevgui) {
 		// pointer to the specified panel
 		if (p[0] == X86_CALL) {
 			typedef void *(*VCALLCONV GetRootPanel_func)(void *this, int);
-			int off = mem_load32(p + 1);
+			int off = mem_loads32(p + 1);
 			GetRootPanel_func GetRootPanel = (GetRootPanel_func)(p + 5 + off);
 			toolspanel = GetRootPanel(enginevgui, /*PANEL_TOOLS*/ 3);
 			return true;
diff --git a/src/l4dreset.c b/src/l4dreset.c
index 479a1c8..e95a1ac 100644
--- a/src/l4dreset.c
+++ b/src/l4dreset.c
@@ -1,6 +1,6 @@
 /*
  * Copyright © 2023 Willian Henrique <wsimanbrazil@yahoo.com.br>
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -318,7 +318,7 @@ static inline bool find_voteissues(const uchar *insns) {
 	for (const uchar *p = insns; p - insns < 16;) {
 		// Look for the last call before the ret - that has to be ListIssues()
 		if (p[0] == X86_CALL && p[5] == X86_RET) {
-			insns = p + 5 + mem_loadoffset(p + 1);
+			insns = p + 5 + mem_loads32(p + 1);
 			goto ok;
 		}
 		NEXT_INSN(p, "ListIssues call");
@@ -329,7 +329,7 @@ ok:	for (const uchar *p = insns; p - insns < 96;) {
 		// Each pointer is loaded from a CUtlVector at an offset from `this`, so
 		// we can find that offset from the mov into ECX.
 		if (p[0] == X86_MOVRMW && (p[1] & 0xF8) == 0x88) {
-			int off = mem_loadoffset(p + 2);
+			int off = mem_loads32(p + 2);
 			if (off > 800) { // sanity check: offset is always fairly high
 				off_voteissues = off;
 				return true;
@@ -357,8 +357,8 @@ static inline bool find_votecallers(void *votectrlspawn) {
 		// to happen), but the vector of interest always comes 8 bytes later.
 		// "mov dword ptr [<reg> + off], 0", mod == 0b11
 		if (p[0] == X86_MOVMIW && (p[1] & 0xC0) == 0x80 &&
-				mem_load32(p + 6) == 0) {
-			off_callerrecords = mem_load32(p + 2) + 8;
+				mem_loads32(p + 6) == 0) {
+			off_callerrecords = mem_loads32(p + 2) + 8;
 			return true;
 		}
 		NEXT_INSN(p, "offset to vote caller record vector");
@@ -394,7 +394,7 @@ static inline bool find_UnfreezeTeam(void *GameFrame) { // note: L4D1 only
 		if (p[0] == X86_MOVRMW && p[1] == X86_MODRM(0, 1, 5) &&
 				mem_loadptr(mem_loadptr(p + 2)) == director &&
 				p[6] == X86_CALL) {
-			p += 11 + mem_loadoffset(p + 7);
+			p += 11 + mem_loads32(p + 7);
 			insns = p;
 			goto ok;
 		}
@@ -406,7 +406,7 @@ ok:	// Director::Update calls UnfreezeTeam after the first jmp instruction
 		// jz XXX; mov ecx, <reg>; call Director::UnfreezeTeam
 		if (p[0] == X86_JZ && p[2] == X86_MOVRMW && (p[3] & 0xF8) == 0xC8 &&
 				p[4] == X86_CALL) {
-			p += 9 + mem_loadoffset(p + 5);
+			p += 9 + mem_loads32(p + 5);
 			orig_UnfreezeTeam = (UnfreezeTeam_func)p;
 			return true;
 		}
diff --git a/src/mem.h b/src/mem.h
--- a/src/mem.h
+++ b/src/mem.h
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2024 Michael Smith <mikesmiffy128@gmail.com>
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -19,8 +19,8 @@
 
 #include "intdefs.h"
 
-/* Retrieves a 32-bit integer from an unaligned pointer. */
-static inline u32 mem_load32(const void *p) {
+/* Retrieves an unsigned 32-bit integer from an unaligned pointer. */
+static inline u32 mem_loadu32(const void *p) {
 	// XXX: Turns out the pedantically-safe approach below causes most compilers
 	// to generate horribly braindead x86 output in at least some cases (and the
 	// cases also differ by compiler). So, for now, use the simple pointer cast
@@ -31,26 +31,41 @@ static inline u32 mem_load32(const void *p) {
 	//return (u32)cp[0] | (u32)cp[1] << 8 | (u32)cp[2] << 16 | (u32)cp[3] << 24;
 }
 
-/* Retrieves a 64-bit integer from an unaligned pointer. */
-static inline u64 mem_load64(const void *p) {
+/* Retreives a signed 32-bit integer from an unaligned pointer. */
+static inline s32 mem_loads32(const void *p) {
+	return (s32)mem_loadu32(p);
+}
+
+/* Retrieves an unsigned 64-bit integer from an unaligned pointer. */
+static inline u64 mem_loadu64(const void *p) {
 	// this seems not to get butchered as badly in most cases?
-	return (u64)mem_load32(p) | (u64)mem_load32((uchar *)p + 4) << 32;
+	return (u64)mem_loadu32(p) | (u64)mem_loadu32((uchar *)p + 4) << 32;
+}
+
+/* Retreives a signed 64-bit integer from an unaligned pointer. */
+static inline s64 mem_loads64(const void *p) {
+	return (s64)mem_loadu64(p);
 }
 
 /* Retrieves a pointer from an unaligned pointer-to-pointer. */
 static inline void *mem_loadptr(const void *p) {
 #if defined(_WIN64) || defined(__x86_64__)
-	return (void *)mem_load64(p);
+	return (void *)mem_loadu64(p);
 #else
-	return (void *)mem_load32(p);
+	return (void *)mem_loadu32(p);
 #endif
 }
 
-/* Retreives a signed offset from an unaligned pointer. */
-static inline ssize mem_loadoffset(const void *p) {
+/* Retreives a signed size/offset value from an unaligned pointer. */
+static inline ssize mem_loadssize(const void *p) {
 	return (ssize)mem_loadptr(p);
 }
 
+/* Retreives an unsigned size or raw address value from an unaligned pointer. */
+static inline usize mem_loadusize(const void *p) {
+	return (usize)mem_loadptr(p);
+}
+
 /* Adds a byte count to a pointer and returns a freely-assignable pointer. */
 static inline void *mem_offset(void *p, int off) { return (char *)p + off; }