author    Michael Smith <mikesmiffy128@gmail.com>  2023-04-08 12:23:32 +0100
committer Michael Smith <mikesmiffy128@gmail.com>  2023-04-17 20:46:16 +0100
commit    eb19af607e88513053fe871a92684564b86ee7b8 (patch)
tree      27df16d7bfd5089699c4fc2573135e0bded18e58 /src/mem.h
parent    e7962fa5ea4e0399bea7d16d08beb29e76c48ff0 (diff)
Tidy up mem.h and prevent horrible assembly output
Removes the mem_store functions because they're never used anywhere.
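For reference, a commonly-used alternative that avoids the UB of the plain
pointer cast (not part of this commit) is to go through memcpy, which
mainstream compilers typically lower to a single unaligned load on x86. A
minimal sketch, assuming the u32 typedef from intdefs.h; the
mem_load32_memcpy name is purely illustrative:

    #include <string.h> /* memcpy */

    /* UB-free unaligned 32-bit load; for a fixed 4-byte size, compilers
     * usually emit a single mov rather than calling memcpy */
    static inline u32 mem_load32_memcpy(const void *p) {
        u32 v;
        memcpy(&v, p, sizeof(v)); /* byte copy, so no alignment assumption */
        return v;
    }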
Diffstat (limited to 'src/mem.h')
-rw-r--r--  src/mem.h | 44
1 file changed, 15 insertions(+), 29 deletions(-)
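For context on how the surviving loaders are meant to be used, a hypothetical
usage sketch (not from this repository) that reads a 32-bit value from an
unaligned offset inside a byte buffer:

    #include <stdio.h>
    #include "mem.h" /* provides mem_load32, mem_load64, mem_loadptr */

    int main(void) {
        unsigned char buf[12] = {0};
        buf[1] = 0x78; buf[2] = 0x56; buf[3] = 0x34; buf[4] = 0x12;
        /* load from an odd (unaligned) address */
        unsigned v = mem_load32(buf + 1);
        /* on the little-endian x86 targets this header checks for,
         * v == 0x12345678 */
        printf("%x\n", v);
        return 0;
    }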
diff --git a/src/mem.h b/src/mem.h
index 077b74d..97dfa91 100644
--- a/src/mem.h
+++ b/src/mem.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2021 Michael Smith <mikesmiffy128@gmail.com>
+ * Copyright © 2023 Michael Smith <mikesmiffy128@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +19,25 @@
#include "intdefs.h"
-/* retrieves a 32-bit integer from an unaligned pointer; avoids UB, probably */
+/* retrieves a 32-bit integer from an unaligned pointer */
static inline u32 mem_load32(const void *p) {
- const uchar *cp = p;
- return (u32)cp[0] | (u32)cp[1] << 8 | (u32)cp[2] << 16 | (u32)cp[3] << 24;
+ // XXX: Turns out the pedantically-safe approach below causes most compilers
+ // to generate horribly braindead x86 output in at least some cases (and the
+ // cases also differ by compiler). So, for now, use the simple pointer cast
+ // instead, even though it's technically UB - it'll be fine probably...
+ return *(u32 *)p;
+ // For future reference, the pedantically-safe approach would be to do this:
+ //const uchar *cp = p;
+ //return (u32)cp[0] | (u32)cp[1] << 8 | (u32)cp[2] << 16 | (u32)cp[3] << 24;
}
-/* retrieves a 64-bit integer from an unaligned pointer; avoids UB, possibly */
+/* retrieves a 64-bit integer from an unaligned pointer */
static inline u64 mem_load64(const void *p) {
+ // this seems not to get butchered as badly in most cases?
return (u64)mem_load32(p) | (u64)mem_load32((uchar *)p + 4) << 32;
}
-/* retrieves a pointer from an unaligned pointer-to-pointer; avoids UB, maybe */
+/* retrieves a pointer from an unaligned pointer-to-pointer */
static inline void *mem_loadptr(const void *p) {
#if defined(_WIN64) || defined(__x86_64__)
return (void *)mem_load64(p);
@@ -39,33 +46,12 @@ static inline void *mem_loadptr(const void *p) {
#endif
}
-/* retrieves a signed offset from an unaligned pointer; avoids UB, hopefully */
+/* retrieves a signed offset from an unaligned pointer */
static inline ssize mem_loadoffset(const void *p) {
return (ssize)mem_loadptr(p);
}
-/* stores a 32-bit integer to an unaligned pointer; avoids UB, most likely */
-static inline void mem_store32(void *p, u32 val) {
- uchar *cp = p;
- cp[0] = val; cp[1] = val >> 8; cp[2] = val >> 16; cp[3] = val >> 24;
-}
-
-/* stores a 64-bit integer to an unaligned pointer; avoids UB, I'd assume */
-static inline void mem_store64(void *p, u64 val) {
- mem_store32(p, val); mem_store32((uchar *)p + 4, val >> 32);
-}
-
-/* stores a pointer value to an unaligned pointer; avoids UB, I guess */
-static inline void mem_storeptr(void *to, const void *val) {
-#if defined(_WIN64) || defined(__x86_64__)
- mem_store64(to, (u64)val);
-#else
- mem_store32(to, (u32)val);
-#endif
-}
-
-/* adds a byte count to a pointer, and returns something that can be assigned
- * to any pointer type */
+/* adds a byte count to a pointer and returns a freely-assignable void pointer */
static inline void *mem_offset(void *p, int off) { return (char *)p + off; }
/* returns the offset in bytes from one pointer to another (p - q) */