-rw-r--r--  src/common/CMakeLists.txt                  |  2
-rw-r--r--  src/common/atomic_ops.cpp                  | 70
-rw-r--r--  src/common/atomic_ops.h                    | 17
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp  | 66
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.h    |  6
-rw-r--r--  src/core/arm/exclusive_monitor.h           |  6
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp    |  6
-rw-r--r--  src/core/hle/kernel/mutex.cpp              |  5
-rw-r--r--  src/core/hle/kernel/svc.cpp                |  2
-rw-r--r--  src/core/hle/kernel/thread.cpp             |  6
-rw-r--r--  src/core/memory.cpp                        | 98
-rw-r--r--  src/core/memory.h                          | 65
12 files changed, 325 insertions, 24 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 3cc17d0e9..d120c8d3d 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -98,6 +98,8 @@ add_library(common STATIC
algorithm.h
alignment.h
assert.h
+ atomic_ops.cpp
+ atomic_ops.h
detached_tasks.cpp
detached_tasks.h
bit_field.h
diff --git a/src/common/atomic_ops.cpp b/src/common/atomic_ops.cpp
new file mode 100644
index 000000000..65cdfb4fd
--- /dev/null
+++ b/src/common/atomic_ops.cpp
@@ -0,0 +1,70 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <cstring>
+
+#include "common/atomic_ops.h"
+
+#if _MSC_VER
+#include <intrin.h>
+#endif
+
+namespace Common {
+
+#if _MSC_VER
+
+bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected) {
+ u8 result = _InterlockedCompareExchange8((char*)pointer, value, expected);
+ return result == expected;
+}
+
+bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected) {
+ u16 result = _InterlockedCompareExchange16((short*)pointer, value, expected);
+ return result == expected;
+}
+
+bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected) {
+ u32 result = _InterlockedCompareExchange((long*)pointer, value, expected);
+ return result == expected;
+}
+
+bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected) {
+ u64 result = _InterlockedCompareExchange64((__int64*)pointer, value, expected);
+ return result == expected;
+}
+
+bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected) {
+ return _InterlockedCompareExchange128((__int64*)pointer, value[1], value[0],
+ (__int64*)expected.data()) != 0;
+}
+
+#else
+
+bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected) {
+ return __sync_bool_compare_and_swap(pointer, expected, value);
+}
+
+bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected) {
+ return __sync_bool_compare_and_swap(pointer, expected, value);
+}
+
+bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected) {
+ return __sync_bool_compare_and_swap(pointer, expected, value);
+}
+
+bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected) {
+ return __sync_bool_compare_and_swap(pointer, expected, value);
+}
+
+bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected) {
+ unsigned __int128 value_a;
+ unsigned __int128 expected_a;
+ std::memcpy(&value_a, value.data(), sizeof(u128));
+ std::memcpy(&expected_a, expected.data(), sizeof(u128));
+ return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
+}
+
+#endif
+
+} // namespace Common
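
The helpers above implement compare-and-swap: the new value is stored only if the target still holds the expected value, and the return value reports whether the swap happened. As a minimal, illustrative sketch of the usual retry-loop usage (the AtomicIncrement32 helper below is hypothetical and not part of this change):

    #include "common/atomic_ops.h"
    #include "common/common_types.h"

    // Hypothetical example: atomically increment a shared 32-bit counter.
    // The loop retries whenever another writer modified the value between
    // the read and the compare-and-swap.
    u32 AtomicIncrement32(volatile u32* counter) {
        u32 expected;
        do {
            expected = *counter; // snapshot the current value
            // Store expected + 1 only if *counter still equals expected.
        } while (!Common::AtomicCompareAndSwap(counter, expected + 1, expected));
        return expected + 1;
    }
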
diff --git a/src/common/atomic_ops.h b/src/common/atomic_ops.h
new file mode 100644
index 000000000..22cb3a402
--- /dev/null
+++ b/src/common/atomic_ops.h
@@ -0,0 +1,17 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Common {
+
+bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected);
+bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected);
+bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected);
+bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected);
+bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected);
+
+} // namespace Common
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 5e316ffd4..a22c22bf0 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -66,6 +66,22 @@ public:
memory.Write64(vaddr + 8, value[1]);
}
+ bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override {
+ return parent.system.Memory().WriteExclusive8(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override {
+ return parent.system.Memory().WriteExclusive16(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override {
+ return parent.system.Memory().WriteExclusive32(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override {
+ return parent.system.Memory().WriteExclusive64(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override {
+ return parent.system.Memory().WriteExclusive128(vaddr, value, expected);
+ }
+
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
LOG_INFO(Core_ARM, "Unicorn fallback @ 0x{:X} for {} instructions (instr = {:08X})", pc,
num_instructions, MemoryReadCode(pc));
@@ -284,9 +300,29 @@ DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory, std::
DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
-void DynarmicExclusiveMonitor::SetExclusive(std::size_t core_index, VAddr addr) {
- // Size doesn't actually matter.
- monitor.Mark(core_index, addr, 16);
+void DynarmicExclusiveMonitor::SetExclusive8(std::size_t core_index, VAddr addr) {
+ monitor.Mark<u8>(core_index, addr, 1, [&]() -> u8 { return memory.Read8(addr); });
+}
+
+void DynarmicExclusiveMonitor::SetExclusive16(std::size_t core_index, VAddr addr) {
+ monitor.Mark<u16>(core_index, addr, 2, [&]() -> u16 { return memory.Read16(addr); });
+}
+
+void DynarmicExclusiveMonitor::SetExclusive32(std::size_t core_index, VAddr addr) {
+ monitor.Mark<u32>(core_index, addr, 4, [&]() -> u32 { return memory.Read32(addr); });
+}
+
+void DynarmicExclusiveMonitor::SetExclusive64(std::size_t core_index, VAddr addr) {
+ monitor.Mark<u64>(core_index, addr, 8, [&]() -> u64 { return memory.Read64(addr); });
+}
+
+void DynarmicExclusiveMonitor::SetExclusive128(std::size_t core_index, VAddr addr) {
+ monitor.Mark<u128>(core_index, addr, 16, [&]() -> u128 {
+ u128 result;
+ result[0] = memory.Read64(addr);
+ result[1] = memory.Read64(addr + 8);
+ return result;
+ });
}
void DynarmicExclusiveMonitor::ClearExclusive() {
@@ -294,28 +330,32 @@ void DynarmicExclusiveMonitor::ClearExclusive() {
}
bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 1, [&] { memory.Write8(vaddr, value); });
+ return monitor.DoExclusiveOperation<u8>(core_index, vaddr, 1, [&](u8 expected) -> bool {
+ return memory.WriteExclusive8(vaddr, value, expected);
+ });
}
bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 2,
- [&] { memory.Write16(vaddr, value); });
+ return monitor.DoExclusiveOperation<u16>(core_index, vaddr, 2, [&](u16 expected) -> bool {
+ return memory.WriteExclusive16(vaddr, value, expected);
+ });
}
bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 4,
- [&] { memory.Write32(vaddr, value); });
+ return monitor.DoExclusiveOperation<u32>(core_index, vaddr, 4, [&](u32 expected) -> bool {
+ return memory.WriteExclusive32(vaddr, value, expected);
+ });
}
bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 8,
- [&] { memory.Write64(vaddr, value); });
+ return monitor.DoExclusiveOperation<u64>(core_index, vaddr, 8, [&](u64 expected) -> bool {
+ return memory.WriteExclusive64(vaddr, value, expected);
+ });
}
bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 16, [&] {
- memory.Write64(vaddr + 0, value[0]);
- memory.Write64(vaddr + 8, value[1]);
+ return monitor.DoExclusiveOperation<u128>(core_index, vaddr, 16, [&](u128 expected) -> bool {
+ return memory.WriteExclusive128(vaddr, value, expected);
});
}
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 9e94b58c2..3ead59f16 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -82,7 +82,11 @@ public:
explicit DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count);
~DynarmicExclusiveMonitor() override;
- void SetExclusive(std::size_t core_index, VAddr addr) override;
+ void SetExclusive8(std::size_t core_index, VAddr addr) override;
+ void SetExclusive16(std::size_t core_index, VAddr addr) override;
+ void SetExclusive32(std::size_t core_index, VAddr addr) override;
+ void SetExclusive64(std::size_t core_index, VAddr addr) override;
+ void SetExclusive128(std::size_t core_index, VAddr addr) override;
void ClearExclusive() override;
bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h
index ccd73b80f..2ee312eee 100644
--- a/src/core/arm/exclusive_monitor.h
+++ b/src/core/arm/exclusive_monitor.h
@@ -18,7 +18,11 @@ class ExclusiveMonitor {
public:
virtual ~ExclusiveMonitor();
- virtual void SetExclusive(std::size_t core_index, VAddr addr) = 0;
+ virtual void SetExclusive8(std::size_t core_index, VAddr addr) = 0;
+ virtual void SetExclusive16(std::size_t core_index, VAddr addr) = 0;
+ virtual void SetExclusive32(std::size_t core_index, VAddr addr) = 0;
+ virtual void SetExclusive64(std::size_t core_index, VAddr addr) = 0;
+ virtual void SetExclusive128(std::size_t core_index, VAddr addr) = 0;
virtual void ClearExclusive() = 0;
virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0;
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index ebabde921..07acabc1d 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -90,7 +90,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
auto& monitor = system.Monitor();
u32 current_value;
do {
- monitor.SetExclusive(current_core, address);
+ monitor.SetExclusive32(current_core, address);
current_value = memory.Read32(address);
if (current_value != value) {
@@ -120,7 +120,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
auto& monitor = system.Monitor();
s32 updated_value;
do {
- monitor.SetExclusive(current_core, address);
+ monitor.SetExclusive32(current_core, address);
updated_value = memory.Read32(address);
if (updated_value != value) {
@@ -191,7 +191,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
const std::size_t current_core = system.CurrentCoreIndex();
auto& monitor = system.Monitor();
do {
- monitor.SetExclusive(current_core, address);
+ monitor.SetExclusive32(current_core, address);
current_value = static_cast<s32>(memory.Read32(address));
if (should_decrement) {
decrement_value = current_value - 1;
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index ebe3f6050..16c95782a 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -10,6 +10,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/arm/exclusive_monitor.h"
+#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
@@ -138,7 +139,7 @@ std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thr
const std::size_t current_core = system.CurrentCoreIndex();
if (new_owner == nullptr) {
do {
- monitor.SetExclusive(current_core, address);
+ monitor.SetExclusive32(current_core, address);
} while (!monitor.ExclusiveWrite32(current_core, address, 0));
return {RESULT_SUCCESS, nullptr};
}
@@ -154,7 +155,7 @@ std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thr
new_owner->ResumeFromWait();
do {
- monitor.SetExclusive(current_core, address);
+ monitor.SetExclusive32(current_core, address);
} while (!monitor.ExclusiveWrite32(current_core, address, mutex_value));
return {RESULT_SUCCESS, new_owner};
}
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index da2f90a1d..371beed0d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1641,7 +1641,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
u32 update_val = 0;
const VAddr mutex_address = thread->GetMutexWaitAddress();
do {
- monitor.SetExclusive(current_core, mutex_address);
+ monitor.SetExclusive32(current_core, mutex_address);
// If the mutex is not yet acquired, acquire it.
mutex_val = memory.Read32(mutex_address);
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index b99e3b7a5..51cc5dcca 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -236,7 +236,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
}
thread->host_context =
- std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
+ std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
}
@@ -412,12 +412,12 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
}
if (value == ThreadActivity::Paused) {
- if (pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag) != 0) {
+ if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
return ERR_INVALID_STATE;
}
AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
} else {
- if (pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag) == 0) {
+ if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
return ERR_INVALID_STATE;
}
RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 66634596d..4cb5d05e5 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -8,6 +8,7 @@
#include <utility>
#include "common/assert.h"
+#include "common/atomic_ops.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/page_table.h"
@@ -176,6 +177,22 @@ struct Memory::Impl {
}
}
+ bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
+ return WriteExclusive<u8>(addr, data, expected);
+ }
+
+ bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
+ return WriteExclusive<u16_le>(addr, data, expected);
+ }
+
+ bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
+ return WriteExclusive<u32_le>(addr, data, expected);
+ }
+
+ bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
+ return WriteExclusive<u64_le>(addr, data, expected);
+ }
+
std::string ReadCString(VAddr vaddr, std::size_t max_length) {
std::string string;
string.reserve(max_length);
@@ -679,6 +696,67 @@ struct Memory::Impl {
}
}
+ template <typename T>
+ bool WriteExclusive(const VAddr vaddr, const T data, const T expected) {
+ u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+ if (page_pointer != nullptr) {
+ // NOTE: Avoid adding any extra logic to this fast-path block
+ T volatile* pointer = reinterpret_cast<T volatile*>(&page_pointer[vaddr]);
+ return Common::AtomicCompareAndSwap(pointer, data, expected);
+ }
+
+ const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+ switch (type) {
+ case Common::PageType::Unmapped:
+ LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
+ static_cast<u32>(data), vaddr);
+ return true;
+ case Common::PageType::Memory:
+ ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+ break;
+ case Common::PageType::RasterizerCachedMemory: {
+ u8* host_ptr{GetPointerFromVMA(vaddr)};
+ system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
+ T volatile* pointer = reinterpret_cast<T volatile*>(host_ptr);
+ return Common::AtomicCompareAndSwap(pointer, data, expected);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return true;
+ }
+
+ bool WriteExclusive128(const VAddr vaddr, const u128 data, const u128 expected) {
+ u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+ if (page_pointer != nullptr) {
+ // NOTE: Avoid adding any extra logic to this fast-path block
+ u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&page_pointer[vaddr]);
+ return Common::AtomicCompareAndSwap(pointer, data, expected);
+ }
+
+ const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+ switch (type) {
+ case Common::PageType::Unmapped:
+ LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:016X}{:016X} @ 0x{:016X}", sizeof(data) * 8,
+ data[1], data[0], vaddr);
+ return true;
+ case Common::PageType::Memory:
+ ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+ break;
+ case Common::PageType::RasterizerCachedMemory: {
+ u8* host_ptr{GetPointerFromVMA(vaddr)};
+ system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(u128));
+ u64 volatile* pointer = reinterpret_cast<u64 volatile*>(host_ptr);
+ return Common::AtomicCompareAndSwap(pointer, data, expected);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return true;
+ }
+
Common::PageTable* current_page_table = nullptr;
Core::System& system;
};
@@ -761,6 +839,26 @@ void Memory::Write64(VAddr addr, u64 data) {
impl->Write64(addr, data);
}
+bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
+ return impl->WriteExclusive8(addr, data, expected);
+}
+
+bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
+ return impl->WriteExclusive16(addr, data, expected);
+}
+
+bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
+ return impl->WriteExclusive32(addr, data, expected);
+}
+
+bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
+ return impl->WriteExclusive64(addr, data, expected);
+}
+
+bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
+ return impl->WriteExclusive128(addr, data, expected);
+}
+
std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
return impl->ReadCString(vaddr, max_length);
}
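
The WriteExclusive* paths above return false when the guarded value changed before the store, so callers are expected to retry. A rough sketch of that pattern against the new Memory interface (the SetFlagBit helper and its parameters are illustrative only, not part of this diff):

    // Hypothetical example: atomically OR a flag bit into a guest 32-bit word,
    // retrying whenever the value changed between the read and the conditional write.
    void SetFlagBit(Memory::Memory& memory, VAddr addr, u32 flag) {
        u32 current;
        do {
            current = memory.Read32(addr);
        } while (!memory.WriteExclusive32(addr, current | flag, current));
    }
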
diff --git a/src/core/memory.h b/src/core/memory.h
index 93f0c1d6c..4a1cc63f4 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -245,6 +245,71 @@ public:
void Write64(VAddr addr, u64 data);
/**
+ * Writes an 8-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 8-bit unsigned integer to.
+ * @param data The 8-bit unsigned integer to write to the given virtual address.
+ * @param expected The 8-bit unsigned integer to check against the given virtual address.
+ *
+ * @returns True if the address contained the expected value and the write was performed.
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ */
+ bool WriteExclusive8(VAddr addr, u8 data, u8 expected);
+
+ /**
+ * Writes a 16-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 16-bit unsigned integer to.
+ * @param data The 16-bit unsigned integer to write to the given virtual address.
+ * @param expected The 16-bit unsigned integer to check against the given virtual address.
+ *
+ * @returns True if the address contained the expected value and the write was performed.
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ */
+ bool WriteExclusive16(VAddr addr, u16 data, u16 expected);
+
+ /**
+ * Writes a 32-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 32-bit unsigned integer to.
+ * @param data The 32-bit unsigned integer to write to the given virtual address.
+ * @param expected The 32-bit unsigned integer to check against the given virtual address.
+ *
+ * @returns True if the address contained the expected value and the write was performed.
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ */
+ bool WriteExclusive32(VAddr addr, u32 data, u32 expected);
+
+ /**
+ * Writes a 64-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 64-bit unsigned integer to.
+ * @param data The 64-bit unsigned integer to write to the given virtual address.
+ * @param expected The 64-bit unsigned integer to check against the given virtual address.
+ *
+ * @returns True if the address contained the expected value and the write was performed.
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ */
+ bool WriteExclusive64(VAddr addr, u64 data, u64 expected);
+
+ /**
+ * Writes a 128-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 128-bit unsigned integer to.
+ * @param data The 128-bit unsigned integer to write to the given virtual address.
+ * @param expected The 128-bit unsigned integer to check against the given virtual address.
+ *
+ * @returns True if the address contained the expected value and the write was performed.
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ */
+ bool WriteExclusive128(VAddr addr, u128 data, u128 expected);
+
+ /**
* Reads a null-terminated string from the given virtual address.
* This function will continually read characters until either:
*