author     bunnei <bunneidev@gmail.com>  2020-12-17 06:09:06 +0100
committer  bunnei <bunneidev@gmail.com>  2020-12-29 06:33:34 +0100
commit     6433b1dfd67f4c4f0c4b2e3742dc437a0d1e906e (patch)
tree       d2a9a23be1a80d9351f99d7feb7978f3c2237882 /src/core
parent     hle: service: Ensure system is powered on before writing IPC result. (diff)
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue.cpp  | 40
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue.h    | 17
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp     | 13
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.h       |  2
-rw-r--r--  src/core/hle/service/vi/vi.cpp                   | 19
5 files changed, 72 insertions(+), 19 deletions(-)
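
At a glance, the patch replaces the bare empty-check in BufferQueue::DequeueBuffer with a condition-variable wait that is also woken on disconnect, so the dequeue path blocks instead of spinning in vi.cpp and still unblocks when emulation shuts down. Below is a minimal standalone sketch of that wait/notify pattern, simplified and made race-free for illustration; the names are placeholders, not the actual yuzu types:

    #include <atomic>
    #include <condition_variable>
    #include <list>
    #include <mutex>
    #include <optional>

    class SlotQueue {
    public:
        // Producer side: mirrors the push-then-notify pattern added to
        // SetPreallocatedBuffer/CancelBuffer/ReleaseBuffer.
        void Push(unsigned slot) {
            {
                std::unique_lock lock{mutex};
                free_slots.push_back(slot);
            }
            condition.notify_one();
        }

        // Consumer side: mirrors the new blocking DequeueBuffer.
        std::optional<unsigned> Pop() {
            std::unique_lock lock{mutex};
            // Sleep until a slot is available or the queue is shut down.
            condition.wait(lock, [this] { return !free_slots.empty() || !connected; });
            if (!connected) {
                return std::nullopt; // woken by Disconnect(), give up
            }
            const unsigned slot = free_slots.front();
            free_slots.pop_front();
            return slot;
        }

        // Mirrors Disconnect(): releases any consumer blocked in Pop().
        void Disconnect() {
            {
                // Flip the flag under the mutex so a waiter cannot miss the wakeup.
                std::unique_lock lock{mutex};
                connected = false;
            }
            condition.notify_all();
        }

    private:
        std::list<unsigned> free_slots;
        std::atomic_bool connected{true};
        std::mutex mutex;
        std::condition_variable condition;
    };
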
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp
index 377f47e8e..c8c6a4d64 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue.cpp
@@ -25,7 +25,12 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
ASSERT(slot < buffer_slots);
LOG_WARNING(Service, "Adding graphics buffer {}", slot);
- free_buffers.push_back(slot);
+ {
+ std::unique_lock lock{queue_mutex};
+ free_buffers.push_back(slot);
+ }
+ condition.notify_one();
+
buffers[slot] = {
.slot = slot,
.status = Buffer::Status::Free,
@@ -41,10 +46,20 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width,
u32 height) {
+ // Wait for first request before trying to dequeue
+ {
+ std::unique_lock lock{queue_mutex};
+ condition.wait(lock, [this] { return !free_buffers.empty() || !is_connect; });
+ }
- if (free_buffers.empty()) {
+ if (!is_connect) {
+ // The queue was disconnected while the thread was blocked; this is most
+ // likely due to emulation being stopped.
return std::nullopt;
}
+
+ std::unique_lock lock{queue_mutex};
+
auto f_itr = free_buffers.begin();
auto slot = buffers.size();
@@ -97,7 +112,11 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence)
buffers[slot].multi_fence = multi_fence;
buffers[slot].swap_interval = 0;
- free_buffers.push_back(slot);
+ {
+ std::unique_lock lock{queue_mutex};
+ free_buffers.push_back(slot);
+ }
+ condition.notify_one();
buffer_wait_event.writable->Signal();
}
@@ -127,15 +146,28 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
ASSERT(buffers[slot].slot == slot);
buffers[slot].status = Buffer::Status::Free;
- free_buffers.push_back(slot);
+ {
+ std::unique_lock lock{queue_mutex};
+ free_buffers.push_back(slot);
+ }
+ condition.notify_one();
buffer_wait_event.writable->Signal();
}
+void BufferQueue::Connect() {
+ queue_sequence.clear();
+ id = 1;
+ layer_id = 1;
+ is_connect = true;
+}
+
void BufferQueue::Disconnect() {
buffers.fill({});
queue_sequence.clear();
buffer_wait_event.writable->Signal();
+ is_connect = false;
+ condition.notify_one();
}
u32 BufferQueue::Query(QueryType type) {
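
The producer-side hunks above all follow the same idiom: mutate free_buffers inside a short scoped lock, then call notify_one() after the lock has been released, so the woken consumer does not immediately block on a still-held mutex. A small self-contained illustration of that idiom, with generic names rather than yuzu code:

    #include <condition_variable>
    #include <cstdio>
    #include <list>
    #include <mutex>
    #include <thread>

    int main() {
        std::mutex mutex;
        std::condition_variable condition;
        std::list<int> items;

        std::thread consumer([&] {
            std::unique_lock lock{mutex};
            condition.wait(lock, [&] { return !items.empty(); });
            std::printf("consumed %d\n", items.front());
        });

        {
            std::unique_lock lock{mutex}; // scoped: released before notify_one()
            items.push_back(42);
        }
        condition.notify_one(); // the consumer wakes and can take the lock right away

        consumer.join();
    }
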
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h
index e610923cb..a2f60d9eb 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.h
+++ b/src/core/hle/service/nvflinger/buffer_queue.h
@@ -4,7 +4,10 @@
#pragma once
+#include <atomic>
+#include <condition_variable>
#include <list>
+#include <mutex>
#include <optional>
#include <vector>
@@ -99,6 +101,7 @@ public:
void CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence);
std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer();
void ReleaseBuffer(u32 slot);
+ void Connect();
void Disconnect();
u32 Query(QueryType type);
@@ -106,18 +109,28 @@ public:
return id;
}
+ bool IsConnected() const {
+ return is_connect;
+ }
+
std::shared_ptr<Kernel::WritableEvent> GetWritableBufferWaitEvent() const;
std::shared_ptr<Kernel::ReadableEvent> GetBufferWaitEvent() const;
private:
- u32 id;
- u64 layer_id;
+ BufferQueue(const BufferQueue&) = delete;
+
+ u32 id{};
+ u64 layer_id{};
+ std::atomic_bool is_connect{};
std::list<u32> free_buffers;
std::array<Buffer, buffer_slots> buffers;
std::list<u32> queue_sequence;
Kernel::EventPair buffer_wait_event;
+
+ std::mutex queue_mutex;
+ std::condition_variable condition;
};
} // namespace Service::NVFlinger
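
With a std::mutex and std::condition_variable as members (and the copy constructor explicitly deleted), BufferQueue is no longer copyable or movable, which is what forces the container change in nvflinger.h below. A quick way to see this, using a hypothetical stand-in type rather than the real class:

    #include <condition_variable>
    #include <mutex>
    #include <type_traits>

    // Hypothetical stand-in with the same synchronization members as BufferQueue.
    struct QueueLike {
        std::mutex queue_mutex;
        std::condition_variable condition;
    };

    // std::mutex and std::condition_variable are neither copyable nor movable,
    // so a type holding them cannot be stored by value in a std::vector that
    // may need to relocate its elements.
    static_assert(!std::is_copy_constructible_v<QueueLike>);
    static_assert(!std::is_move_constructible_v<QueueLike>);

    int main() {}
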
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index a7a679df1..4b3581949 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -88,6 +88,10 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
}
NVFlinger::~NVFlinger() {
+ for (auto& buffer_queue : buffer_queues) {
+ buffer_queue->Disconnect();
+ }
+
if (system.IsMulticore()) {
is_running = false;
wait_event->Set();
@@ -132,8 +136,9 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
const u64 layer_id = next_layer_id++;
const u32 buffer_queue_id = next_buffer_queue_id++;
- buffer_queues.emplace_back(system.Kernel(), buffer_queue_id, layer_id);
- display->CreateLayer(layer_id, buffer_queues.back());
+ buffer_queues.emplace_back(
+ std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id));
+ display->CreateLayer(layer_id, *buffer_queues.back());
return layer_id;
}
@@ -170,13 +175,13 @@ std::shared_ptr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id)
BufferQueue* NVFlinger::FindBufferQueue(u32 id) {
const auto guard = Lock();
const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
- [id](const auto& queue) { return queue.GetId() == id; });
+ [id](const auto& queue) { return queue->GetId() == id; });
if (itr == buffer_queues.end()) {
return nullptr;
}
- return &*itr;
+ return itr->get();
}
VI::Display* NVFlinger::FindDisplay(u64 display_id) {
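
Because of that, the queues are now heap-allocated and the vector only shuffles pointers when it grows; lookups return the raw pointer via itr->get(). A sketch of the same storage pattern, with a hypothetical element type in place of BufferQueue:

    #include <algorithm>
    #include <memory>
    #include <mutex>
    #include <vector>

    // Hypothetical element type that, like BufferQueue, owns a mutex and is
    // therefore neither copyable nor movable.
    struct Queue {
        explicit Queue(unsigned id_) : id{id_} {}
        unsigned id;
        std::mutex mutex;
    };

    std::vector<std::unique_ptr<Queue>> queues;

    Queue* FindQueue(unsigned id) {
        const auto itr = std::find_if(queues.begin(), queues.end(),
                                      [id](const auto& queue) { return queue->id == id; });
        return itr == queues.end() ? nullptr : itr->get();
    }

    int main() {
        queues.emplace_back(std::make_unique<Queue>(1)); // the vector stores only the pointer
        return FindQueue(1) != nullptr ? 0 : 1;
    }
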
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index ce1347d6d..c6765259f 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -107,7 +107,7 @@ private:
std::shared_ptr<Nvidia::Module> nvdrv;
std::vector<VI::Display> displays;
- std::vector<BufferQueue> buffer_queues;
+ std::vector<std::unique_ptr<BufferQueue>> buffer_queues;
/// Id to use for the next layer that is created, this counter is shared among all displays.
u64 next_layer_id = 1;
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index ce0272e59..1051000f8 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -544,6 +544,12 @@ private:
Settings::values.resolution_factor.GetValue()),
static_cast<u32>(static_cast<u32>(DisplayResolution::UndockedHeight) *
Settings::values.resolution_factor.GetValue())};
+
+ {
+ auto& buffer_queue = *nv_flinger.FindBufferQueue(id);
+ buffer_queue.Connect();
+ }
+
ctx.WriteBuffer(response.Serialize());
break;
}
@@ -565,18 +571,15 @@ private:
const u32 width{request.data.width};
const u32 height{request.data.height};
- std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> result;
-
- while (!result) {
- auto& buffer_queue = *nv_flinger.FindBufferQueue(id);
- result = buffer_queue.DequeueBuffer(width, height);
-
- if (result) {
+ auto& buffer_queue = *nv_flinger.FindBufferQueue(id);
+ do {
+ if (auto result = buffer_queue.DequeueBuffer(width, height); result) {
// Buffer is available
IGBPDequeueBufferResponseParcel response{result->first, *result->second};
ctx.WriteBuffer(response.Serialize());
+ break;
}
- }
+ } while (buffer_queue.IsConnected());
break;
}
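
The reworked vi.cpp loop leans on the C++17 if-with-initializer form and keeps retrying only while the queue reports itself connected, so a Disconnect() during shutdown ends the loop instead of spinning forever. A compact standalone version of that control flow, with hypothetical stand-ins for DequeueBuffer and IsConnected:

    #include <cstdio>
    #include <optional>

    // Hypothetical stand-ins for buffer_queue.DequeueBuffer(...) and IsConnected().
    static bool connected = true;
    static std::optional<int> TryDequeue() {
        connected = false;       // simulate a disconnect while waiting
        return std::nullopt;
    }
    static bool IsConnected() { return connected; }

    int main() {
        do {
            // C++17 if-with-initializer: `result` is scoped to this statement.
            if (auto result = TryDequeue(); result) {
                std::printf("dequeued slot %d\n", *result);
                break;
            }
        } while (IsConnected()); // stop retrying once the queue is disconnected
    }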