author     bunnei <bunneidev@gmail.com>    2022-03-26 07:01:03 +0100
committer  bunnei <bunneidev@gmail.com>    2022-03-26 07:01:03 +0100
commit     25faca8ea79f13b0eaa89dc2f7a3734e3d247b1c (patch)
tree       03ee7563f76eef13c2b32912aa1ff9f30053561f
parent     hle: service: nvflinger: buffer_queue_consumer: Use scoped_lock instead of unique_lock. (diff)
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_consumer.cpp  182
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_core.cpp       26
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_core.h         22
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_producer.cpp   79
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_producer.h      2
5 files changed, 130 insertions, 181 deletions
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
index 677bec932..41fbba219 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
@@ -20,122 +20,102 @@ BufferQueueConsumer::~BufferQueueConsumer() = default;
Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
std::chrono::nanoseconds expected_present,
u64 max_frame_number) {
- s32 num_dropped_buffers{};
+ std::scoped_lock lock(core->mutex);
+
+ // Check that the consumer doesn't currently have the maximum number of buffers acquired.
+ const s32 num_acquired_buffers{
+ static_cast<s32>(std::count_if(slots.begin(), slots.end(), [](const auto& slot) {
+ return slot.buffer_state == BufferState::Acquired;
+ }))};
+
+ if (num_acquired_buffers >= core->max_acquired_buffer_count + 1) {
+ LOG_ERROR(Service_NVFlinger, "max acquired buffer count reached: {} (max {})",
+ num_acquired_buffers, core->max_acquired_buffer_count);
+ return Status::InvalidOperation;
+ }
- std::shared_ptr<IProducerListener> listener;
- {
- std::unique_lock lock(core->mutex);
-
- // Check that the consumer doesn't currently have the maximum number of buffers acquired.
- const s32 num_acquired_buffers{
- static_cast<s32>(std::count_if(slots.begin(), slots.end(), [](const auto& slot) {
- return slot.buffer_state == BufferState::Acquired;
- }))};
-
- if (num_acquired_buffers >= core->max_acquired_buffer_count + 1) {
- LOG_ERROR(Service_NVFlinger, "max acquired buffer count reached: {} (max {})",
- num_acquired_buffers, core->max_acquired_buffer_count);
- return Status::InvalidOperation;
- }
+ // Check if the queue is empty.
+ if (core->queue.empty()) {
+ return Status::NoBufferAvailable;
+ }
- // Check if the queue is empty.
- if (core->queue.empty()) {
- return Status::NoBufferAvailable;
- }
+ auto front(core->queue.begin());
- auto front(core->queue.begin());
-
- // If expected_present is specified, we may not want to return a buffer yet.
- if (expected_present.count() != 0) {
- constexpr auto MAX_REASONABLE_NSEC = 1000000000LL; // 1 second
-
- // The expected_present argument indicates when the buffer is expected to be
- // presented on-screen.
- while (core->queue.size() > 1 && !core->queue[0].is_auto_timestamp) {
- const auto& buffer_item{core->queue[1]};
-
- // If dropping entry[0] would leave us with a buffer that the consumer is not yet
- // ready for, don't drop it.
- if (max_frame_number && buffer_item.frame_number > max_frame_number) {
- break;
- }
-
- // If entry[1] is timely, drop entry[0] (and repeat).
- const auto desired_present = buffer_item.timestamp;
- if (desired_present < expected_present.count() - MAX_REASONABLE_NSEC ||
- desired_present > expected_present.count()) {
- // This buffer is set to display in the near future, or desired_present is
- // garbage.
- LOG_DEBUG(Service_NVFlinger, "nodrop desire={} expect={}", desired_present,
- expected_present.count());
- break;
- }
-
- LOG_DEBUG(Service_NVFlinger, "drop desire={} expect={} size={}", desired_present,
- expected_present.count(), core->queue.size());
-
- if (core->StillTracking(*front)) {
- // Front buffer is still in mSlots, so mark the slot as free
- slots[front->slot].buffer_state = BufferState::Free;
- core->free_buffers.push_back(front->slot);
- listener = core->connected_producer_listener;
- ++num_dropped_buffers;
- }
-
- core->queue.erase(front);
- front = core->queue.begin();
- }
+ // If expected_present is specified, we may not want to return a buffer yet.
+ if (expected_present.count() != 0) {
+ constexpr auto MAX_REASONABLE_NSEC = 1000000000LL; // 1 second
- // See if the front buffer is ready to be acquired.
- const auto desired_present = front->timestamp;
- const auto buffer_is_due =
- desired_present <= expected_present.count() ||
- desired_present > expected_present.count() + MAX_REASONABLE_NSEC;
- const auto consumer_is_ready =
- max_frame_number > 0 ? front->frame_number <= max_frame_number : true;
+ // The expected_present argument indicates when the buffer is expected to be presented
+ // on-screen.
+ while (core->queue.size() > 1 && !core->queue[0].is_auto_timestamp) {
+ const auto& buffer_item{core->queue[1]};
- if (!buffer_is_due || !consumer_is_ready) {
- LOG_DEBUG(Service_NVFlinger, "defer desire={} expect={}", desired_present,
- expected_present.count());
- return Status::PresentLater;
+ // If dropping entry[0] would leave us with a buffer that the consumer is not yet ready
+ // for, don't drop it.
+ if (max_frame_number && buffer_item.frame_number > max_frame_number) {
+ break;
}
- LOG_DEBUG(Service_NVFlinger, "accept desire={} expect={}", desired_present,
- expected_present.count());
- }
+ // If entry[1] is timely, drop entry[0] (and repeat).
+ const auto desired_present = buffer_item.timestamp;
+ if (desired_present < expected_present.count() - MAX_REASONABLE_NSEC ||
+ desired_present > expected_present.count()) {
+ // This buffer is set to display in the near future, or desired_present is garbage.
+ LOG_DEBUG(Service_NVFlinger, "nodrop desire={} expect={}", desired_present,
+ expected_present.count());
+ break;
+ }
- const auto slot = front->slot;
- *out_buffer = *front;
+ LOG_DEBUG(Service_NVFlinger, "drop desire={} expect={} size={}", desired_present,
+ expected_present.count(), core->queue.size());
- LOG_DEBUG(Service_NVFlinger, "acquiring slot={}", slot);
+ if (core->StillTracking(*front)) {
+ // Front buffer is still in mSlots, so mark the slot as free
+ slots[front->slot].buffer_state = BufferState::Free;
+ }
- // If the front buffer is still being tracked, update its slot state
- if (core->StillTracking(*front)) {
- slots[slot].acquire_called = true;
- slots[slot].needs_cleanup_on_release = false;
- slots[slot].buffer_state = BufferState::Acquired;
- slots[slot].fence = Fence::NoFence();
+ core->queue.erase(front);
+ front = core->queue.begin();
}
- // If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr
- // to avoid unnecessarily remapping this buffer on the consumer side.
- if (out_buffer->acquire_called) {
- out_buffer->graphic_buffer = nullptr;
+ // See if the front buffer is ready to be acquired.
+ const auto desired_present = front->timestamp;
+ if (desired_present > expected_present.count() &&
+ desired_present < expected_present.count() + MAX_REASONABLE_NSEC) {
+ LOG_DEBUG(Service_NVFlinger, "defer desire={} expect={}", desired_present,
+ expected_present.count());
+ return Status::PresentLater;
}
- core->queue.erase(front);
+ LOG_DEBUG(Service_NVFlinger, "accept desire={} expect={}", desired_present,
+ expected_present.count());
+ }
+
+ const auto slot = front->slot;
+ *out_buffer = *front;
- // We might have freed a slot while dropping old buffers, or the producer may be blocked
- // waiting for the number of buffers in the queue to decrease.
- core->SignalDequeueCondition();
+ LOG_DEBUG(Service_NVFlinger, "acquiring slot={}", slot);
+
+ // If the front buffer is still being tracked, update its slot state
+ if (core->StillTracking(*front)) {
+ slots[slot].acquire_called = true;
+ slots[slot].needs_cleanup_on_release = false;
+ slots[slot].buffer_state = BufferState::Acquired;
+ slots[slot].fence = Fence::NoFence();
}
- if (listener != nullptr) {
- for (s32 i = 0; i < num_dropped_buffers; ++i) {
- listener->OnBufferReleased();
- }
+ // If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr to
+ // avoid unnecessarily remapping this buffer on the consumer side.
+ if (out_buffer->acquire_called) {
+ out_buffer->graphic_buffer = nullptr;
}
+ core->queue.erase(front);
+
+ // We might have freed a slot while dropping old buffers, or the producer may be blocked
+ // waiting for the number of buffers in the queue to decrease.
+ core->SignalDequeueCondition();
+
return Status::NoError;
}
@@ -147,7 +127,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
std::shared_ptr<IProducerListener> listener;
{
- std::unique_lock lock(core->mutex);
+ std::scoped_lock lock(core->mutex);
// If the frame number has changed because the buffer has been reallocated, we can ignore
// this ReleaseBuffer for the old buffer.
@@ -170,8 +150,6 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
slots[slot].fence = release_fence;
slots[slot].buffer_state = BufferState::Free;
- core->free_buffers.push_back(slot);
-
listener = core->connected_producer_listener;
LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot);
@@ -189,7 +167,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
return Status::BadValue;
}
- core->dequeue_condition.notify_all();
+ core->SignalDequeueCondition();
}
// Call back without lock held
@@ -209,7 +187,7 @@ Status BufferQueueConsumer::Connect(std::shared_ptr<IConsumerListener> consumer_
LOG_DEBUG(Service_NVFlinger, "controlled_by_app={}", controlled_by_app);
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
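
The AcquireBuffer rewrite above holds a single std::scoped_lock for the whole call instead of a block-scoped std::unique_lock (and the custom BufferQueueCore::AutoLock used elsewhere in this commit). A minimal sketch of that pattern, using hypothetical names (Core, DoLockedWork) rather than the real yuzu types:

#include <mutex>

// Hypothetical stand-in for BufferQueueCore: the mutex is a member and each
// public entry point locks it for the duration of the call.
struct Core {
    std::mutex mutex;
    int queued_buffers{};
};

void DoLockedWork(Core& core) {
    // scoped_lock acquires on construction and releases at scope exit,
    // so there is no manual lock()/unlock() pairing to get wrong.
    std::scoped_lock lock(core.mutex);
    ++core.queued_buffers;
}
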
diff --git a/src/core/hle/service/nvflinger/buffer_queue_core.cpp b/src/core/hle/service/nvflinger/buffer_queue_core.cpp
index eb93b43ee..6082610e0 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_core.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_core.cpp
@@ -10,16 +10,12 @@
namespace Service::android {
-BufferQueueCore::BufferQueueCore() : lock{mutex, std::defer_lock} {
- for (s32 slot = 0; slot < BufferQueueDefs::NUM_BUFFER_SLOTS; ++slot) {
- free_slots.insert(slot);
- }
-}
+BufferQueueCore::BufferQueueCore() = default;
BufferQueueCore::~BufferQueueCore() = default;
void BufferQueueCore::NotifyShutdown() {
- std::unique_lock lk(mutex);
+ std::scoped_lock lock(mutex);
is_shutting_down = true;
@@ -35,7 +31,7 @@ bool BufferQueueCore::WaitForDequeueCondition() {
return false;
}
- dequeue_condition.wait(lock);
+ dequeue_condition.wait(mutex);
return true;
}
@@ -86,26 +82,15 @@ s32 BufferQueueCore::GetPreallocatedBufferCountLocked() const {
void BufferQueueCore::FreeBufferLocked(s32 slot) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
- const auto had_buffer = slots[slot].graphic_buffer != nullptr;
-
slots[slot].graphic_buffer.reset();
if (slots[slot].buffer_state == BufferState::Acquired) {
slots[slot].needs_cleanup_on_release = true;
}
- if (slots[slot].buffer_state != BufferState::Free) {
- free_slots.insert(slot);
- } else if (had_buffer) {
- // If the slot was FREE, but we had a buffer, we need to move this slot from the free
- // buffers list to the the free slots list.
- free_buffers.remove(slot);
- free_slots.insert(slot);
- }
-
slots[slot].buffer_state = BufferState::Free;
+ slots[slot].frame_number = UINT32_MAX;
slots[slot].acquire_called = false;
- slots[slot].frame_number = 0;
slots[slot].fence = Fence::NoFence();
}
@@ -126,8 +111,7 @@ bool BufferQueueCore::StillTracking(const BufferItem& item) const {
void BufferQueueCore::WaitWhileAllocatingLocked() const {
while (is_allocating) {
- std::unique_lock lk(mutex);
- is_allocating_condition.wait(lk);
+ is_allocating_condition.wait(mutex);
}
}
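
WaitForDequeueCondition and WaitWhileAllocatingLocked now wait on the member mutex directly, which is why the header change below swaps std::condition_variable for std::condition_variable_any. A small sketch of that combination, with hypothetical names (Waiter, ready); as in the real code, the caller's scoped_lock stays balanced because wait() unlocks the mutex while blocked and re-locks it before returning:

#include <condition_variable>
#include <mutex>

struct Waiter {
    std::mutex mutex;
    // condition_variable_any accepts any BasicLockable, including std::mutex itself.
    std::condition_variable_any cond;
    bool ready{};

    void Wait() {
        std::scoped_lock lock(mutex);
        while (!ready) {
            // Unlocks 'mutex' while blocked, re-locks it before returning.
            cond.wait(mutex);
        }
    }

    void Signal() {
        {
            std::scoped_lock lock(mutex);
            ready = true;
        }
        cond.notify_all();
    }
};
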
diff --git a/src/core/hle/service/nvflinger/buffer_queue_core.h b/src/core/hle/service/nvflinger/buffer_queue_core.h
index a3cd89f1c..4dfd53387 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_core.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_core.h
@@ -50,23 +50,7 @@ private:
void WaitWhileAllocatingLocked() const;
private:
- class AutoLock final {
- public:
- AutoLock(std::shared_ptr<BufferQueueCore>& core_) : core{core_} {
- core->lock.lock();
- }
-
- ~AutoLock() {
- core->lock.unlock();
- }
-
- private:
- std::shared_ptr<BufferQueueCore>& core;
- };
-
-private:
mutable std::mutex mutex;
- mutable std::unique_lock<std::mutex> lock;
bool is_abandoned{};
bool consumer_controlled_by_app{};
std::shared_ptr<IConsumerListener> consumer_listener;
@@ -75,10 +59,8 @@ private:
std::shared_ptr<IProducerListener> connected_producer_listener;
BufferQueueDefs::SlotsType slots{};
std::vector<BufferItem> queue;
- std::set<s32> free_slots;
- std::list<s32> free_buffers;
s32 override_max_buffer_count{};
- mutable std::condition_variable dequeue_condition;
+ mutable std::condition_variable_any dequeue_condition;
const bool use_async_buffer{}; // This is always disabled on HOS
bool dequeue_buffer_cannot_block{};
PixelFormat default_buffer_format{PixelFormat::Rgba8888};
@@ -90,7 +72,7 @@ private:
u64 frame_counter{};
u32 transform_hint{};
bool is_allocating{};
- mutable std::condition_variable is_allocating_condition;
+ mutable std::condition_variable_any is_allocating_condition;
bool allow_allocation{true};
u64 buffer_age{};
bool is_shutting_down{};
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index 078091904..0833be57a 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
@@ -38,7 +38,7 @@ BufferQueueProducer::~BufferQueueProducer() {
Status BufferQueueProducer::RequestBuffer(s32 slot, std::shared_ptr<GraphicBuffer>* buf) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -65,7 +65,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {
std::shared_ptr<IConsumerListener> listener;
{
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -156,6 +156,14 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found,
case BufferState::Acquired:
++acquired_count;
break;
+ case BufferState::Free:
+ // We return the oldest of the free buffers to avoid stalling the producer if
+ // possible, since the consumer may still have pending reads of in-flight buffers
+ if (*found == BufferQueueCore::INVALID_BUFFER_SLOT ||
+ slots[s].frame_number < slots[*found].frame_number) {
+ *found = s;
+ }
+ break;
default:
break;
}
@@ -183,27 +191,12 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found,
}
}
- *found = BufferQueueCore::INVALID_BUFFER_SLOT;
-
// If we disconnect and reconnect quickly, we can be in a state where our slots are empty
// but we have many buffers in the queue. This can cause us to run out of memory if we
// outrun the consumer. Wait here if it looks like we have too many buffers queued up.
const bool too_many_buffers = core->queue.size() > static_cast<size_t>(max_buffer_count);
if (too_many_buffers) {
LOG_ERROR(Service_NVFlinger, "queue size is {}, waiting", core->queue.size());
- } else {
- if (!core->free_buffers.empty()) {
- auto slot = core->free_buffers.begin();
- *found = *slot;
- core->free_buffers.erase(slot);
- } else if (core->allow_allocation && !core->free_slots.empty()) {
- auto slot = core->free_slots.begin();
- // Only return free slots up to the max buffer count
- if (*slot < max_buffer_count) {
- *found = *slot;
- core->free_slots.erase(slot);
- }
- }
}
// If no buffer is found, or if the queue has too many buffers outstanding, wait for a
@@ -240,7 +233,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
Status return_flags = Status::NoError;
bool attached_by_consumer = false;
{
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
if (format == PixelFormat::NoFormat) {
format = core->default_buffer_format;
@@ -317,12 +310,13 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
}
{
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
return Status::NoInit;
}
+ slots[*out_slot].frame_number = UINT32_MAX;
slots[*out_slot].graphic_buffer = graphic_buffer;
}
}
@@ -339,7 +333,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
Status BufferQueueProducer::DetachBuffer(s32 slot) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
return Status::NoInit;
@@ -374,7 +368,7 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
return Status::BadValue;
}
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
@@ -382,12 +376,21 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
return Status::NoInit;
}
- if (core->free_buffers.empty()) {
- return Status::NoMemory;
+
+ // Find the oldest valid slot
+ int found = BufferQueueCore::INVALID_BUFFER_SLOT;
+ for (int s = 0; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
+ if (slots[s].buffer_state == BufferState::Free && slots[s].graphic_buffer != nullptr) {
+ if (found == BufferQueueCore::INVALID_BUFFER_SLOT ||
+ slots[s].frame_number < slots[found].frame_number) {
+ found = s;
+ }
+ }
}
- const s32 found = core->free_buffers.front();
- core->free_buffers.remove(found);
+ if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
+ return Status::NoMemory;
+ }
LOG_DEBUG(Service_NVFlinger, "Detached slot {}", found);
@@ -409,7 +412,7 @@ Status BufferQueueProducer::AttachBuffer(s32* out_slot,
return Status::BadValue;
}
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
Status return_flags = Status::NoError;
@@ -469,7 +472,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
BufferItem item;
{
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -554,7 +557,9 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
// mark it as freed
if (core->StillTracking(*front)) {
slots[front->slot].buffer_state = BufferState::Free;
- core->free_buffers.push_front(front->slot);
+ // Reset the frame number of the freed buffer so that it is the first in line to
+ // be dequeued again
+ slots[front->slot].frame_number = 0;
}
// Overwrite the droppable buffer with the incoming one
*front = item;
@@ -582,10 +587,9 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
// Call back without the main BufferQueue lock held, but with the callback lock held so we can
// ensure that callbacks occur in order
{
- std::unique_lock lock(callback_mutex);
+ std::scoped_lock lock(callback_mutex);
while (callback_ticket != current_callback_ticket) {
- std::unique_lock<std::mutex> lk(callback_mutex);
- callback_condition.wait(lk);
+ callback_condition.wait(callback_mutex);
}
if (frameAvailableListener != nullptr) {
@@ -604,7 +608,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -621,8 +625,8 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
return;
}
- core->free_buffers.push_front(slot);
slots[slot].buffer_state = BufferState::Free;
+ slots[slot].frame_number = 0;
slots[slot].fence = fence;
core->SignalDequeueCondition();
@@ -630,7 +634,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
}
Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
if (out_value == nullptr) {
LOG_ERROR(Service_NVFlinger, "outValue was nullptr");
@@ -687,7 +691,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
Status BufferQueueProducer::Connect(const std::shared_ptr<IProducerListener>& listener,
NativeWindowApi api, bool producer_controlled_by_app,
QueueBufferOutput* output) {
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
LOG_DEBUG(Service_NVFlinger, "api = {} producer_controlled_by_app = {}", api,
producer_controlled_by_app);
@@ -745,7 +749,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
std::shared_ptr<IConsumerListener> listener;
{
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
@@ -795,10 +799,11 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
return Status::BadValue;
}
- BufferQueueCore::AutoLock lock(core);
+ std::scoped_lock lock(core->mutex);
slots[slot] = {};
slots[slot].graphic_buffer = buffer;
+ slots[slot].frame_number = 0;
// Most games preallocate a buffer and pass a valid buffer here. However, it is possible for
// this to be called with an empty buffer, Naruto Ultimate Ninja Storm is a game that does this.
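
With free_slots and free_buffers removed from the core, the producer changes above pick a free slot by scanning all slots and preferring the smallest frame_number (freed or cancelled slots are reset to 0, slots given a newly allocated buffer to UINT32_MAX). A reduced sketch of that selection, with hypothetical names and constants (Slot, FindOldestFreeSlot, NUM_SLOTS):

#include <array>
#include <cstdint>

enum class BufferState { Free, Dequeued, Queued, Acquired };

struct Slot {
    BufferState buffer_state{BufferState::Free};
    std::uint64_t frame_number{};
};

constexpr int NUM_SLOTS = 64;   // sketch value, not the real BufferQueueDefs constant
constexpr int INVALID_SLOT = -1;

// Prefer the least recently used free slot (smallest frame_number), so the
// oldest free buffer is handed back to the producer first.
int FindOldestFreeSlot(const std::array<Slot, NUM_SLOTS>& slots) {
    int found = INVALID_SLOT;
    for (int s = 0; s < NUM_SLOTS; ++s) {
        if (slots[s].buffer_state != BufferState::Free) {
            continue;
        }
        if (found == INVALID_SLOT || slots[s].frame_number < slots[found].frame_number) {
            found = s;
        }
    }
    return found;
}
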
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h
index 5ddeebe0c..77fdcae8e 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h
@@ -77,7 +77,7 @@ private:
std::mutex callback_mutex;
s32 next_callback_ticket{};
s32 current_callback_ticket{};
- std::condition_variable callback_condition;
+ std::condition_variable_any callback_condition;
};
} // namespace Service::android