Use MultiLevelQueue instead of old ThreadQueueList

This commit is contained in:
Fernando Sahmkow 2019-03-16 00:30:15 -04:00 committed by FernandoS27
parent 9dbba9240b
commit dde0814837
3 changed files with 34 additions and 31 deletions

View file

@@ -107,6 +107,9 @@ public:
         iterator_impl(const iterator_impl<false>& other)
             : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}

+        iterator_impl(const iterator_impl<true>& other)
+            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
+
         iterator_impl& operator=(const iterator_impl<false>& other) {
             mlq = other.mlq;
             it = other.it;
@@ -149,7 +152,7 @@ public:
    using iterator = iterator_impl<false>;
    using const_iterator = iterator_impl<true>;

-    void add(T& element, u32 priority, bool send_back = true) {
+    void add(const T& element, u32 priority, bool send_back = true) {
        if (send_back)
            levels[priority].push_back(element);
        else
@ -158,23 +161,18 @@ public:
} }
void remove(const T& element, u32 priority) { void remove(const T& element, u32 priority) {
levels[priority].erase(ListIterateTo(levels[priority], element)); auto it = ListIterateTo(levels[priority], element);
if (it == levels[priority].end())
return;
levels[priority].erase(it);
if (levels[priority].empty()) { if (levels[priority].empty()) {
used_priorities &= ~(1ULL << priority); used_priorities &= ~(1ULL << priority);
} }
} }
void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) { void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
const auto new_next = remove(element, old_priority);
adjust_front ? levels[new_priority].cbegin() : levels[new_priority].cend(); add(element, new_priority, !adjust_front);
ListSplice(levels[new_priority], new_next, levels[old_priority],
ListIterateTo(levels[old_priority], element));
used_priorities |= 1ULL << new_priority;
if (levels[old_priority].empty()) {
used_priorities &= ~(1ULL << old_priority);
}
} }
void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) { void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
adjust(*it, old_priority, new_priority, adjust_front); adjust(*it, old_priority, new_priority, adjust_front);

View file

@@ -30,7 +30,7 @@ Scheduler::~Scheduler() {

 bool Scheduler::HaveReadyThreads() const {
    std::lock_guard<std::mutex> lock(scheduler_mutex);
-    return ready_queue.get_first() != nullptr;
+    return !ready_queue.empty();
 }

 Thread* Scheduler::GetCurrentThread() const {
@@ -45,23 +45,27 @@ Thread* Scheduler::PopNextReadyThread() {
    Thread* next = nullptr;
    Thread* thread = GetCurrentThread();

    if (thread && thread->GetStatus() == ThreadStatus::Running) {
+        if (ready_queue.empty())
+            return thread;
+
        // We have to do better than the current thread.
        // This call returns null when that's not possible.
-        next = ready_queue.pop_first_better(thread->GetPriority());
-        if (!next) {
-            // Otherwise just keep going with the current thread
+        next = ready_queue.front();
+        if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
            next = thread;
        }
    } else {
-        next = ready_queue.pop_first();
+        if (ready_queue.empty())
+            return nullptr;
+        next = ready_queue.front();
    }

    return next;
 }

 void Scheduler::SwitchContext(Thread* new_thread) {
-    Thread* const previous_thread = GetCurrentThread();
+    Thread* previous_thread = GetCurrentThread();
    Process* const previous_process = system.Kernel().CurrentProcess();

    UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +79,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
        if (previous_thread->GetStatus() == ThreadStatus::Running) {
            // This is only the case when a reschedule is triggered without the current thread
            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+            ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
            previous_thread->SetStatus(ThreadStatus::Ready);
        }
    }
@@ -90,7 +94,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {

        current_thread = new_thread;
-        ready_queue.remove(new_thread->GetPriority(), new_thread);
+        ready_queue.remove(new_thread, new_thread->GetPriority());
        new_thread->SetStatus(ThreadStatus::Running);

        auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -147,7 +151,6 @@ void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
    std::lock_guard<std::mutex> lock(scheduler_mutex);

    thread_list.push_back(std::move(thread));
-    ready_queue.prepare(priority);
 }

 void Scheduler::RemoveThread(Thread* thread) {
@@ -161,33 +164,35 @@ void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> lock(scheduler_mutex);

    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
-    ready_queue.push_back(priority, thread);
+    ready_queue.add(thread, priority);
 }

 void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> lock(scheduler_mutex);

    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
-    ready_queue.remove(priority, thread);
+    ready_queue.remove(thread, priority);
 }

 void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    if (thread->GetPriority() == priority)
+        return;

    // If thread was ready, adjust queues
    if (thread->GetStatus() == ThreadStatus::Ready)
-        ready_queue.move(thread, thread->GetPriority(), priority);
-    else
-        ready_queue.prepare(priority);
+        ready_queue.adjust(thread, thread->GetPriority(), priority);
 }

 Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
    std::lock_guard<std::mutex> lock(scheduler_mutex);

    const u32 mask = 1U << core;
-    return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
-        return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
-    });
+    for (auto& thread : ready_queue) {
+        if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority)
+            return thread;
+    }
+    return nullptr;
 }

 void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {

View file

@@ -7,7 +7,7 @@
 #include <mutex>
 #include <vector>

 #include "common/common_types.h"
-#include "common/thread_queue_list.h"
+#include "common/multi_level_queue.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/thread.h"
@@ -156,7 +156,7 @@ private:
    std::vector<SharedPtr<Thread>> thread_list;

    /// Lists only ready thread ids.
-    Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
+    Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;

    SharedPtr<Thread> current_thread = nullptr;