	kernel: convert KMemoryLayout, KMemoryRegion*, KPageTableSlabHeap, KPriorityQueue
commit 467adc1acd
parent 0483dfae1a
@@ -18,11 +18,11 @@ KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, A
 
 } // namespace
 
-KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_)
-    : memory_region_allocator{memory_region_allocator_} {}
+KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator)
+    : m_memory_region_allocator{memory_region_allocator} {}
 
 void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
-    this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id));
+    this->insert(*AllocateRegion(m_memory_region_allocator, address, last_address, attr, type_id));
 }
 
 bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
@@ -69,7 +69,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
         const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
                                  ? old_pair + (address - old_address)
                                  : old_pair;
-        this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last,
+        this->insert(*AllocateRegion(m_memory_region_allocator, address, inserted_region_last,
                                      new_pair, new_attr, type_id));
     }
 
@@ -78,7 +78,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
         const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
                                    ? old_pair + (inserted_region_end - old_address)
                                    : old_pair;
-        this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last,
+        this->insert(*AllocateRegion(m_memory_region_allocator, inserted_region_end, old_last,
                                      after_pair, old_attr, old_type));
     }
 
@@ -126,14 +126,15 @@ VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u
 }
 
 KMemoryLayout::KMemoryLayout()
-    : virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator},
-      virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {}
+    : m_virtual_tree{m_memory_region_allocator}, m_physical_tree{m_memory_region_allocator},
+      m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{
+                                                            m_memory_region_allocator} {}
 
 void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
                                                       VAddr linear_virtual_start) {
     // Set static differences.
-    linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
-    linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
+    m_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
+    m_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
 
     // Initialize linear trees.
     for (auto& region : GetPhysicalMemoryRegionTree()) {

@@ -80,35 +80,35 @@ public:
     KMemoryLayout();
 
     KMemoryRegionTree& GetVirtualMemoryRegionTree() {
-        return virtual_tree;
+        return m_virtual_tree;
     }
     const KMemoryRegionTree& GetVirtualMemoryRegionTree() const {
-        return virtual_tree;
+        return m_virtual_tree;
     }
     KMemoryRegionTree& GetPhysicalMemoryRegionTree() {
-        return physical_tree;
+        return m_physical_tree;
     }
     const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const {
-        return physical_tree;
+        return m_physical_tree;
     }
     KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() {
-        return virtual_linear_tree;
+        return m_virtual_linear_tree;
     }
     const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const {
-        return virtual_linear_tree;
+        return m_virtual_linear_tree;
    }
     KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() {
-        return physical_linear_tree;
+        return m_physical_linear_tree;
     }
     const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const {
-        return physical_linear_tree;
+        return m_physical_linear_tree;
     }
 
     VAddr GetLinearVirtualAddress(PAddr address) const {
-        return address + linear_phys_to_virt_diff;
+        return address + m_linear_phys_to_virt_diff;
     }
     PAddr GetLinearPhysicalAddress(VAddr address) const {
-        return address + linear_virt_to_phys_diff;
+        return address + m_linear_virt_to_phys_diff;
     }
 
     const KMemoryRegion* FindVirtual(VAddr address) const {
@@ -391,13 +391,13 @@ private:
     }
 
 private:
-    u64 linear_phys_to_virt_diff{};
-    u64 linear_virt_to_phys_diff{};
-    KMemoryRegionAllocator memory_region_allocator;
-    KMemoryRegionTree virtual_tree;
-    KMemoryRegionTree physical_tree;
-    KMemoryRegionTree virtual_linear_tree;
-    KMemoryRegionTree physical_linear_tree;
+    u64 m_linear_phys_to_virt_diff{};
+    u64 m_linear_virt_to_phys_diff{};
+    KMemoryRegionAllocator m_memory_region_allocator;
+    KMemoryRegionTree m_virtual_tree;
+    KMemoryRegionTree m_physical_tree;
+    KMemoryRegionTree m_virtual_linear_tree;
+    KMemoryRegionTree m_physical_linear_tree;
 };
 
 namespace Init {

@@ -21,15 +21,15 @@ public:
     YUZU_NON_MOVEABLE(KMemoryRegion);
 
     constexpr KMemoryRegion() = default;
-    constexpr KMemoryRegion(u64 address_, u64 last_address_)
-        : address{address_}, last_address{last_address_} {}
-    constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_,
-                            u32 type_id_)
-        : address(address_), last_address(last_address_), pair_address(pair_address_),
-          attributes(attributes_), type_id(type_id_) {}
-    constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_)
-        : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_,
-                        type_id_) {}
+    constexpr KMemoryRegion(u64 address, u64 last_address)
+        : m_address{address}, m_last_address{last_address} {}
+    constexpr KMemoryRegion(u64 address, u64 last_address, u64 pair_address, u32 attributes,
+                            u32 type_id)
+        : m_address(address), m_last_address(last_address), m_pair_address(pair_address),
+          m_attributes(attributes), m_type_id(type_id) {}
+    constexpr KMemoryRegion(u64 address, u64 last_address, u32 attributes, u32 type_id)
+        : KMemoryRegion(address, last_address, std::numeric_limits<u64>::max(), attributes,
+                        type_id) {}
 
     ~KMemoryRegion() = default;
 
@@ -44,15 +44,15 @@ public:
     }
 
     constexpr u64 GetAddress() const {
-        return address;
+        return m_address;
     }
 
     constexpr u64 GetPairAddress() const {
-        return pair_address;
+        return m_pair_address;
     }
 
     constexpr u64 GetLastAddress() const {
-        return last_address;
+        return m_last_address;
     }
 
     constexpr u64 GetEndAddress() const {
@@ -64,16 +64,16 @@ public:
     }
 
     constexpr u32 GetAttributes() const {
-        return attributes;
+        return m_attributes;
     }
 
     constexpr u32 GetType() const {
-        return type_id;
+        return m_type_id;
     }
 
     constexpr void SetType(u32 type) {
         ASSERT(this->CanDerive(type));
-        type_id = type;
+        m_type_id = type;
     }
 
     constexpr bool Contains(u64 addr) const {
@@ -94,27 +94,27 @@ public:
     }
 
     constexpr void SetPairAddress(u64 a) {
-        pair_address = a;
+        m_pair_address = a;
     }
 
     constexpr void SetTypeAttribute(u32 attr) {
-        type_id |= attr;
+        m_type_id |= attr;
     }
 
 private:
     constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
-        address = a;
-        pair_address = p;
-        last_address = la;
-        attributes = r;
-        type_id = t;
+        m_address = a;
+        m_pair_address = p;
+        m_last_address = la;
+        m_attributes = r;
+        m_type_id = t;
     }
 
-    u64 address{};
-    u64 last_address{};
-    u64 pair_address{};
-    u32 attributes{};
-    u32 type_id{};
+    u64 m_address{};
+    u64 m_last_address{};
+    u64 m_pair_address{};
+    u32 m_attributes{};
+    u32 m_type_id{};
 };
 
 class KMemoryRegionTree final {
@@ -322,7 +322,7 @@ public:
 
 private:
     TreeType m_tree{};
-    KMemoryRegionAllocator& memory_region_allocator;
+    KMemoryRegionAllocator& m_memory_region_allocator;
 };
 
 class KMemoryRegionAllocator final {
@@ -338,18 +338,18 @@ public:
     template <typename... Args>
     KMemoryRegion* Allocate(Args&&... args) {
         // Ensure we stay within the bounds of our heap.
-        ASSERT(this->num_regions < MaxMemoryRegions);
+        ASSERT(m_num_regions < MaxMemoryRegions);
 
         // Create the new region.
-        KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]);
-        new (region) KMemoryRegion(std::forward<Args>(args)...);
+        KMemoryRegion* region = std::addressof(m_region_heap[m_num_regions++]);
+        std::construct_at(region, std::forward<Args>(args)...);
 
         return region;
     }
 
 private:
-    std::array<KMemoryRegion, MaxMemoryRegions> region_heap{};
-    size_t num_regions{};
+    std::array<KMemoryRegion, MaxMemoryRegions> m_region_heap{};
+    size_t m_num_regions{};
 };
 
 } // namespace Kernel

@@ -41,7 +41,7 @@ public:
         // Check that the object is closed.
         R_UNLESS(derived->IsServerClosed(), ResultInvalidState);
 
-        return Delete(kernel, obj.GetPointerUnsafe(), name);
+        R_RETURN(Delete(kernel, obj.GetPointerUnsafe(), name));
     }
 
     template <typename Derived>

@@ -20,7 +20,8 @@ public:
     PageTablePage() = default;
 
 private:
-    std::array<u8, PageSize> m_buffer{};
+    // Initializer intentionally skipped
+    std::array<u8, PageSize> m_buffer;
 };
 static_assert(sizeof(PageTablePage) == PageSize);
 

@@ -77,11 +77,11 @@ private:
 public:
     class KPerCoreQueue {
     private:
-        std::array<Entry, NumCores> root{};
+        std::array<Entry, NumCores> m_root{};
 
     public:
         constexpr KPerCoreQueue() {
-            for (auto& per_core_root : root) {
+            for (auto& per_core_root : m_root) {
                 per_core_root.Initialize();
             }
         }
@@ -91,15 +91,15 @@ public:
             Entry& member_entry = member->GetPriorityQueueEntry(core);
 
             // Get the entry associated with the end of the queue.
-            Member* tail = this->root[core].GetPrev();
+            Member* tail = m_root[core].GetPrev();
             Entry& tail_entry =
-                (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
+                (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : m_root[core];
 
             // Link the entries.
             member_entry.SetPrev(tail);
             member_entry.SetNext(nullptr);
             tail_entry.SetNext(member);
-            this->root[core].SetPrev(member);
+            m_root[core].SetPrev(member);
 
             return tail == nullptr;
         }
@@ -109,15 +109,15 @@ public:
             Entry& member_entry = member->GetPriorityQueueEntry(core);
 
             // Get the entry associated with the front of the queue.
-            Member* head = this->root[core].GetNext();
+            Member* head = m_root[core].GetNext();
             Entry& head_entry =
-                (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
+                (head != nullptr) ? head->GetPriorityQueueEntry(core) : m_root[core];
 
             // Link the entries.
             member_entry.SetPrev(nullptr);
             member_entry.SetNext(head);
             head_entry.SetPrev(member);
-            this->root[core].SetNext(member);
+            m_root[core].SetNext(member);
 
             return (head == nullptr);
         }
@@ -130,9 +130,9 @@ public:
             Member* prev = member_entry.GetPrev();
             Member* next = member_entry.GetNext();
             Entry& prev_entry =
-                (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
+                (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : m_root[core];
             Entry& next_entry =
-                (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
+                (next != nullptr) ? next->GetPriorityQueueEntry(core) : m_root[core];
 
             // Unlink.
             prev_entry.SetNext(next);
@@ -142,7 +142,7 @@ public:
         }
 
         constexpr Member* GetFront(s32 core) const {
-            return this->root[core].GetNext();
+            return m_root[core].GetNext();
         }
     };
 
@@ -158,8 +158,8 @@ public:
                 return;
             }
 
-            if (this->queues[priority].PushBack(core, member)) {
-                this->available_priorities[core].SetBit(priority);
+            if (m_queues[priority].PushBack(core, member)) {
+                m_available_priorities[core].SetBit(priority);
             }
         }
 
@@ -171,8 +171,8 @@ public:
                 return;
             }
 
-            if (this->queues[priority].PushFront(core, member)) {
-                this->available_priorities[core].SetBit(priority);
+            if (m_queues[priority].PushFront(core, member)) {
+                m_available_priorities[core].SetBit(priority);
             }
         }
 
@@ -184,18 +184,17 @@ public:
                 return;
             }
 
-            if (this->queues[priority].Remove(core, member)) {
-                this->available_priorities[core].ClearBit(priority);
+            if (m_queues[priority].Remove(core, member)) {
+                m_available_priorities[core].ClearBit(priority);
             }
         }
 
         constexpr Member* GetFront(s32 core) const {
             ASSERT(IsValidCore(core));
 
-            const s32 priority =
-                static_cast<s32>(this->available_priorities[core].CountLeadingZero());
+            const s32 priority = static_cast<s32>(m_available_priorities[core].CountLeadingZero());
             if (priority <= LowestPriority) {
-                return this->queues[priority].GetFront(core);
+                return m_queues[priority].GetFront(core);
             } else {
                 return nullptr;
             }
@@ -206,7 +205,7 @@ public:
             ASSERT(IsValidPriority(priority));
 
             if (priority <= LowestPriority) {
-                return this->queues[priority].GetFront(core);
+                return m_queues[priority].GetFront(core);
             } else {
                 return nullptr;
             }
@@ -218,9 +217,9 @@ public:
             Member* next = member->GetPriorityQueueEntry(core).GetNext();
             if (next == nullptr) {
                 const s32 priority = static_cast<s32>(
-                    this->available_priorities[core].GetNextSet(member->GetPriority()));
+                    m_available_priorities[core].GetNextSet(member->GetPriority()));
                 if (priority <= LowestPriority) {
-                    next = this->queues[priority].GetFront(core);
+                    next = m_queues[priority].GetFront(core);
                 }
             }
             return next;
@@ -231,8 +230,8 @@ public:
             ASSERT(IsValidPriority(priority));
 
             if (priority <= LowestPriority) {
-                this->queues[priority].Remove(core, member);
-                this->queues[priority].PushFront(core, member);
+                m_queues[priority].Remove(core, member);
+                m_queues[priority].PushFront(core, member);
             }
         }
 
@@ -241,29 +240,29 @@ public:
             ASSERT(IsValidPriority(priority));
 
             if (priority <= LowestPriority) {
-                this->queues[priority].Remove(core, member);
-                this->queues[priority].PushBack(core, member);
-                return this->queues[priority].GetFront(core);
+                m_queues[priority].Remove(core, member);
+                m_queues[priority].PushBack(core, member);
+                return m_queues[priority].GetFront(core);
             } else {
                 return nullptr;
             }
         }
 
     private:
-        std::array<KPerCoreQueue, NumPriority> queues{};
-        std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
+        std::array<KPerCoreQueue, NumPriority> m_queues{};
+        std::array<Common::BitSet64<NumPriority>, NumCores> m_available_priorities{};
     };
 
 private:
-    KPriorityQueueImpl scheduled_queue;
-    KPriorityQueueImpl suggested_queue;
+    KPriorityQueueImpl m_scheduled_queue;
+    KPriorityQueueImpl m_suggested_queue;
 
 private:
-    constexpr void ClearAffinityBit(u64& affinity, s32 core) {
+    static constexpr void ClearAffinityBit(u64& affinity, s32 core) {
         affinity &= ~(UINT64_C(1) << core);
     }
 
-    constexpr s32 GetNextCore(u64& affinity) {
+    static constexpr s32 GetNextCore(u64& affinity) {
         const s32 core = std::countr_zero(affinity);
         ClearAffinityBit(affinity, core);
         return core;
@@ -275,13 +274,13 @@ private:
         // Push onto the scheduled queue for its core, if we can.
         u64 affinity = member->GetAffinityMask().GetAffinityMask();
         if (const s32 core = member->GetActiveCore(); core >= 0) {
-            this->scheduled_queue.PushBack(priority, core, member);
+            m_scheduled_queue.PushBack(priority, core, member);
             ClearAffinityBit(affinity, core);
         }
 
         // And suggest the thread for all other cores.
         while (affinity) {
-            this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+            m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
         }
     }
 
@@ -291,14 +290,14 @@ private:
         // Push onto the scheduled queue for its core, if we can.
         u64 affinity = member->GetAffinityMask().GetAffinityMask();
         if (const s32 core = member->GetActiveCore(); core >= 0) {
-            this->scheduled_queue.PushFront(priority, core, member);
+            m_scheduled_queue.PushFront(priority, core, member);
             ClearAffinityBit(affinity, core);
         }
 
         // And suggest the thread for all other cores.
         // Note: Nintendo pushes onto the back of the suggested queue, not the front.
         while (affinity) {
-            this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+            m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
         }
     }
 
@@ -308,13 +307,13 @@ private:
         // Remove from the scheduled queue for its core.
         u64 affinity = member->GetAffinityMask().GetAffinityMask();
         if (const s32 core = member->GetActiveCore(); core >= 0) {
-            this->scheduled_queue.Remove(priority, core, member);
+            m_scheduled_queue.Remove(priority, core, member);
             ClearAffinityBit(affinity, core);
         }
 
         // Remove from the suggested queue for all other cores.
         while (affinity) {
-            this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
+            m_suggested_queue.Remove(priority, GetNextCore(affinity), member);
         }
     }
 
@@ -323,27 +322,27 @@ public:
 
     // Getters.
     constexpr Member* GetScheduledFront(s32 core) const {
-        return this->scheduled_queue.GetFront(core);
+        return m_scheduled_queue.GetFront(core);
     }
 
     constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
-        return this->scheduled_queue.GetFront(priority, core);
+        return m_scheduled_queue.GetFront(priority, core);
     }
 
     constexpr Member* GetSuggestedFront(s32 core) const {
-        return this->suggested_queue.GetFront(core);
+        return m_suggested_queue.GetFront(core);
     }
 
     constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
-        return this->suggested_queue.GetFront(priority, core);
+        return m_suggested_queue.GetFront(priority, core);
     }
 
     constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
-        return this->scheduled_queue.GetNext(core, member);
+        return m_scheduled_queue.GetNext(core, member);
     }
 
     constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
-        return this->suggested_queue.GetNext(core, member);
+        return m_suggested_queue.GetNext(core, member);
     }
 
     constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
@@ -375,7 +374,7 @@ public:
             return;
         }
 
-        this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
+        m_scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
     }
 
     constexpr KThread* MoveToScheduledBack(Member* member) {
@@ -384,8 +383,7 @@ public:
             return {};
         }
 
-        return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
-                                                member);
+        return m_scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
     }
 
     // First class fancy operations.
@@ -425,9 +423,9 @@ public:
         for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
             if (prev_affinity.GetAffinity(core)) {
                 if (core == prev_core) {
-                    this->scheduled_queue.Remove(priority, core, member);
+                    m_scheduled_queue.Remove(priority, core, member);
                 } else {
-                    this->suggested_queue.Remove(priority, core, member);
+                    m_suggested_queue.Remove(priority, core, member);
                 }
             }
         }
@@ -436,9 +434,9 @@ public:
         for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
             if (new_affinity.GetAffinity(core)) {
                 if (core == new_core) {
-                    this->scheduled_queue.PushBack(priority, core, member);
+                    m_scheduled_queue.PushBack(priority, core, member);
                 } else {
-                    this->suggested_queue.PushBack(priority, core, member);
+                    m_suggested_queue.PushBack(priority, core, member);
                 }
             }
         }
@@ -458,22 +456,22 @@ public:
         if (prev_core != new_core) {
             // Remove from the scheduled queue for the previous core.
             if (prev_core >= 0) {
-                this->scheduled_queue.Remove(priority, prev_core, member);
+                m_scheduled_queue.Remove(priority, prev_core, member);
             }
 
             // Remove from the suggested queue and add to the scheduled queue for the new core.
             if (new_core >= 0) {
-                this->suggested_queue.Remove(priority, new_core, member);
+                m_suggested_queue.Remove(priority, new_core, member);
                 if (to_front) {
-                    this->scheduled_queue.PushFront(priority, new_core, member);
+                    m_scheduled_queue.PushFront(priority, new_core, member);
                 } else {
-                    this->scheduled_queue.PushBack(priority, new_core, member);
+                    m_scheduled_queue.PushBack(priority, new_core, member);
                 }
             }
 
             // Add to the suggested queue for the previous core.
             if (prev_core >= 0) {
-                this->suggested_queue.PushBack(priority, prev_core, member);
+                m_suggested_queue.PushBack(priority, prev_core, member);
             }
         }
     }