[prev in list] [next in list] [prev in thread] [next in thread] 

List:       haiku-commits
Subject:    [haiku-commits] BRANCH pdziepak-github.scheduler [f9ee217] in src/system/kernel/scheduler: . src/sys
From:       pdziepak-github.scheduler <community () haiku-os ! org>
Date:       2013-11-28 15:45:35
Message-ID: 20131128154536.3E59D5C0CAD () vmrepo ! haiku-os ! org
[Download RAW message or body]

added 3 changesets to branch 'refs/remotes/pdziepak-github/scheduler'
old head: 9caf7f4fb95b33daa9e2caf2267c2636ba24ce49
new head: f9ee217ad6c59122fefd39a445c99f2c0fb2af1e
overview: https://github.com/pdziepak/Haiku/compare/9caf7f4...f9ee217

----------------------------------------------------------------------------

8711571: scheduler: Protect package data with rw_spinlock

286b341: kernel: Merge two occurrences of thread resume code

f9ee217: scheduler: Migrate threads less often in power saving mode

                                    [ Pawel Dziepak <pdziepak@quarnos.org> ]

----------------------------------------------------------------------------

6 files changed, 117 insertions(+), 108 deletions(-)
headers/private/kernel/kscheduler.h            |   2 +
src/system/kernel/scheduler/low_latency.cpp    |   7 +-
src/system/kernel/scheduler/power_saving.cpp   |  64 +++++------
src/system/kernel/scheduler/scheduler.cpp      | 128 +++++++++++++--------
src/system/kernel/scheduler/scheduler_common.h |   6 +-
src/system/kernel/thread.cpp                   |  18 +--

############################################################################

Commit:      87115715b40c394b08de08b6709863eb257b020d
Author:      Pawel Dziepak <pdziepak@quarnos.org>
Date:        Wed Nov 27 03:57:26 2013 UTC

scheduler: Protect package data with rw_spinlock

----------------------------------------------------------------------------

diff --git a/src/system/kernel/scheduler/low_latency.cpp \
b/src/system/kernel/scheduler/low_latency.cpp index 832b7ab..a3eb28b 100644
--- a/src/system/kernel/scheduler/low_latency.cpp
+++ b/src/system/kernel/scheduler/low_latency.cpp
@@ -63,7 +63,7 @@ choose_core(Thread* thread)
 {
 	CoreEntry* entry = NULL;
 
-	SpinLocker locker(gIdlePackageLock);
+	ReadSpinLocker locker(gIdlePackageLock);
 	// wake new package
 	PackageEntry* package = gIdlePackageList->Last();
 	if (package == NULL) {
@@ -73,7 +73,7 @@ choose_core(Thread* thread)
 	locker.Unlock();
 
 	if (package != NULL) {
-		SpinLocker _(package->fCoreLock);
+		ReadSpinLocker _(package->fCoreLock);
 		entry = package->fIdleCores.Last();
 	}
 
diff --git a/src/system/kernel/scheduler/power_saving.cpp \
b/src/system/kernel/scheduler/power_saving.cpp index 63ad0e3..75d754f 100644
--- a/src/system/kernel/scheduler/power_saving.cpp
+++ b/src/system/kernel/scheduler/power_saving.cpp
@@ -89,12 +89,12 @@ choose_idle_core(void)
 	}
 
 	if (current == NULL) {
-		SpinLocker _(gIdlePackageLock);
+		ReadSpinLocker _(gIdlePackageLock);
 		current = gIdlePackageList->Last();
 	}
 
 	if (current != NULL) {
-		SpinLocker _(current->fCoreLock);
+		ReadSpinLocker _(current->fCoreLock);
 		return current->fIdleCores.Last();
 	}
 
diff --git a/src/system/kernel/scheduler/scheduler.cpp \
b/src/system/kernel/scheduler/scheduler.cpp index 6f105f6..ede18e6 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -66,7 +66,7 @@ int32 gCoreCount;
 
 PackageEntry* gPackageEntries;
 IdlePackageList* gIdlePackageList;
-spinlock gIdlePackageLock = B_SPINLOCK_INITIALIZER;
+rw_spinlock gIdlePackageLock = B_RW_SPINLOCK_INITIALIZER;
 int32 gPackageCount;
 
 ThreadRunQueue* gRunQueues;
@@ -132,7 +132,7 @@ PackageEntry::PackageEntry()
 	fIdleCoreCount(0),
 	fCoreCount(0)
 {
-	B_INITIALIZE_SPINLOCK(&fCoreLock);
+	B_INITIALIZE_RW_SPINLOCK(&fCoreLock);
 }
 
 
@@ -457,7 +457,7 @@ update_cpu_priority(int32 cpu, int32 priority)
 	int32 package = gCPUToPackage[cpu];
 	PackageEntry* packageEntry = &gPackageEntries[package];
 	if (maxPriority == B_IDLE_PRIORITY) {
-		SpinLocker _(packageEntry->fCoreLock);
+		WriteSpinLocker _(packageEntry->fCoreLock);
 
 		// core goes idle
 		ASSERT(packageEntry->fIdleCoreCount >= 0);
@@ -468,11 +468,11 @@ update_cpu_priority(int32 cpu, int32 priority)
 
 		if (packageEntry->fIdleCoreCount == packageEntry->fCoreCount) {
 			// package goes idle
-			SpinLocker _(gIdlePackageLock);
+			WriteSpinLocker _(gIdlePackageLock);
 			gIdlePackageList->Add(packageEntry);
 		}
 	} else if (corePriority == B_IDLE_PRIORITY) {
-		SpinLocker _(packageEntry->fCoreLock);
+		WriteSpinLocker _(packageEntry->fCoreLock);
 
 		// core wakes up
 		ASSERT(packageEntry->fIdleCoreCount > 0);
@@ -483,7 +483,7 @@ update_cpu_priority(int32 cpu, int32 priority)
 
 		if (packageEntry->fIdleCoreCount + 1 == packageEntry->fCoreCount) {
 			// package wakes up
-			SpinLocker _(gIdlePackageLock);
+			WriteSpinLocker _(gIdlePackageLock);
 			gIdlePackageList->Remove(packageEntry);
 		}
 	}
@@ -956,9 +956,6 @@ choose_next_thread(int32 thisCPU, Thread* oldThread, bool \
putAtBack)  static inline void
 track_cpu_activity(Thread* oldThread, Thread* nextThread, int32 thisCore)
 {
-	bigtime_t now = system_time();
-	bigtime_t usedTime = now - oldThread->scheduler_data->quantum_start;
-
 	if (!thread_is_idle_thread(oldThread)) {
 		bigtime_t active
 			= (oldThread->kernel_time - oldThread->cpu->last_kernel_time)
@@ -976,9 +973,6 @@ track_cpu_activity(Thread* oldThread, Thread* nextThread, int32 \
thisCore)  if (!gSingleCore && !gCPU[smp_get_current_cpu()].disabled)
 		compute_cpu_load(smp_get_current_cpu());
 
-	int32 oldPriority = get_effective_priority(oldThread);
-	int32 nextPriority = get_effective_priority(nextThread);
-
 	if (!thread_is_idle_thread(nextThread)) {
 		oldThread->cpu->last_kernel_time = nextThread->kernel_time;
 		oldThread->cpu->last_user_time = nextThread->user_time;
diff --git a/src/system/kernel/scheduler/scheduler_common.h \
b/src/system/kernel/scheduler/scheduler_common.h index 1fdb26f..f379ba9 100644
--- a/src/system/kernel/scheduler/scheduler_common.h
+++ b/src/system/kernel/scheduler/scheduler_common.h
@@ -106,7 +106,7 @@ struct PackageEntry : public \
DoublyLinkedListLinkImpl<PackageEntry> {  
 	int32						fPackageID;
 
-	spinlock					fCoreLock;
+	rw_spinlock					fCoreLock;
 
 	DoublyLinkedList<CoreEntry>	fIdleCores;
 	int32						fIdleCoreCount;
@@ -117,7 +117,7 @@ typedef DoublyLinkedList<PackageEntry> IdlePackageList;
 
 extern PackageEntry* gPackageEntries;
 extern IdlePackageList* gIdlePackageList;
-extern spinlock gIdlePackageLock;
+extern rw_spinlock gIdlePackageLock;
 extern int32 gPackageCount;
 
 // The run queues. Holds the threads ready to run ordered by priority.

############################################################################

Commit:      286b341a400e8d12060a8be52214618b8f02df87
Author:      Pawel Dziepak <pdziepak@quarnos.org>
Date:        Thu Nov 28 13:03:57 2013 UTC

kernel: Merge two occurrences of thread resume code

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/kscheduler.h \
b/headers/private/kernel/kscheduler.h index 2d8222e..cbad85d 100644
--- a/headers/private/kernel/kscheduler.h
+++ b/headers/private/kernel/kscheduler.h
@@ -83,6 +83,8 @@ status_t scheduler_set_operation_mode(scheduler_mode mode);
 */
 void scheduler_dump_thread_data(Thread* thread);
 
+void scheduler_new_thread_entry(Thread* thread);
+
 void scheduler_set_cpu_enabled(int32 cpu, bool enabled);
 
 void scheduler_add_listener(struct SchedulerListener* listener);
diff --git a/src/system/kernel/scheduler/scheduler.cpp \
b/src/system/kernel/scheduler/scheduler.cpp index ede18e6..b1bcef7 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -1007,6 +1007,58 @@ update_cpu_performance(Thread* thread, int32 thisCore)
 }
 
 
+static inline void
+stop_cpu_timers(Thread* fromThread, Thread* toThread)
+{
+	SpinLocker teamLocker(&fromThread->team->time_lock);
+	SpinLocker threadLocker(&fromThread->time_lock);
+
+	if (fromThread->HasActiveCPUTimeUserTimers()
+		|| fromThread->team->HasActiveCPUTimeUserTimers()) {
+		user_timer_stop_cpu_timers(fromThread, toThread);
+	}
+}
+
+
+static inline void
+continue_cpu_timers(Thread* thread, cpu_ent* cpu)
+{
+	SpinLocker teamLocker(&thread->team->time_lock);
+	SpinLocker threadLocker(&thread->time_lock);
+
+	if (thread->HasActiveCPUTimeUserTimers()
+		|| thread->team->HasActiveCPUTimeUserTimers()) {
+		user_timer_continue_cpu_timers(thread, cpu->previous_thread);
+	}
+}
+
+
+static void
+thread_resumes(Thread* thread)
+{
+	cpu_ent* cpu = thread->cpu;
+
+	release_spinlock(&cpu->previous_thread->scheduler_lock);
+
+	// continue CPU time based user timers
+	continue_cpu_timers(thread, cpu);
+
+	// notify the user debugger code
+	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
+		user_debug_thread_scheduled(thread);
+}
+
+
+void
+scheduler_new_thread_entry(Thread* thread)
+{
+	thread_resumes(thread);
+
+	SpinLocker locker(thread->time_lock);
+	thread->last_time = system_time();
+}
+
+
 /*!	Switches the currently running thread.
 	This is a service function for scheduler implementations.
 
@@ -1022,14 +1074,7 @@ switch_thread(Thread* fromThread, Thread* toThread)
 		user_debug_thread_unscheduled(fromThread);
 
 	// stop CPU time based user timers
-	acquire_spinlock(&fromThread->team->time_lock);
-	acquire_spinlock(&fromThread->time_lock);
-	if (fromThread->HasActiveCPUTimeUserTimers()
-		|| fromThread->team->HasActiveCPUTimeUserTimers()) {
-		user_timer_stop_cpu_timers(fromThread, toThread);
-	}
-	release_spinlock(&fromThread->time_lock);
-	release_spinlock(&fromThread->team->time_lock);
+	stop_cpu_timers(fromThread, toThread);
 
 	// update CPU and Thread structures and perform the context switch
 	cpu_ent* cpu = fromThread->cpu;
@@ -1041,25 +1086,10 @@ switch_thread(Thread* fromThread, Thread* toThread)
 	arch_thread_set_current_thread(toThread);
 	arch_thread_context_switch(fromThread, toThread);
 
-	release_spinlock(&fromThread->cpu->previous_thread->scheduler_lock);
-
 	// The use of fromThread below looks weird, but is correct. fromThread had
 	// been unscheduled earlier, but is back now. For a thread scheduled the
 	// first time the same is done in thread.cpp:common_thread_entry().
-
-	// continue CPU time based user timers
-	acquire_spinlock(&fromThread->team->time_lock);
-	acquire_spinlock(&fromThread->time_lock);
-	if (fromThread->HasActiveCPUTimeUserTimers()
-		|| fromThread->team->HasActiveCPUTimeUserTimers()) {
-		user_timer_continue_cpu_timers(fromThread, cpu->previous_thread);
-	}
-	release_spinlock(&fromThread->time_lock);
-	release_spinlock(&fromThread->team->time_lock);
-
-	// notify the user debugger code
-	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
-		user_debug_thread_scheduled(fromThread);
+	thread_resumes(fromThread);
 }
 
 
@@ -1068,28 +1098,25 @@ update_thread_times(Thread* oldThread, Thread* nextThread)
 {
 	bigtime_t now = system_time();
 	if (oldThread == nextThread) {
-		acquire_spinlock(&oldThread->time_lock);
+		SpinLocker _(oldThread->time_lock);
 		oldThread->kernel_time += now - oldThread->last_time;
 		oldThread->last_time = now;
-		release_spinlock(&oldThread->time_lock);
 	} else {
-		acquire_spinlock(&oldThread->time_lock);
+		SpinLocker locker(oldThread->time_lock);
 		oldThread->kernel_time += now - oldThread->last_time;
 		oldThread->last_time = 0;
-		release_spinlock(&oldThread->time_lock);
+		locker.Unlock();
 
-		acquire_spinlock(&nextThread->time_lock);
+		locker.SetTo(nextThread->time_lock, false);
 		nextThread->last_time = now;
-		release_spinlock(&nextThread->time_lock);
 	}
 
 	// If the old thread's team has user time timers, check them now.
 	Team* team = oldThread->team;
 
-	acquire_spinlock(&team->time_lock);
+	SpinLocker _(team->time_lock);
 	if (team->HasActiveUserTimeUserTimers())
 		user_timer_check_team_user_timers(team);
-	release_spinlock(&team->time_lock);
 }
 
 
diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp
index ca66a46..f3474b5 100644
--- a/src/system/kernel/thread.cpp
+++ b/src/system/kernel/thread.cpp
@@ -698,25 +698,9 @@ common_thread_entry(void* _args)
 
 	// The thread is new and has been scheduled the first time.
 
-	// start CPU time based user timers
-	acquire_spinlock(&thread->team->time_lock);
-	acquire_spinlock(&thread->time_lock);
-	if (thread->HasActiveCPUTimeUserTimers()
-		|| thread->team->HasActiveCPUTimeUserTimers()) {
-		user_timer_continue_cpu_timers(thread, thread->cpu->previous_thread);
-	}
-
-	// start tracking time
-	thread->last_time = system_time();
-	release_spinlock(&thread->time_lock);
-	release_spinlock(&thread->team->time_lock);
-
-	// notify the user debugger code
-	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
-		user_debug_thread_scheduled(thread);
+	scheduler_new_thread_entry(thread);
 
 	// unlock the scheduler lock and enable interrupts
-	release_spinlock(&thread->cpu->previous_thread->scheduler_lock);
 	release_spinlock(&thread->scheduler_lock);
 	enable_interrupts();
 

############################################################################

Commit:      f9ee217ad6c59122fefd39a445c99f2c0fb2af1e
Author:      Pawel Dziepak <pdziepak@quarnos.org>
Date:        Thu Nov 28 15:33:50 2013 UTC

scheduler: Migrate threads less often in power saving mode

----------------------------------------------------------------------------

diff --git a/src/system/kernel/scheduler/low_latency.cpp \
b/src/system/kernel/scheduler/low_latency.cpp index a3eb28b..3e43657 100644
--- a/src/system/kernel/scheduler/low_latency.cpp
+++ b/src/system/kernel/scheduler/low_latency.cpp
@@ -30,9 +30,6 @@ has_cache_expired(Thread* thread)
 {
 	ASSERT(!gSingleCore);
 
-	if (thread_is_idle_thread(thread))
-		return false;
-
 	scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
 	ASSERT(schedulerThreadData->previous_core >= 0);
 
diff --git a/src/system/kernel/scheduler/power_saving.cpp \
b/src/system/kernel/scheduler/power_saving.cpp index 75d754f..aa960e4 100644
--- a/src/system/kernel/scheduler/power_saving.cpp
+++ b/src/system/kernel/scheduler/power_saving.cpp
@@ -36,9 +36,6 @@ has_cache_expired(Thread* thread)
 {
 	ASSERT(!gSingleCore);
 
-	if (thread_is_idle_thread(thread))
-		return false;
-
 	scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
 	ASSERT(schedulerThreadData->previous_core >= 0);
 
@@ -46,19 +43,6 @@ has_cache_expired(Thread* thread)
 }
 
 
-static bool
-try_small_task_packing(Thread* thread)
-{
-	ReadSpinLocker locker(gCoreHeapsLock);
-
-	int32 core = sSmallTaskCore;
-	return (core == -1 && gCoreLoadHeap->PeekMaximum() != NULL)
-		|| (core != -1
-			&& get_core_load(&gCoreEntries[core]) + thread->scheduler_data->load
-				< kHighLoad);
-}
-
-
 static int32
 choose_small_task_core(void)
 {
@@ -107,24 +91,27 @@ choose_core(Thread* thread)
 {
 	CoreEntry* entry;
 
-	if (try_small_task_packing(thread)) {
-		// try to pack all threads on one core
-		entry = &gCoreEntries[choose_small_task_core()];
+	int32 core = -1;
+	// try to pack all threads on one core
+	core = choose_small_task_core();
+
+	if (core != -1
+		&& get_core_load(&gCoreEntries[core]) + thread->scheduler_data->load
+			< kHighLoad) {
+		entry = &gCoreEntries[core];
 	} else {
 		ReadSpinLocker coreLocker(gCoreHeapsLock);
-		if (gCoreLoadHeap->PeekMinimum() != NULL) {
-			// run immediately on already woken core
-			entry = gCoreLoadHeap->PeekMinimum();
-		} else {
+		// run immediately on already woken core
+		entry = gCoreLoadHeap->PeekMinimum();
+		if (entry == NULL) {
 			coreLocker.Unlock();
 
 			entry = choose_idle_core();
 
-			coreLocker.Lock();
-			if (entry == NULL)
-				entry = gCoreLoadHeap->PeekMinimum();
-			if (entry == NULL)
+			if (entry == NULL) {
+				coreLocker.Lock();
 				entry = gCoreHighLoadHeap->PeekMinimum();
+			}
 		}
 	}
 
@@ -138,9 +125,6 @@ should_rebalance(Thread* thread)
 {
 	ASSERT(!gSingleCore);
 
-	if (thread_is_idle_thread(thread))
-		return false;
-
 	scheduler_thread_data* schedulerThreadData = thread->scheduler_data;
 	ASSERT(schedulerThreadData->previous_core >= 0);
 
@@ -151,13 +135,16 @@ should_rebalance(Thread* thread)
 	if (coreLoad > kHighLoad) {
 		ReadSpinLocker coreLocker(gCoreHeapsLock);
 		if (sSmallTaskCore == core) {
-			if (coreLoad - schedulerThreadData->load < kHighLoad)
-				return true;
-
+			sSmallTaskCore = -1;
 			choose_small_task_core();
+			if (schedulerThreadData->load > coreLoad / 3)
+				return false;
 			return coreLoad > kVeryHighLoad;
 		}
 
+		if (schedulerThreadData->load >= coreLoad / 2)
+			return false;
+
 		CoreEntry* other = gCoreLoadHeap->PeekMaximum();
 		if (other == NULL)
 			other = gCoreHighLoadHeap->PeekMinimum();
@@ -165,10 +152,15 @@ should_rebalance(Thread* thread)
 		return coreLoad - get_core_load(other) >= kLoadDifference / 2;
 	}
 
+	if (coreLoad >= kMediumLoad)
+		return false;
+
 	int32 smallTaskCore = choose_small_task_core();
 	if (smallTaskCore == -1)
 		return false;
-	return smallTaskCore != core;
+	return smallTaskCore != core
+		&& get_core_load(&gCoreEntries[smallTaskCore])
+				+ thread->scheduler_data->load < kHighLoad;
 }
 
 
diff --git a/src/system/kernel/scheduler/scheduler.cpp \
b/src/system/kernel/scheduler/scheduler.cpp index b1bcef7..8fb6cf9 100644
--- a/src/system/kernel/scheduler/scheduler.cpp
+++ b/src/system/kernel/scheduler/scheduler.cpp
@@ -120,7 +120,8 @@ CoreEntry::CoreEntry()
 	fCPUCount(0),
 	fThreadCount(0),
 	fActiveTime(0),
-	fLoad(0)
+	fLoad(0),
+	fHighLoad(false)
 {
 	B_INITIALIZE_SPINLOCK(&fCPULock);
 	B_INITIALIZE_SPINLOCK(&fQueueLock);
@@ -264,6 +265,7 @@ dump_cpu_heap(int argc, char** argv)
 {
 	kprintf("core load\n");
 	dump_core_load_heap(gCoreLoadHeap);
+	kprintf("\n");
 	dump_core_load_heap(gCoreHighLoadHeap);
 
 	for (int32 i = 0; i < gCoreCount; i++) {
@@ -376,23 +378,32 @@ update_load_heaps(int32 core)
 		return;
 
 	if (newKey > kHighLoad) {
-		if (oldKey <= kHighLoad) {
+		if (!entry->fHighLoad) {
 			gCoreLoadHeap->ModifyKey(entry, -1);
 			ASSERT(gCoreLoadHeap->PeekMinimum() == entry);
 			gCoreLoadHeap->RemoveMinimum();
 
 			gCoreHighLoadHeap->Insert(entry, newKey);
+
+			entry->fHighLoad = true;
 		} else
 			gCoreHighLoadHeap->ModifyKey(entry, newKey);
-	} else {
-		if (oldKey > kHighLoad) {
+	} else if (newKey < kMediumLoad) {
+		if (entry->fHighLoad) {
 			gCoreHighLoadHeap->ModifyKey(entry, -1);
 			ASSERT(gCoreHighLoadHeap->PeekMinimum() == entry);
 			gCoreHighLoadHeap->RemoveMinimum();
 
 			gCoreLoadHeap->Insert(entry, newKey);
+
+			entry->fHighLoad = false;
 		} else
 			gCoreLoadHeap->ModifyKey(entry, newKey);
+	} else {
+		if (entry->fHighLoad)
+			gCoreHighLoadHeap->ModifyKey(entry, newKey);
+		else
+			gCoreLoadHeap->ModifyKey(entry, newKey);
 	}
 }
 
diff --git a/src/system/kernel/scheduler/scheduler_common.h \
b/src/system/kernel/scheduler/scheduler_common.h index f379ba9..e92b3a6 100644
--- a/src/system/kernel/scheduler/scheduler_common.h
+++ b/src/system/kernel/scheduler/scheduler_common.h
@@ -36,6 +36,7 @@ const bigtime_t kCacheExpire = 100000;
 const int kLowLoad = kMaxLoad * 20 / 100;
 const int kTargetLoad = kMaxLoad * 55 / 100;
 const int kHighLoad = kMaxLoad * 70 / 100;
+const int kMediumLoad = (kHighLoad + kTargetLoad) / 2;
 const int kVeryHighLoad = (kMaxLoad + kHighLoad) / 2;
 
 const int kLoadDifference = kMaxLoad * 20 / 100;
@@ -83,6 +84,7 @@ struct CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, int32>,
 	bigtime_t	fActiveTime;
 
 	int32		fLoad;
+	bool		fHighLoad;
 } CACHE_LINE_ALIGN;
 typedef MinMaxHeap<CoreEntry, int32> CoreLoadHeap;
 


[prev in list] [next in list] [prev in thread] [next in thread] 

Configure | About | News | Add a list | Sponsored by KoreLogic