List:       kde-commits
Subject:    Re: branches/KDE/4.4/kdelibs/kio/kio
From:       Dawit Alemayehu <adawit () kde ! org>
Date:       2010-02-04 18:57:34
Message-ID: 201002041357.34858.adawit () kde ! org

On Wednesday 03 February 2010 15:56:48 you wrote:
> On Wednesday 03 February 2010, Dawit Alemayehu wrote:
> > Simply changing these values to whatever higher number your use case
> > demands would have automatically solved the problem without any need
> > to disable/change code. Right now, for the http ioslave, these values
> > are 20 and 5 respectively, which is probably not sufficient for some
> > use cases,
> 
> I can only say that I had this problem with recent 4.4 sources, but it
> only happens sometimes. Now I have to see if this still happens.
> 
> > but disabling this code without first getting the details is a
> > mistake, especially since the fix for it would not require any code
> > changes...
> 
> Well, this is IMO a show stopper for 4.4.0 and must be fixed.

Can someone else besides me please test the proposed patch I posted to the
bug report below [1]?

Even though I have already tested the patch extensively by loading several
heavy and complex sites while limiting the maxSlaveInstances and
maxSlaveInstancesPerHost values in the http{s}.protocol files to 4 and 2
respectively, the more testing this patch receives from different use cases,
the better.
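
For reference, here is a minimal sketch of the kind of entries involved,
using the key names as written above. The exact entry names in the installed
http.protocol file may differ between kdelibs versions, so verify them
against your copy before editing:

  [Protocol]
  protocol=http
  exec=kio_http
  maxSlaveInstances=4
  maxSlaveInstancesPerHost=2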

This fix needs to be applied before 4.4.1; otherwise, every application that
uses kdewebkit will simply spawn a ridiculous number of ioslaves for a given
request.

Thanks,
Dawit A.


[1] https://bugs.kde.org/show_bug.cgi?id=224857

["kio_scheduler_cpu_spin_fix.patch" (text/x-patch)]

Index: scheduler.cpp
===================================================================
--- scheduler.cpp	(revision 1084961)
+++ scheduler.cpp	(working copy)
@@ -97,7 +97,7 @@
 
     SchedulerPrivate() :
         q(new Scheduler),
-        busy( false ),
+        startTimerDelayed( false ),
         slaveOnHold( 0 ),
         slaveConfig( SlaveConfig::self() ),
         sessionData( new SessionData ),
@@ -124,7 +124,7 @@
     QTimer slaveTimer;
     QTimer coSlaveTimer;
     QTimer cleanupTimer;
-    bool busy;
+    bool startTimerDelayed;
 
     ProtocolInfoDict protInfoDict;
     Slave *slaveOnHold;
@@ -190,6 +190,7 @@
 
         SlaveList list (coSlaves.keys());
         qDeleteAll(list.begin(), list.end());
+        scheduledSlaves.clear();
     }
 
     Slave* findJobCoSlave(SimpleJob* job) const
@@ -251,7 +252,7 @@
 
         if (!host.isEmpty())
         {
-            QListIterator<SlavePtr> it (activeSlaves);
+            QListIterator<SlavePtr> it (scheduledSlaves);
             while (it.hasNext())
             {
                 if (host == it.next()->host())
@@ -261,7 +262,7 @@
             QString url = job->url().url();
 
             if (reserveList.contains(url)) {
-                kDebug() << "*** Removing paired request for: " << url;
+                kDebug(7006) << "*** Removing paired request for: " << url;
                 reserveList.removeOne(url);
             } else {
                 count += reserveList.count();
@@ -274,6 +275,7 @@
     QStringList reserveList;
     QList<SimpleJob *> joblist;
     SlaveList activeSlaves;
+    SlaveList scheduledSlaves;
     SlaveList idleSlaves;
     CoSlaveMap coSlaves;
     SlaveList coIdleSlaves;
@@ -288,13 +290,13 @@
   const int numActiveSlaves = protInfo->activeSlaveCountFor(job);
 
 #if 0
-    kDebug() << job->url() << ": ";
-    kDebug() << "    protocol :" << job->url().protocol()
-             << ", max :" << protInfo->maxSlaves
-             << ", max/host :" << protInfo->maxSlavesPerHost
-             << ", active :" << protInfo->activeSlaves.count()
-             << ", idle :" << protInfo->idleSlaves.count()
-             << ", active for " << job->url().host() << " = " << numActiveSlaves;
+    kDebug(7006) << job->url() << ": ";
+    kDebug(7006) << "    protocol :" << job->url().protocol()
+                 << ", max :" << protInfo->maxSlaves
+                 << ", max/host :" << protInfo->maxSlavesPerHost
+                 << ", active :" << protInfo->scheduledSlaves.count()
+                 << ", idle :" << protInfo->idleSlaves.count()
+                 << ", active for " << job->url().host() << " = " << numActiveSlaves;
 #endif
 
   return (protInfo->maxSlavesPerHost < 1 || protInfo->maxSlavesPerHost > numActiveSlaves);
@@ -477,7 +479,7 @@
     slaveTimer.start(0);
 #ifndef NDEBUG
     if (newJobs.count() > 150)
-        kDebug() << "WARNING - KIO::Scheduler got more than 150 jobs! This shows a misuse in your app (yes, a job is a QObject).";
+        kDebug(7006) << "WARNING - KIO::Scheduler got more than 150 jobs! This shows a misuse in your app (yes, a job is a QObject).";
 #endif
 }
 
@@ -529,8 +531,21 @@
     QHashIterator<QString, ProtocolInfo*> it(protInfoDict);
     while(it.hasNext()) {
        it.next();
-       if (startJobScheduled(it.value())) return;
+       if (startJobScheduled(it.value()))
+          return;
     }
+
+    /*
+      If startTimerDelayed is true, we still have jobs that could not be
+      scheduled because of the limit enforcement. Restart the slaveTimer
+      here, once per pass, instead of from within the loop above; otherwise
+      that loop would be re-run back to back and peg the CPU. See BR# 224857...
+    */
+    if (startTimerDelayed) {
+       kDebug(7006) << "Rescheduling remaining jobs immediately...";
+       startTimerDelayed = false;
+       slaveTimer.start(0);
+    }
 }
 
 void SchedulerPrivate::setupSlave(KIO::Slave *slave, const KUrl &url, const QString &protocol, const QString &proxy , bool newSlave, const KIO::MetaData *config)
@@ -640,7 +655,7 @@
        if (slave)
           newSlave = true;
        else
-          slaveTimer.start(0);
+          startTimerDelayed = true;
     }
 
     if (!slave)
@@ -658,11 +673,12 @@
     KUrl url = pairedRequest(job);
     if (url.isValid())
     {
-        kDebug() << "*** PAIRED REQUEST: " << url;
+        kDebug(7006) << "*** PAIRED REQUEST: " << url;
         protInfoDict.get(url.protocol())->reserveList << url.url();
     }
 
     protInfo->activeSlaves.append(slave);
+    protInfo->scheduledSlaves.append(slave);
     protInfo->idleSlaves.removeAll(slave);
     protInfo->joblist.removeOne(job);
 //        kDebug(7006) << "scheduler: job started " << job;
@@ -744,8 +760,7 @@
 {
     Slave *slave = 0;
 
-    if (true /* ### temporary workaround for #224857*/ ||
-        !enforceLimits || checkLimits(protInfo, job))
+    if (!enforceLimits || checkLimits(protInfo, job))
     {
         KIO::SimpleJobPrivate *const jobPriv = SimpleJobPrivate::get(job);
 
@@ -801,7 +816,7 @@
                                      const KUrl &url, bool enforceLimits)
 {
    Slave *slave = 0;
-   const int slavesCount = protInfo->activeSlaves.count() + protInfo->idleSlaves.count();
+   const int slavesCount = protInfo->scheduledSlaves.count() + protInfo->idleSlaves.count();
 
    if (!enforceLimits ||
        (protInfo->maxSlaves > slavesCount && checkLimits(protInfo, job)))
@@ -843,6 +858,7 @@
     ProtocolInfo *protInfo = protInfoDict.get(jobPriv->m_protocol);
     slave->disconnect(job);
     protInfo->activeSlaves.removeAll(slave);
+    protInfo->scheduledSlaves.removeAll(slave);
     if (slave->isAlive())
     {
        JobList *list = protInfo->coSlaves.value(slave);
@@ -875,6 +891,7 @@
     assert(!slave->isAlive());
     ProtocolInfo *protInfo = protInfoDict.get(slave->slaveProtocol());
     protInfo->activeSlaves.removeAll(slave);
+    protInfo->scheduledSlaves.removeAll(slave);
     if (slave == slaveOnHold)
     {
        slaveOnHold = 0;
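
For anyone reviewing the scheduling change itself, here is a minimal,
self-contained sketch of the pattern the patch introduces, outside of KIO:
a job that cannot be started only sets a flag, and the scheduling timer is
restarted once after the whole pass, rather than once per blocked job from
inside the loop. All names here (MiniScheduler, schedulePass, jobDone,
maxSlaves) are hypothetical; this is the idea, not the scheduler code:

// mini_scheduler.cpp - build with qmake/moc (Qt 4 style signals/slots).
#include <QtCore/QCoreApplication>
#include <QtCore/QStringList>
#include <QtCore/QTimer>
#include <QtCore/QtDebug>

class MiniScheduler : public QObject
{
    Q_OBJECT
public:
    MiniScheduler() : running(0), startTimerDelayed(false)
    {
        slaveTimer.setSingleShot(true);
        connect(&slaveTimer, SIGNAL(timeout()), this, SLOT(schedulePass()));
    }

    void addJob(const QString &name)
    {
        jobs << name;
        slaveTimer.start(0); // run a pass on the next event loop iteration
    }

private slots:
    void schedulePass()
    {
        // One pass over the queue: start what the limit allows, flag the rest.
        for (int i = jobs.count() - 1; i >= 0; --i) {
            if (running < maxSlaves) {
                qDebug() << "started" << jobs.at(i);
                jobs.removeAt(i);
                ++running;
                // Simulate the slave finishing a little later.
                QTimer::singleShot(50, this, SLOT(jobDone()));
            } else {
                // The old code restarted the timer right here, once per
                // blocked job; the patch only records that a retry is needed.
                startTimerDelayed = true;
            }
        }

        if (startTimerDelayed) {
            startTimerDelayed = false;
            slaveTimer.start(0); // a single restart for the whole pass
        } else if (running == 0 && jobs.isEmpty()) {
            QCoreApplication::quit(); // demo only: nothing left to do
        }
    }

    void jobDone()
    {
        --running;
        slaveTimer.start(0); // a slot freed up, so try the queue again
    }

private:
    QTimer slaveTimer;
    QStringList jobs;
    int running;
    bool startTimerDelayed;
    static const int maxSlaves = 2;
};

int main(int argc, char **argv)
{
    QCoreApplication app(argc, argv);
    MiniScheduler scheduler;
    scheduler.addJob("job-1");
    scheduler.addJob("job-2");
    scheduler.addJob("job-3");
    return app.exec();
}

#include "mini_scheduler.moc"

Note that the sketch, like the patch, restarts the timer with an interval of
zero; a longer interval would trade a little scheduling latency for even less
CPU while everything is blocked.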


