From kde-commits Wed Nov 30 23:04:40 2011
From: Christian Mollekopf
Date: Wed, 30 Nov 2011 23:04:40 +0000
To: kde-commits
Subject: [kdepim-runtime] agents/nepomukfeeder: Indentation, Debug messages,
Message-Id: <20111130230440.E1AEFA60D5@git.kde.org>
X-MARC-Message: https://marc.info/?l=kde-commits&m=132269438602296

Git commit 5b18ed2d65a2f2966c4d13f53434a24b377d1e7b by Christian Mollekopf.
Committed on 30/11/2011 at 23:15.
Pushed by cmollekopf into branch 'master'.

Indentation, Debug messages, Wreorder

M  +41 -43   agents/nepomukfeeder/feederqueue.cpp
M  +1  -1    agents/nepomukfeeder/feederqueue.h

http://commits.kde.org/kdepim-runtime/5b18ed2d65a2f2966c4d13f53434a24b377d1e7b

diff --git a/agents/nepomukfeeder/feederqueue.cpp b/agents/nepomukfeeder/feederqueue.cpp
index d444be7..d5434e2 100644
--- a/agents/nepomukfeeder/feederqueue.cpp
+++ b/agents/nepomukfeeder/feederqueue.cpp
@@ -43,9 +43,9 @@ FeederQueue::FeederQueue( QObject* parent )
   mProcessedAmount( 0 ),
   mPendingJobs( 0 ),
   mReIndex( false ),
+  mOnline( true ),
   lowPrioQueue(1, 100, this),
-  highPrioQueue(1, 100, this),
-  mOnline( true )
+  highPrioQueue(1, 100, this)
 {
   mProcessItemQueueTimer.setInterval( 0 );
   mProcessItemQueueTimer.setSingleShot( true );
@@ -69,7 +69,7 @@ void FeederQueue::setReindexing( bool reindex )
 
 void FeederQueue::setOnline( bool online )
 {
-  kDebug() << online;
+  //kDebug() << online;
   mOnline = online;
   if (online)
     continueIndexing();
@@ -77,7 +77,7 @@ void FeederQueue::setOnline( bool online )
 
 void FeederQueue::addCollection( const Akonadi::Collection &collection )
 {
-  kDebug() << collection.id();
+  //kDebug() << collection.id();
   mCollectionQueue.append( collection );
   if ( mPendingJobs == 0 ) {
     processNextCollection();
@@ -178,7 +178,7 @@ void FeederQueue::continueIndexing()
 
 void FeederQueue::processItemQueue()
 {
-  kDebug();
+  //kDebug();
   ++mProcessedAmount;
   if ( (mProcessedAmount % 100) == 0 && mTotalAmount > 0 && mProcessedAmount <= mTotalAmount )
     emit progress( (mProcessedAmount * 100) / mTotalAmount );
@@ -202,7 +202,7 @@ void FeederQueue::processItemQueue()
   }
 
   if ( !highPrioQueue.isEmpty() || ( !lowPrioQueue.isEmpty() && mOnline ) ) {
-    kDebug() << "continue";
+    //kDebug() << "continue";
     // go to eventloop before processing the next one, otherwise we miss the idle status change
     mProcessItemQueueTimer.start();
   }
@@ -211,7 +211,7 @@ void FeederQueue::processItemQueue()
 void FeederQueue::prioQueueFinished()
 {
   if (highPrioQueue.isEmpty() && lowPrioQueue.isEmpty() && (mPendingJobs == 0) && mCurrentCollection.isValid() ) {
-    kDebug() << "indexing of collection " << mCurrentCollection.id() << " completed";
+    //kDebug() << "indexing of collection " << mCurrentCollection.id() << " completed";
     mCurrentCollection = Collection();
     emit idle( i18n( "Indexing completed." ) );
     processNextCollection();
@@ -252,9 +252,9 @@ void FeederQueue::setItemFetchScope(ItemFetchScope scope)
 ItemQueue::ItemQueue(int batchSize, int fetchSize, QObject* parent)
 : QObject(parent),
   mPendingRemoveDataJobs( 0 ),
-  mFetchSize(fetchSize),
-  mBatchSize(batchSize),
-  block(false)
+  mBatchSize( batchSize ),
+  mFetchSize( fetchSize ),
+  block( false )
 {
   if ( fetchSize < batchSize ) {
     kWarning() << "fetchSize must be >= batchsize";
@@ -270,7 +270,7 @@ ItemQueue::~ItemQueue()
 
 void ItemQueue::addItem(const Akonadi::Item &item)
 {
-  kDebug() << "pipline size: " << mItemPipeline.size();
+  //kDebug() << "pipline size: " << mItemPipeline.size();
   mItemPipeline.enqueue(item.id()); //TODO if payload is available add directly to
 }
 
@@ -285,10 +285,10 @@ void ItemQueue::addItems(const Akonadi::Item::List &list )
 bool ItemQueue::processItem()
 {
   if (block) {//wait until the old graph has been saved
-    kDebug() << "blocked";
+    //kDebug() << "blocked";
     return false;
   }
-  kDebug() << "------------------------procItem";
+  //kDebug() << "------------------------procItem";
   static bool processing = false; // guard against sub-eventloop reentrancy
   if ( processing )
     return false;
@@ -299,7 +299,7 @@ bool ItemQueue::processItem()
   processing = false;
 
   if (mItemFetchList.size() >= mFetchSize || mItemPipeline.isEmpty() ) {
-    kDebug() << QString("Fetching %1 items").arg(mItemFetchList.size());
+    //kDebug() << QString("Fetching %1 items").arg(mItemFetchList.size());
     Akonadi::ItemFetchJob *job = new Akonadi::ItemFetchJob( mItemFetchList, 0 );
     job->fetchScope().fetchFullPayload();
     job->fetchScope().setCacheOnly( true );
@@ -320,12 +320,10 @@ void ItemQueue::itemsReceived(const Akonadi::Item::List& items)
 {
   Akonadi::ItemFetchJob *job = qobject_cast<Akonadi::ItemFetchJob*>(sender());
   int numberOfItems = job->property("numberOfItems").toInt();
-  kDebug() << items.size() << numberOfItems;
+  //kDebug() << items.size() << numberOfItems;
   mFetchedItemList.append(items);
   if ( mFetchedItemList.size() >= numberOfItems ) { //Sometimes we get a partial delivery only, wait for the rest
     processBatch();
-  } else {
-    kDebug() << "waiting for more";
   }
 }
 
@@ -340,24 +338,24 @@ void ItemQueue::fetchJobResult(KJob* job)
 
 bool ItemQueue::processBatch()
 {
-    int size = mFetchedItemList.size();
-    kDebug() << size;
-    for ( int i = 0; i < size && i < mBatchSize; i++ ) {
-        const Akonadi::Item &item = mFetchedItemList.takeFirst();
-        //kDebug() << item.id();
-        Q_ASSERT(item.hasPayload());
-        Q_ASSERT(mBatch.size() == 0 ? mResourceGraph.isEmpty() : true); //otherwise we havent reached removeDataByApplication yet, and therfore mustn't overwrite mResourceGraph
-        NepomukHelpers::addItemToGraph( item, mResourceGraph );
-        mBatch.append(item.url());
-    }
-    if ( mBatch.size() && ( mBatch.size() >= mBatchSize || mItemPipeline.isEmpty() ) ) {
-        kDebug() << "process batch of " << mBatch.size() << " left: " << mFetchedItemList.size();
-        KJob *job = Nepomuk::removeDataByApplication( mBatch, Nepomuk::RemoveSubResoures, KGlobal::mainComponent() );
-        connect( job, SIGNAL( finished( KJob* ) ), this, SLOT( removeDataResult( KJob* ) ) );
-        mBatch.clear();
-        return false;
-    }
-    return true;
+  int size = mFetchedItemList.size();
+  //kDebug() << size;
+  for ( int i = 0; i < size && i < mBatchSize; i++ ) {
+    const Akonadi::Item &item = mFetchedItemList.takeFirst();
+    //kDebug() << item.id();
+    Q_ASSERT(item.hasPayload());
+    Q_ASSERT(mBatch.size() == 0 ? mResourceGraph.isEmpty() : true); //otherwise we havent reached removeDataByApplication yet, and therfore mustn't overwrite mResourceGraph
+    NepomukHelpers::addItemToGraph( item, mResourceGraph );
+    mBatch.append(item.url());
+  }
+  if ( mBatch.size() && ( mBatch.size() >= mBatchSize || mItemPipeline.isEmpty() ) ) {
+    //kDebug() << "process batch of " << mBatch.size() << " left: " << mFetchedItemList.size();
+    KJob *job = Nepomuk::removeDataByApplication( mBatch, Nepomuk::RemoveSubResoures, KGlobal::mainComponent() );
+    connect( job, SIGNAL( finished( KJob* ) ), this, SLOT( removeDataResult( KJob* ) ) );
+    mBatch.clear();
+    return false;
+  }
+  return true;
 }
 
 void ItemQueue::removeDataResult(KJob* job)
@@ -369,7 +367,7 @@ void ItemQueue::removeDataResult(KJob* job)
   //kDebug() << "Saving Graph";
   KJob *addGraphJob = NepomukHelpers::addGraphToNepomuk( mResourceGraph );
   connect( addGraphJob, SIGNAL( result( KJob* ) ), SLOT( batchJobResult( KJob* ) ) );
-  m_debugGraph = mResourceGraph;
+  //m_debugGraph = mResourceGraph;
   mResourceGraph.clear();
   //trigger processing of next collection as everything of this one has been stored
   //kDebug() << "removing completed, saving complete, batch done==================";
@@ -377,15 +375,15 @@ void ItemQueue::removeDataResult(KJob* job)
 
 void ItemQueue::batchJobResult(KJob* job)
 {
-  kDebug() << "------------------------------------------";
-  kDebug() << "pipline size: " << mItemPipeline.size();
-  kDebug() << "fetchedItemList : " << mFetchedItemList.size();
+  //kDebug() << "------------------------------------------";
+  //kDebug() << "pipline size: " << mItemPipeline.size();
+  //kDebug() << "fetchedItemList : " << mFetchedItemList.size();
   Q_ASSERT(mBatch.isEmpty());
   int timeout = 0;
   if ( job->error() ) {
-    foreach( const Nepomuk::SimpleResource &res, m_debugGraph.toList() ) {
+    /*foreach( const Nepomuk::SimpleResource &res, m_debugGraph.toList() ) {
       kWarning() << res;
-    }
+    }*/
     kWarning() << job->errorString();
     timeout = 30000; //Nepomuk is probably still working. Lets wait a bit and hope it has finished until the next batch arrives.
   }
@@ -395,11 +393,11 @@ void ItemQueue::batchJobResult(KJob* job)
 void ItemQueue::continueProcessing()
 {
   if (processBatch()) { //Go back for more
-    kDebug() << "batch finished";
+    //kDebug() << "batch finished";
     block = false;
     emit batchFinished();
   } else {
-    kDebug() << "there was more...";
+    //kDebug() << "there was more...";
     return;
   }
   if ( mItemPipeline.isEmpty() && mFetchedItemList.isEmpty() ) {
diff --git a/agents/nepomukfeeder/feederqueue.h b/agents/nepomukfeeder/feederqueue.h
index a70a2c5..df1fa51 100644
--- a/agents/nepomukfeeder/feederqueue.h
+++ b/agents/nepomukfeeder/feederqueue.h
@@ -72,7 +72,7 @@ private:
 
   QQueue<Akonadi::Item::Id> mItemPipeline;
   Nepomuk::SimpleResourceGraph mResourceGraph;
-  Nepomuk::SimpleResourceGraph m_debugGraph;
+  //Nepomuk::SimpleResourceGraph m_debugGraph;
   QList<QUrl> mBatch;
   Akonadi::Item::List mItemFetchList;
   Akonadi::Item::List mFetchedItemList;
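
A note on the "Wreorder" part of the summary: GCC's -Wreorder fires when a
constructor's initializer list names members in a different order than they
are declared, because members are always initialized in declaration order.
Moving mOnline( true ) ahead of the queue members, and swapping the
mBatchSize/mFetchSize initializers in ItemQueue, presumably brings the written
order back in line with the declared order. The sketch below is a generic
illustration of the warning with made-up names; the real declaration order
lives in feederqueue.h, which this diff does not show for those members.

// Minimal, self-contained sketch of what -Wreorder complains about.
// Class and member names are hypothetical, not taken from kdepim-runtime.
#include <iostream>

class Example
{
public:
    Example()
        : mOnline( true ),    // written first in the list ...
          mBatchSize( 100 )   // ... but see the declaration order below
    {
    }

    // Members are initialized top to bottom in this order, no matter how
    // the initializer list above is written. Because mBatchSize is declared
    // before mOnline, GCC with -Wreorder warns that the list is misleading.
    int mBatchSize;
    bool mOnline;
};

int main()
{
    Example e;
    std::cout << e.mBatchSize << " " << e.mOnline << std::endl;
    return 0;
}

The mismatch is harmless in this sketch, but it becomes a real bug as soon as
one initializer reads another member that has not been initialized yet, which
is why the warning is worth keeping clean.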