
List:       linux-aio
Subject:    [PATCH 3/4] aio: queue final fput with iocb work struct
From:       Zach Brown <zach.brown@oracle.com>
Date:       2007-09-27 23:40:30
Message-ID: 11909364322736-git-send-email-zach.brown@oracle.com

Now that we have a work struct in the iocb, we can use it to queue the final
fput instead of using a single global work struct and a global list.  (A
minimal sketch of the resulting per-iocb pattern follows the diffstat below.)

Signed-off-by: Zach Brown <zach.brown@oracle.com>
---
 fs/aio.c            |   78 ++++++++++++++++++--------------------------------
 include/linux/aio.h |    5 ---
 2 files changed, 28 insertions(+), 55 deletions(-)
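
For readers unfamiliar with the pattern, here is a minimal, simplified
sketch of deferring an fput through a work item embedded in each request,
recovering the request with container_of().  (The patch itself defers only
the __fput() half, since f_count has already been dropped to zero in
interrupt context.)  The names my_req, my_wq, my_req_fput_work and
my_req_last_put are illustrative, not the kernel's:

	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/file.h>

	struct my_req {
		struct file		*filp;
		struct work_struct	work;	/* one work item per request */
	};

	static struct workqueue_struct *my_wq;	/* created elsewhere */

	static void my_req_fput_work(struct work_struct *work)
	{
		/* recover the enclosing request from its embedded work item */
		struct my_req *req = container_of(work, struct my_req, work);

		fput(req->filp);	/* drop the file ref in process context */
		kfree(req);
	}

	static void my_req_last_put(struct my_req *req)
	{
		/* defer the final fput to process context */
		INIT_WORK(&req->work, my_req_fput_work);
		queue_work(my_wq, &req->work);
	}

With the work item embedded in each request, the handler needs no global
list or lock to find its request, which is what lets the patch drop
fput_head and fput_lock.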

diff --git a/fs/aio.c b/fs/aio.c
index 11bd482..0d9a0f7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -53,16 +53,9 @@ static struct kmem_cache	*kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
-/* Used for rare fput completion. */
-static void aio_fput_routine(struct work_struct *);
-static DECLARE_WORK(fput_work, aio_fput_routine);
 static void aio_iocb_retry_work(struct work_struct *work);
 static int __aio_put_req(struct kioctx *ctx, struct kiocb *req);
 
-static DEFINE_SPINLOCK(fput_lock);
-static LIST_HEAD(fput_head);
-
-
 /* aio_setup
  *	Creates the slab caches used by the aio routines, panic on
  *	failure as this is done early during the boot sequence.
@@ -428,8 +421,8 @@ void fastcall __put_ioctx(struct kioctx *ctx)
  * This prevents races between the aio code path referencing the
  * req (after submitting it) and aio_complete() freeing the req.
  */
-static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
-static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
+static struct kiocb *FASTCALL(aio_get_req(struct kioctx *ctx));
+static struct kiocb fastcall *aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req = NULL;
 	struct aio_ring *ring;
@@ -480,21 +473,6 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 	return req;
 }
 
-static inline struct kiocb *aio_get_req(struct kioctx *ctx)
-{
-	struct kiocb *req;
-	/* Handle a potential starvation case -- should be exceedingly rare as 
-	 * requests will be stuck on fput_head only if the aio_fput_routine is 
-	 * delayed and the requests were the last user of the struct file.
-	 */
-	req = __aio_get_req(ctx);
-	if (unlikely(NULL == req)) {
-		aio_fput_routine(NULL);
-		req = __aio_get_req(ctx);
-	}
-	return req;
-}
-
 static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
 	assert_spin_locked(&ctx->ctx_lock);
@@ -512,28 +490,25 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 		wake_up(&ctx->wait);
 }
 
-static void aio_fput_routine(struct work_struct *data)
+/*
+ * This performs the final fput of an iocb file reference from a kernel task.
+ * The caller dropped the final ki_users ref and removed the iocb from the
+ * active reqs list in the context.  Nothing else can find it.  However, it is
+ * still accounted for in the context's reqs_active count.  wait_for_all_aios()
+ * won't exit until we drop that count and release the context lock.
+ */
+static void aio_iocb_fput_work(struct work_struct *work)
 {
-	spin_lock_irq(&fput_lock);
-	while (likely(!list_empty(&fput_head))) {
-		struct kiocb *req = list_kiocb(fput_head.next);
-		struct kioctx *ctx = req->ki_ctx;
-
-		list_del(&req->ki_list);
-		spin_unlock_irq(&fput_lock);
-
-		/* Complete the fput */
-		__fput(req->ki_filp);
+	struct kiocb *iocb = container_of(work, struct kiocb, ki_work);
+	struct kioctx *ctx = iocb->ki_ctx;
 
-		/* Link the iocb into the context's free list */
-		spin_lock_irq(&ctx->ctx_lock);
-		really_put_req(ctx, req);
-		spin_unlock_irq(&ctx->ctx_lock);
+	/* Complete the fput */
+	__fput(iocb->ki_filp);
 
-		put_ioctx(ctx);
-		spin_lock_irq(&fput_lock);
-	}
-	spin_unlock_irq(&fput_lock);
+	/* Link the iocb into the context's free list */
+	spin_lock_irq(&ctx->ctx_lock);
+	really_put_req(ctx, iocb);
+	spin_unlock_irq(&ctx->ctx_lock);
 }
 
 /* __aio_put_req
@@ -554,15 +529,18 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 
-	/* Must be done under the lock to serialise against cancellation.
-	 * Call this aio_fput as it duplicates fput via the fput_work.
+	/*
+	 * In the unlikely event that the iocb holds the final filp reference,
+	 * we pass the iocb off to a kernel thread to perform the final fput.
+	 * We may be in interrupt context, so we can't do the fput here.  We
+	 * only get here once there are no more iocb references, which
+	 * guarantees that ki_work isn't queued for retry.
 	 */
 	if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
-		get_ioctx(ctx);
-		spin_lock(&fput_lock);
-		list_add(&req->ki_list, &fput_head);
-		spin_unlock(&fput_lock);
-		queue_work(aio_wq, &fput_work);
+		int was_queued;
+		PREPARE_WORK(&req->ki_work, aio_iocb_fput_work);
+		was_queued = !queue_work(aio_wq, &req->ki_work);
+		BUG_ON(was_queued);
 	} else
 		really_put_req(ctx, req);
 	return 1;
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 90c2784..1967c07 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -285,11 +285,6 @@ do {									\
 
 #include <linux/aio_abi.h>
 
-static inline struct kiocb *list_kiocb(struct list_head *h)
-{
-	return list_entry(h, struct kiocb, ki_list);
-}
-
 /* for sysctl: */
 extern unsigned long aio_nr;
 extern unsigned long aio_max_nr;
-- 
1.5.2.2
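
A note on the queueing in __aio_put_req() above: the iocb's single ki_work
item normally points at the retry handler (aio_iocb_retry_work), so
PREPARE_WORK() re-points it at the fput handler before it is queued.
queue_work() returns nonzero only when the item was not already pending,
hence the BUG_ON(): once the last reference is gone, nothing else may have
ki_work queued.  Continuing the illustrative my_req sketch from above, with
my_retry_work standing in as a hypothetical retry handler:

	static void my_req_setup(struct my_req *req)
	{
		/* the embedded work item normally runs the retry path */
		INIT_WORK(&req->work, my_retry_work);
	}

	/* revised my_req_last_put(): reuse the same work item for teardown */
	static void my_req_last_put(struct my_req *req)
	{
		/*
		 * Nothing else holds a reference, so the work item cannot
		 * be pending for retry; re-point it at the fput handler.
		 */
		PREPARE_WORK(&req->work, my_req_fput_work);
		BUG_ON(!queue_work(my_wq, &req->work));
	}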

--
To unsubscribe, send a message with 'unsubscribe linux-aio' in
the body to majordomo@kvack.org.  For more info on Linux AIO,
see: http://www.kvack.org/aio/
Don't email: aart@kvack.org