[prev in list] [next in list] [prev in thread] [next in thread] 

List:       drbd-cvs
Subject:    [DRBD-cvs] svn commit by lars - r2376 - trunk/drbd - we have issues
From:       drbd-cvs () linbit ! com
Date:       2006-08-18 14:09:06
Message-ID: 20060818140906.E88E92CDC422 () mail ! linbit ! com
[Download RAW message or body]

I'd
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Author: lars
Date: 2006-08-18 16:09:04 +0200 (Fri, 18 Aug 2006)
New Revision: 2376

Modified:
   trunk/drbd/drbd_int.h
   trunk/drbd/drbd_main.c
   trunk/drbd/drbd_receiver.c
   trunk/drbd/drbd_req.c
Log:

we have issues stacking the device limits properly.
I'd like to know/be able to print the "id" in case
the bio_add_page in drbd_alloc_ee fails.

we have no more free_ee list;
that's been converted to a mempool long ago.

Think:
we may either remove the asserts referring to ID_VACANT, too,
or we reinitialize before mempool_freeing.
the latter might be good practice, anyway.

also, devfs has been removed from the kernel 2.6.18 (and deprecated for ages)
so get rid of it here, too.



Modified: trunk/drbd/drbd_int.h
===================================================================
--- trunk/drbd/drbd_int.h	2006-08-16 14:16:06 UTC (rev 2375)
+++ trunk/drbd/drbd_int.h	2006-08-18 14:09:04 UTC (rev 2376)
@@ -39,9 +39,6 @@
 extern int major_nr;
 extern int use_nbd_major;
 
-// use_nbd_major ? "nbd" : "drbd";
-extern char* drbd_devfs_name;
-
 #include <linux/major.h>
 #ifdef DRBD_MAJOR
 # warning "FIXME. DRBD_MAJOR is now officially defined in major.h"
@@ -628,7 +625,6 @@
 typedef struct drbd_request drbd_request_t;
 
 /* These Tl_epoch_entries may be in one of 6 lists:
-   free_ee   .. free entries
    active_ee .. data packet being written
    sync_ee   .. syncer block being written
    done_ee   .. block written, need to send WriteAck
@@ -1166,6 +1162,7 @@
 // drbd_receiver.c
 extern int drbd_release_ee(drbd_dev* mdev,struct list_head* list);
 extern struct Tl_epoch_entry* drbd_alloc_ee(drbd_dev *mdev,
+					    u64 id,
 					    sector_t sector,
 					    unsigned int data_size,
 					    unsigned int gfp_mask);

Modified: trunk/drbd/drbd_main.c
===================================================================
--- trunk/drbd/drbd_main.c	2006-08-16 14:16:06 UTC (rev 2375)
+++ trunk/drbd/drbd_main.c	2006-08-18 14:09:04 UTC (rev 2376)
@@ -47,7 +47,6 @@
 #include <linux/drbd_config.h>
 #include <linux/mm_inline.h>
 #include <linux/slab.h>
-#include <linux/devfs_fs_kernel.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
 #include <linux/notifier.h>
@@ -139,10 +138,6 @@
 
 #endif
 
-// devfs name
-char* drbd_devfs_name = "drbd";
-
-
 // global panic flag
 volatile int drbd_did_panic = 0;
 
@@ -2131,9 +2126,6 @@
 	/*
 	 * currently we drbd_init_ee only on module load, so
 	 * we may do drbd_release_ee only on module unload!
-	 * drbd_release_ee(&mdev->free_ee);
-	 * D_ASSERT(list_emptry(&mdev->free_ee));
-	 *
 	 */
 	D_ASSERT(list_empty(&mdev->active_ee));
 	D_ASSERT(list_empty(&mdev->sync_ee));
@@ -2367,8 +2359,6 @@
 #endif
 	kfree(drbd_conf);
 
-	devfs_remove(drbd_devfs_name);
-
 	if (unregister_blkdev(MAJOR_NR, DEVICE_NAME) != 0)
 		printk(KERN_ERR DEVICE_NAME": unregister of device failed\n");
 
@@ -2441,8 +2431,6 @@
 	}
 	register_reboot_notifier(&drbd_notifier);
 
-	drbd_devfs_name = (major_nr == NBD_MAJOR) ? "nbd" : "drbd";
-
 	/*
 	 * allocate all necessary structs
 	 */
@@ -2456,8 +2444,6 @@
 		memset(drbd_conf,0,sizeof(drbd_dev)*minor_count);
 	else goto Enomem;
 
-	devfs_mk_dir(drbd_devfs_name);
-
 	for (i = 0; i < minor_count; i++) {
 		drbd_dev    *mdev = drbd_conf + i;
 		struct gendisk         *disk;
@@ -2480,7 +2466,6 @@
 		disk->first_minor = i;
 		disk->fops = &drbd_ops;
 		sprintf(disk->disk_name, DEVICE_NAME "%d", i);
-		sprintf(disk->devfs_name, "%s/%d", drbd_devfs_name, i);
 		disk->private_data = mdev;
 		add_disk(disk);
 

Modified: trunk/drbd/drbd_receiver.c
===================================================================
--- trunk/drbd/drbd_receiver.c	2006-08-16 14:16:06 UTC (rev 2375)
+++ trunk/drbd/drbd_receiver.c	2006-08-18 14:09:04 UTC (rev 2376)
@@ -230,6 +230,7 @@
 */
 
 struct Tl_epoch_entry* drbd_alloc_ee(drbd_dev *mdev,
+				     u64 id,
 				     sector_t sector,
 				     unsigned int data_size,
 				     unsigned int gfp_mask)
@@ -239,7 +240,7 @@
 	struct page *page;
 	struct bio *bio;
 	unsigned int ds;
-	int bio_add,i;
+	int i;
 
 	e = mempool_alloc(drbd_ee_mempool, gfp_mask);
 	if (!e) return NULL;
@@ -254,8 +255,24 @@
 	while(ds) {
 		page = drbd_pp_alloc(mdev, gfp_mask);
 		if (!page) goto fail2;
-		bio_add=bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0);
-		D_ASSERT(bio_add);
+		if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
+			/* if this happens, it indicates we are not doing correct
+			 * stacking of device limits.
+			 * or that the syncer (which may choose to ignore the
+			 * device limits) happend to request something which
+			 * does not work on this side...
+			 *
+			 * if this happens for the _first_ page, however, my
+			 * understanding is it would indicate a bug in the
+			 * lower levels, since adding _one_ page is
+			 * "guaranteed" to work.
+			 */
+			ERR("bio_add_page failed: "
+			    "id/sector/data_size/rest 0x%llx %llu %u %u\n",
+			    (unsigned long long)id,
+			    (unsigned long long)sector, data_size, ds);
+			break;
+		}
 		ds -= min_t(int, ds, PAGE_SIZE);
 	}
 
@@ -265,7 +282,7 @@
 	e->ee_size = bio->bi_size;
 	D_ASSERT( data_size == bio->bi_size);
 	e->private_bio = bio;
-	e->block_id = ID_VACANT;
+	e->block_id = id;
 	INIT_HLIST_NODE(&e->colision);
 	e->barrier_nr = 0;
 	e->barrier_nr2 = 0;
@@ -880,7 +897,7 @@
 }
 
 STATIC struct Tl_epoch_entry *
-read_in_block(drbd_dev *mdev, sector_t sector, int data_size)
+read_in_block(drbd_dev *mdev, u64 id, sector_t sector, int data_size)
 {
 	struct Tl_epoch_entry *e;
 	struct bio_vec *bvec;
@@ -888,7 +905,7 @@
 	struct bio *bio;
 	int ds,i,rr;
 
-	e = drbd_alloc_ee(mdev,sector,data_size,GFP_KERNEL);
+	e = drbd_alloc_ee(mdev,id,sector,data_size,GFP_KERNEL);
 	if(!e) return 0;
 	bio = e->private_bio;
 	ds = data_size;
@@ -987,7 +1004,7 @@
 {
 	struct Tl_epoch_entry *e;
 
-	e = read_in_block(mdev,sector,data_size);
+	e = read_in_block(mdev,ID_SYNCER,sector,data_size);
 	if(!e) {
 		dec_local(mdev);
 		return FALSE;
@@ -995,8 +1012,6 @@
 
 	dec_rs_pending(mdev);
 
-	e->block_id = ID_SYNCER;
-
 	drbd_ee_prepare_write(mdev,e);
 	e->w.cb     = e_end_resync_block;
 
@@ -1232,13 +1247,12 @@
 	}
 
 	sector = be64_to_cpu(p->sector);
-	e = read_in_block(mdev,sector,data_size);
+	e = read_in_block(mdev,p->block_id,sector,data_size);
 	if (!e) {
 		dec_local(mdev);
 		return FALSE;
 	}
 
-	e->block_id = p->block_id; // no meaning on this side, e* on partner
 	drbd_ee_prepare_write(mdev, e);
 	e->w.cb     = e_end_block;
 
@@ -1443,13 +1457,12 @@
 		return TRUE;
 	}
 
-	e = drbd_alloc_ee(mdev,sector,size,GFP_KERNEL);
+	e = drbd_alloc_ee(mdev,p->block_id,sector,size,GFP_KERNEL);
 	if (!e) {
 		dec_local(mdev);
 		return FALSE;
 	}
 
-	e->block_id = p->block_id; // no meaning on this side, pr* on partner
 	spin_lock_irq(&mdev->ee_lock);
 	list_add(&e->w.list,&mdev->read_ee);
 	spin_unlock_irq(&mdev->ee_lock);

Modified: trunk/drbd/drbd_req.c
===================================================================
--- trunk/drbd/drbd_req.c	2006-08-16 14:16:06 UTC (rev 2375)
+++ trunk/drbd/drbd_req.c	2006-08-18 14:09:04 UTC (rev 2376)
@@ -271,7 +271,13 @@
 		    (volatile int)((mdev->state.conn < WFBitMapS ||
 				    mdev->state.conn > WFBitMapT) &&
 				   !mdev->state.susp ) );
-
+	/* FIXME RACE
+	 * the wait condition may already be wrong again...
+	 * ok, thats "academic" atm, but probably not good in the long term.
+	 *
+	 * we should have a function that does wait for the condition,
+	 * and do the inc_local within what ever lock is necessary...
+	 */
 	local = inc_local(mdev);
 	if (rw == READ || rw == READA) {
 		if (local) {

_______________________________________________
drbd-cvs mailing list
drbd-cvs@lists.linbit.com
http://lists.linbit.com/mailman/listinfo/drbd-cvs
[prev in list] [next in list] [prev in thread] [next in thread] 

Configure | About | News | Add a list | Sponsored by KoreLogic