Move the pg_init call into process_queued_ios, so the hardware handler's pg_init function is no longer invoked from __switch_pg with m->lock held.

The single initialising_pg flag becomes two flags: pg_init_required (the daemon must call pg_init before dispatching) and queue_io (all I/O must be queued for the daemon). __switch_pg now only records the new current_pg and sets these flags; map_io queues bios and wakes the daemon while initialisation is pending; process_queued_ios chooses a path if necessary, clears pg_init_required, calls pg_init after dropping the lock, and dispatches once queue_io is clear. dm_pg_init_complete clears queue_io on success, or invalidates current_path and current_pg on error so a fresh path is chosen. queue_io also starts out set, so I/O is queued until the first path selection has completed.
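For readers following the locking change, here is a minimal userspace model of the new handshake between the mapping path, the daemon and pg_init completion. All names (mock_multipath and friends) are hypothetical stand-ins, not the kernel code: it is plain C with no locking or work queues, purely to illustrate the order of events.

#include <stdio.h>

/* Hypothetical stand-in for the two new multipath flags. */
struct mock_multipath {
	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned queue_io;		/* Must we queue all I/O? */
};

/* __switch_pg equivalent: only record that initialisation is
 * needed; the pg_init call itself is deferred to the daemon. */
static void mock_switch_pg(struct mock_multipath *m, int has_pg_init)
{
	m->pg_init_required = has_pg_init;
	m->queue_io = has_pg_init;
}

/* process_queued_ios equivalent: consume pg_init_required and
 * call pg_init with no lock held; queued I/O stays queued until
 * the completion callback clears queue_io. */
static void mock_process_queued_ios(struct mock_multipath *m)
{
	if (m->pg_init_required) {
		m->pg_init_required = 0;
		printf("daemon calls pg_init outside the lock\n");
		return;	/* I/O remains queued until completion */
	}
	if (!m->queue_io)
		printf("dispatching queued I/O\n");
}

/* dm_pg_init_complete equivalent. */
static void mock_pg_init_complete(struct mock_multipath *m, unsigned err)
{
	if (!err)
		m->queue_io = 0;	/* stop queueing */
	mock_process_queued_ios(m);	/* schedule_work in the kernel */
}

int main(void)
{
	struct mock_multipath m = { 0, 1 };	/* queue_io set at create time */

	mock_switch_pg(&m, 1);		/* new PG has a hardware handler */
	mock_process_queued_ios(&m);	/* daemon fires pg_init */
	mock_pg_init_complete(&m, 0);	/* init done: I/O flows again */
	return 0;
}

The property the patch establishes is visible in the model: pg_init only ever runs from the daemon's context, after the spinlock has been dropped, and I/O keeps queueing until the handler reports completion.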
--- diff/drivers/md/dm-mpath.c	2004-10-29 15:39:26.000000000 +0100
+++ source/drivers/md/dm-mpath.c	2004-10-29 15:39:32.000000000 +0100
@@ -40,7 +40,7 @@
 	struct path_selector ps;
 
 	unsigned nr_paths;
-	unsigned bypass;
+	unsigned bypass;		/* Temporarily bypass this PG */
 	struct list_head paths;
 };
 
@@ -53,7 +53,8 @@
 
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
-	int initialising_pg;
+	unsigned pg_init_required;	/* pg_init needs calling? */
+	unsigned queue_io;		/* Must we queue all I/O? */
 
 	spinlock_t lock;
 	unsigned nr_valid_paths;
@@ -82,6 +83,7 @@
 typedef int (*action_fn) (struct path *path);
 
 #define MIN_IOS 256
+
 static kmem_cache_t *_mpio_cache;
 
 static void process_queued_ios(void *data);
@@ -153,6 +155,7 @@
 		memset(m, 0, sizeof(*m));
 		INIT_LIST_HEAD(&m->priority_groups);
 		m->lock = SPIN_LOCK_UNLOCKED;
+		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
 		INIT_WORK(&m->trigger_event, trigger_event, m);
 		m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
@@ -185,17 +188,19 @@
 	kfree(m);
 }
 
-/*-----------------------------------------------------------------
- * The multipath daemon is responsible for resubmitting queued ios.
- *---------------------------------------------------------------*/
-
 static void __switch_pg(struct multipath *m, struct path *path)
 {
 	struct hw_handler *hwh = &m->hw_handler;
 
+	m->current_pg = path->pg;
+
+	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (hwh && hwh->type->pg_init) {
-		hwh->type->pg_init(hwh, path, path->dev->bdev);
-		m->initialising_pg = 1;
+		m->pg_init_required = 1;
+		m->queue_io = 1;
+	} else {
+		m->pg_init_required = 0;
+		m->queue_io = 0;
 	}
 }
 
@@ -222,47 +227,49 @@
 	}
 
 	m->current_path = path;
-	if (path)
-		m->current_pg = path->pg;
 }
 
 static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio)
 {
 	unsigned long flags;
-	struct path *path = NULL;
-	int must_queue = 0;
+	struct path *path;
+	unsigned must_queue;
 
 	spin_lock_irqsave(&m->lock, flags);
 
 	/* Do we need to select a new path? */
-	if (!m->initialising_pg &&
-	    (!m->current_path || (m->current_count && --m->current_count == 0)))
+	if (!m->current_path ||
+	    (!m->queue_io && (m->current_count && --m->current_count == 0)))
 		__choose_path(m);
 
-	if (m->initialising_pg)
-		must_queue = 1;
-	else if (m->current_path)
-		path = m->current_path;
+	must_queue = m->queue_io;
+	path = m->current_path;
 
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	if (!must_queue && !path)
+	if (!path)
 		return -EIO;
 
-	mpio->path = path;
-	bio->bi_bdev = mpio->path->dev->bdev;
-
-	if (!must_queue)
+	if (!must_queue) {
+		mpio->path = path;
+		bio->bi_bdev = mpio->path->dev->bdev;
 		return 1;	/* Mapped successfully */
+	}
 
 	/* queue for the daemon to resubmit */
 	spin_lock_irqsave(&m->lock, flags);
 	bio_list_add(&m->queued_ios, bio);
+	if (m->pg_init_required || !m->queue_io)
+		schedule_work(&m->process_queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	return 0;		/* Queued */
 }
 
+/*-----------------------------------------------------------------
+ * The multipath daemon is responsible for resubmitting queued ios.
+ *---------------------------------------------------------------*/
+
 static void dispatch_queued_ios(struct multipath *m)
 {
 	int r;
@@ -295,6 +302,30 @@
 static void process_queued_ios(void *data)
 {
 	struct multipath *m = (struct multipath *) data;
+	struct hw_handler *hwh = &m->hw_handler;
+	struct path *path;
+	unsigned init_required, must_queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(&m->lock, flags);
+
+	if (!m->current_path)
+		__choose_path(m);
+
+	path = m->current_path;
+	must_queue = m->queue_io;
+
+	init_required = m->pg_init_required;
+	if (init_required)
+		m->pg_init_required = 0;
+
+	spin_unlock_irqrestore(&m->lock, flags);
+
+	if (init_required)
+		hwh->type->pg_init(hwh, path, path->dev->bdev);
+
+	if (path && must_queue)
+		return;
 
 	dispatch_queued_ios(m);
 }
@@ -689,6 +720,7 @@
 	spin_lock_irqsave(&m->lock, flags);
 	pg->bypass = bypass;
 	m->current_path = NULL;
+	m->current_pg = NULL;
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	schedule_work(&m->trigger_event);
@@ -720,6 +752,9 @@
 	return -EINVAL;
 }
 
+/*
+ * pg_init must call this when it has completed its initialisation
+ */
 void dm_pg_init_complete(struct path *path, unsigned err_flags)
 {
 	struct priority_group *pg = path->pg;
@@ -733,7 +768,12 @@
 		bypass_pg(m, pg, 1);
 
 	spin_lock_irqsave(&m->lock, flags);
-	m->initialising_pg = 0;
+	if (!err_flags)
+		m->queue_io = 0;
+	else {
+		m->current_path = NULL;
+		m->current_pg = NULL;
+	}
 	schedule_work(&m->process_queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 }
@@ -769,7 +809,7 @@
 		/* queue for the daemon to resubmit or fail */
 		spin_lock(&m->lock);
 		bio_list_add(&m->queued_ios, bio);
-		if (!m->initialising_pg)
+		if (!m->queue_io)
 			schedule_work(&m->process_queued_ios);
 		spin_unlock(&m->lock);
 
