Renamed mpath's 'failed_ios' (and related identifiers such as 'failed_lock' and 'dispatch_failed') to 'queued_ios' etc., in preparation for more general use.
--- diff/drivers/md/dm-mpath.c	2004-10-29 15:38:17.000000000 +0100
+++ source/drivers/md/dm-mpath.c	2004-10-29 15:38:23.000000000 +0100
@@ -28,7 +28,7 @@
 	struct dm_dev *dev;
 	struct priority_group *pg;
 
-	spinlock_t failed_lock;
+	spinlock_t queued_lock;
 	int is_active;
 	unsigned fail_count;		/* Cumulative */
 };
@@ -60,8 +60,8 @@
 	struct path *current_path;
 	unsigned current_count;
 
-	struct work_struct dispatch_failed;
-	struct bio_list failed_ios;
+	struct work_struct dispatch_queued;
+	struct bio_list queued_ios;
 
 	struct work_struct trigger_event;
 
@@ -82,7 +82,7 @@
 #define MIN_IOS 256
 static kmem_cache_t *_mpio_cache;
 
-static void dispatch_failed_ios(void *data);
+static void dispatch_queued_ios(void *data);
 static void trigger_event(void *data);
 
 static struct path *alloc_path(void)
@@ -91,7 +91,7 @@
 
 	if (path) {
 		memset(path, 0, sizeof(*path));
-		path->failed_lock = SPIN_LOCK_UNLOCKED;
+		path->queued_lock = SPIN_LOCK_UNLOCKED;
 		path->is_active = 1;
 	}
 
@@ -151,7 +151,7 @@
 		memset(m, 0, sizeof(*m));
 		INIT_LIST_HEAD(&m->priority_groups);
 		m->lock = SPIN_LOCK_UNLOCKED;
-		INIT_WORK(&m->dispatch_failed, dispatch_failed_ios, m);
+		INIT_WORK(&m->dispatch_queued, dispatch_queued_ios, m);
 		INIT_WORK(&m->trigger_event, trigger_event, m);
 		m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
 					      mempool_free_slab, _mpio_cache);
@@ -184,7 +184,7 @@
 }
 
 /*-----------------------------------------------------------------
- * The multipath daemon is responsible for resubmitting failed ios.
+ * The multipath daemon is responsible for resubmitting queued ios.
  *---------------------------------------------------------------*/
 static int __choose_path(struct multipath *m)
 {
@@ -236,7 +236,7 @@
 	return 1;
 }
 
-static void dispatch_failed_ios(void *data)
+static void dispatch_queued_ios(void *data)
 {
 	struct multipath *m = (struct multipath *) data;
 
@@ -244,7 +244,7 @@
 	struct bio *bio = NULL, *next;
 
 	spin_lock_irqsave(&m->lock, flags);
-	bio = bio_list_get(&m->failed_ios);
+	bio = bio_list_get(&m->queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	while (bio) {
@@ -551,7 +551,7 @@
 	unsigned long flags;
 	struct multipath *m;
 
-	spin_lock_irqsave(&path->failed_lock, flags);
+	spin_lock_irqsave(&path->queued_lock, flags);
 
 	if (!path->is_active)
 		goto out;
@@ -572,7 +572,7 @@
 	spin_unlock(&m->lock);
 
 out:
-	spin_unlock_irqrestore(&path->failed_lock, flags);
+	spin_unlock_irqrestore(&path->queued_lock, flags);
 
 	return 0;
 }
@@ -583,7 +583,7 @@
 	unsigned long flags;
 	struct multipath *m;
 
-	spin_lock_irqsave(&path->failed_lock, flags);
+	spin_lock_irqsave(&path->queued_lock, flags);
 
 	if (path->is_active)
 		goto out;
@@ -608,7 +608,7 @@
 	spin_unlock(&m->lock);
 
 out:
-	spin_unlock_irqrestore(&path->failed_lock, flags);
+	spin_unlock_irqrestore(&path->queued_lock, flags);
 
 	return r;
 }
@@ -706,10 +706,10 @@
 
 		/* queue for the daemon to resubmit */
 		spin_lock(&m->lock);
-		bio_list_add(&m->failed_ios, bio);
+		bio_list_add(&m->queued_ios, bio);
 		spin_unlock(&m->lock);
 
-		schedule_work(&m->dispatch_failed);
+		schedule_work(&m->dispatch_queued);
 		return 1;	/* io not complete */
 	}
 
@@ -756,13 +756,13 @@
 
 			list_for_each_entry(p, &pg->paths, list) {
 				format_dev_t(buffer, p->dev->bdev->bd_dev);
-				spin_lock_irqsave(&p->failed_lock, flags);
+				spin_lock_irqsave(&p->queued_lock, flags);
 				DMEMIT("%s %s %u ", buffer,
 				       p->is_active ? "A" : "F", p->fail_count);
 				if (pg->ps.type->status)
 					sz += pg->ps.type->status(&pg->ps, p,
 					      type, result + sz, maxlen - sz);
-				spin_unlock_irqrestore(&p->failed_lock, flags);
+				spin_unlock_irqrestore(&p->queued_lock, flags);
 			}
 		}
 		break;
