The path selector's select_path function now reports, via a repeat_count output parameter, how many times to re-use the chosen path before it is called again (0 means only re-select when the path fails).
--- diff/drivers/md/dm-mpath.c	2004-09-28 16:13:41.000000000 +0100
+++ source/drivers/md/dm-mpath.c	2004-09-28 16:15:18.000000000 +0100
@@ -19,23 +19,6 @@
 #include <linux/workqueue.h>
 #include <asm/atomic.h>
 
-/* FIXME: get rid of this */
-#define MPATH_FAIL_COUNT	1
-
-/*
- * We don't want to call the path selector for every single io
- * that comes through, so instead we only consider changing paths
- * every MPATH_MIN_IO ios.  This number should be selected to be
- * big enough that we can reduce the overhead of the path
- * selector, but also small enough that we don't take the policy
- * decision away from the path selector.
- *
- * So people should _not_ be tuning this number to try and get
- * the most performance from some particular type of hardware.
- * All the smarts should be going into the path selector.
- */
-#define MPATH_MIN_IO		1000
-
 /* Path properties */
 struct path {
 	struct list_head list;
@@ -44,8 +27,8 @@
 	struct priority_group *pg;
 
 	spinlock_t failed_lock;
-	int has_failed;
-	unsigned fail_count;
+	int is_active;
+	unsigned fail_count;		/* Cumulative */
 };
 
 struct priority_group {
@@ -102,7 +85,7 @@
 	if (path) {
 		memset(path, 0, sizeof(*path));
 		path->failed_lock = SPIN_LOCK_UNLOCKED;
-		path->fail_count = MPATH_FAIL_COUNT;
+		path->is_active = 1;
 	}
 
 	return path;
@@ -198,14 +181,14 @@
 	if (m->nr_valid_paths) {
 		/* loop through the priority groups until we find a valid path. */
 		list_for_each_entry (pg, &m->priority_groups, list) {
-			path = pg->ps.type->select_path(&pg->ps);
+			path = pg->ps.type->select_path(&pg->ps,
+							&m->current_count);
 			if (path)
 				break;
 		}
 	}
 
 	m->current_path = path;
-	m->current_count = MPATH_MIN_IO;
 
 	return 0;
 }
@@ -218,7 +201,7 @@
 	spin_lock_irqsave(&m->lock, flags);
 
 	/* Do we need to select a new path? */
-	if (!m->current_path || --m->current_count == 0)
+	if (!m->current_path || (m->current_count && --m->current_count == 0))
 		__choose_path(m);
 
 	path = m->current_path;
@@ -384,6 +367,8 @@
 		goto bad;
 	}
 
+	/* FIXME Read path selector arguments & pass them to ctr */
+
 	r = pst->ctr(&pg->ps);
 	if (r) {
 		dm_put_path_selector(pst);
@@ -509,11 +494,11 @@
 
 	spin_lock_irqsave(&path->failed_lock, flags);
 
-	/* FIXME: path->fail_count is brain dead */
-	if (!path->has_failed && !--path->fail_count) {
+	if (path->is_active) {
 		m = path->pg->m;
 
-		path->has_failed = 1;
+		path->is_active = 0;
+		path->fail_count++;
 		path->pg->ps.type->fail_path(&path->pg->ps, path);
 		schedule_work(&m->trigger_event);
 
@@ -605,7 +590,7 @@
 				format_dev_t(buffer, p->dev->bdev->bd_dev);
 				spin_lock_irqsave(&p->failed_lock, flags);
 				DMEMIT("%s %s %u ", buffer,
-				     p->has_failed ? "F" : "A", p->fail_count);
+				     p->is_active ? "A" : "F", p->fail_count);
 				sz += pg->ps.type->status(&pg->ps, p, type,
 						     result + sz, maxlen - sz);
 				spin_unlock_irqrestore(&p->failed_lock, flags);
--- diff/drivers/md/dm-path-selector.c	2004-09-28 15:46:19.000000000 +0100
+++ source/drivers/md/dm-path-selector.c	2004-09-28 16:15:18.000000000 +0100
@@ -160,14 +160,19 @@
 /*-----------------------------------------------------------------
  * Round robin selector
  *---------------------------------------------------------------*/
+
+#define RR_MIN_IO		1000
+
 struct selector {
 	spinlock_t lock;
 
 	struct list_head valid_paths;
 	struct list_head invalid_paths;
+
+	unsigned repeat_count;
 };
 
-static struct selector *alloc_selector(void)
+static struct selector *alloc_selector(unsigned repeat_count)
 {
 	struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
 
@@ -175,6 +180,7 @@
 		INIT_LIST_HEAD(&s->valid_paths);
 		INIT_LIST_HEAD(&s->invalid_paths);
 		s->lock = SPIN_LOCK_UNLOCKED;
+		s->repeat_count = repeat_count;
 	}
 
 	return s;
@@ -185,7 +191,8 @@
 {
 	struct selector *s;
 
-	s = alloc_selector();
+	/* FIXME Parameter passed in */
+	s = alloc_selector(RR_MIN_IO);
 	if (!s)
 		return -ENOMEM;
 
@@ -266,7 +273,8 @@
 }
 
 /* Path selector */
-static struct path *rr_select_path(struct path_selector *ps)
+static struct path *rr_select_path(struct path_selector *ps,
+				   unsigned *repeat_count)
 {
 	unsigned long flags;
 	struct selector *s = (struct selector *) ps->context;
@@ -276,6 +284,7 @@
 	if (!list_empty(&s->valid_paths)) {
 		pi = list_entry(s->valid_paths.next, struct path_info, list);
 		list_move_tail(&pi->list, &s->valid_paths);
+		*repeat_count = RR_MIN_IO;
 	}
 	spin_unlock_irqrestore(&s->lock, flags);
 
--- diff/drivers/md/dm-path-selector.h	2004-09-28 15:42:45.000000000 +0100
+++ source/drivers/md/dm-path-selector.h	2004-09-28 16:15:18.000000000 +0100
@@ -48,8 +48,13 @@
  * Must ensure that _any_ dynamically allocated selection context is
  * reused or reallocated because an endio call (which needs to free it)
  * might happen after a couple of select calls.
+ *
+ * repeat_count is the number of times to use the path before
+ * calling the function again.  0 means don't call it again unless
+ * the path fails.
  */
-typedef	struct path *(*ps_select_path_fn) (struct path_selector *ps);
+typedef	struct path *(*ps_select_path_fn) (struct path_selector *ps,
+					   unsigned *repeat_count);
 
 /*
  * Notify the selector that a path has failed.
