Change m->path_lock from an rw semaphore to a spinlock, since it
sometimes needs to be taken from irq context.
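
As a hedged illustration of the rule this change is driven by (the
caller name below is hypothetical; the struct and field names come from
this patch): a sleeping lock such as an rw semaphore may not be taken
from irq context, so after this change every acquisition that can race
with an interrupt must save and disable local interrupt state:

	static void example_walk(struct multipath *m)
	{
		unsigned long flags;

		/* spinlock + irqsave is safe even if an irq handler
		 * also takes m->path_lock on this cpu */
		spin_lock_irqsave(&m->path_lock, flags);
		/* ... walk m->priority_groups ... */
		spin_unlock_irqrestore(&m->path_lock, flags);
	}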

NOTE: test_path() is safe to call while holding a spinlock, since it
uses down_trylock() rather than down() and therefore never sleeps.
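
To sketch why (test_path()'s body is not part of this patch, so the
semaphore below is a stand-in): down_trylock() returns non-zero
immediately when the semaphore is unavailable instead of sleeping, so
it cannot block with a spinlock held:

	static int example_test(struct semaphore *sem)
	{
		if (down_trylock(sem))
			return 0;	/* busy, retry on the next pass */

		/* ... do the path test ... */
		up(sem);
		return 1;
	}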
--- diff/drivers/md/dm-mpath.c	2003-12-29 10:17:15.000000000 +0000
+++ source/drivers/md/dm-mpath.c	2003-12-29 10:17:21.000000000 +0000
@@ -48,7 +48,7 @@
 	struct list_head list;
 	struct dm_target *ti;
 
-	struct rw_semaphore path_lock;
+	spinlock_t path_lock;
 	struct list_head priority_groups;
 	struct path *current_path;
 	atomic_t count;
@@ -153,7 +153,7 @@
 	m = kmalloc(sizeof(*m), GFP_KERNEL);
 	if (m) {
 		memset(m, 0, sizeof(*m));
-		init_rwsem(&m->path_lock);
+		m->path_lock = SPIN_LOCK_UNLOCKED;
 		INIT_LIST_HEAD(&m->priority_groups);
 		m->failed_lock = SPIN_LOCK_UNLOCKED;
 		m->min_io = 1000; /* FIXME: arbitrary number */
@@ -245,8 +245,9 @@
 {
 	struct priority_group *pg;
 	struct path *p;
+	unsigned long flags;
 
-	down_read(&m->path_lock);
+	spin_lock_irqsave(&m->path_lock, flags);
 	list_for_each_entry (pg, &m->priority_groups, list) {
 		list_for_each_entry (p, &pg->valid_paths, list)
 			fn(p);
@@ -254,7 +255,7 @@
 		list_for_each_entry (p, &pg->invalid_paths, list)
 			fn(p);
 	}
-	up_read(&m->path_lock);
+	spin_unlock_irqrestore(&m->path_lock, flags);
 }
 
 /* Multipathd does this every time it runs, returns a sleep duration hint */
@@ -590,35 +591,25 @@
 {
 	struct multipath *m = (struct multipath *) ti->private;
 	struct path *path;
+	unsigned long flags;
 
-	down_read(&m->path_lock);
+	spin_lock_irqsave(&m->path_lock, flags);
 
 	/*
 	 * Do we need to choose a new path?
 	 */
-	if (m->current_path && atomic_dec_and_test(&m->count)) {
+	if (m->current_path && atomic_dec_and_test(&m->count))
 		path = m->current_path;
-		up_read(&m->path_lock);
-
-	} else {
-		/*
-		 * Promote to write lock, there is no need to
-		 * recheck the condition after getting the write
-		 * lock, since choosing a new path twice is
-		 * harmless, and only this thread will have hit
-		 * a zero m->count. */
-		up_read(&m->path_lock);
-		down_write(&m->path_lock);
 
+	else {
 		if (__choose_path(m)) {
 			/* no paths */
-			up_write(&m->path_lock);
+			spin_unlock_irqrestore(&m->path_lock, flags);
 			return -EIO;
-		}
-
-		path = m->current_path;
-		up_write(&m->path_lock);
+		} else
+			path = m->current_path;
 	}
+	spin_unlock_irqrestore(&m->path_lock, flags);
 
 	/* map */
 	bio->bi_rw |= (1 << BIO_RW_FAILFAST);
@@ -690,10 +681,10 @@
 	if (error) {
 		struct path *path;
 
-		down_write(&m->path_lock);
+		spin_lock(&m->path_lock);
 		path = __find_path(m, bio->bi_bdev);
 		__fail_path(path);
-		up_write(&m->path_lock);
+		spin_unlock(&m->path_lock);
 
 		r = __resubmit_io(m, bio);
 	}
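
For reference, a rough sketch of the control flow the large hunk above
produces in the map path (reconstructed from the diff, not copied
verbatim): the old read-to-write promotion disappears because a
spinlock is already exclusive, leaving a single critical section:

	spin_lock_irqsave(&m->path_lock, flags);
	if (m->current_path && atomic_dec_and_test(&m->count))
		path = m->current_path;		/* fast path: reuse */
	else if (__choose_path(m)) {
		spin_unlock_irqrestore(&m->path_lock, flags);
		return -EIO;			/* no valid paths */
	} else
		path = m->current_path;		/* newly chosen path */
	spin_unlock_irqrestore(&m->path_lock, flags);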
