--- linux/CREDITS.lvm.orig	Fri Jun 16 17:01:46 2000
+++ linux/CREDITS	Sun Jun 18 10:10:09 2000
@@ -1373,6 +1373,13 @@
 D: XF86_8514
 D: cfdisk (curses based disk partitioning program)
 
+N: Heinz Mauelshagen
+E: mge@EZ-Darmstadt.Telekom.de
+D: Logical Volume Manager
+S: Bartningstr. 12
+S: 64289 Darmstadt
+S: Germany
+
 N: Mike McLagan
 E: mike.mclagan@linux.org
 W: http://www.invlogic.com/~mmclagan
--- linux/Documentation/Configure.help.lvm.orig	Fri Jun 16 17:01:54 2000
+++ linux/Documentation/Configure.help	Sun Jun 18 10:10:09 2000
@@ -1263,6 +1263,30 @@
   called on26.o. You must also have a high-level driver for the type
   of device that you want to support.
 
+Logical Volume Manager (LVM) support
+CONFIG_BLK_DEV_LVM
+  This driver lets you combine several hard disks, hard disk partitions,
+  multiple devices or even loop devices (for evaluation purposes) into
+  a volume group. Imagine a volume group as a kind of virtual disk.
+  Logical volumes, which can be thought of as virtual partitions,
+  can be created in the volume group.  Volume groups and logical volumes
+  can be resized after creation as your capacity needs change.
+  Logical volumes are accessed as block devices named
+  /dev/VolumeGroupName/LogicalVolumeName.
+
+  For details see /usr/src/linux/Documentation/LVM-HOWTO.
+
+  To get the newest software see <http://www.sistina.com/lvm>.
+
+Logical Volume Manager proc filesystem information
+CONFIG_LVM_PROC_FS
+  If you say Y here, overall Logical Volume Manager, volume group, logical
+  volume and physical volume information is made available in /proc/lvm.
+
+  To use this option, "proc filesystem support" (CONFIG_PROC_FS) must be
+  enabled as well.
+
+
 Multiple devices driver support
 CONFIG_BLK_DEV_MD
   This driver lets you combine several hard disk partitions into one
--- linux/Documentation/LVM-HOWTO.lvm.orig	Sun Jun 18 10:10:09 2000
+++ linux/Documentation/LVM-HOWTO	Sun Jun 18 10:10:09 2000
@@ -0,0 +1,118 @@
+Heinz Mauelshagen's LVM (Logical Volume Manager) howto.             01/28/1999
+
+
+Abstract:
+---------
+The LVM adds virtual disk and virtual partition functionality
+to the Linux operating system.
+
+It achieves this by adding an additional layer between the physical peripherals
+and the i/o interface in the kernel.
+
+This allows the concatenation of several disk partitions or whole disks
+(so-called physical volumes or PVs), or even multiple devices,
+to form a storage pool (a so-called volume group or VG) whose
+allocation units are called physical extents (PEs).
+You can think of the volume group as a virtual disk.
+Please see scenario below.
+
+Some or all PEs of this VG can then be allocated to so-called logical volumes
+or LVs in units called logical extents or LEs.
+Each LE is mapped to a corresponding PE; LEs and PEs are equal in size.
+Logical volumes are a kind of virtual partition.
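+For example (assuming an extent size of 1MB): a 1500MB logical volume
+consists of 1500 LEs, each mapped to one 1MB PE somewhere in the VG.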
+
+
+The LVs are accessed through device special files named
+/dev/VolumeGroupName/LogicalVolumeName, analogous to the familiar
+/dev/sd[a-z]* or /dev/hd[a-z]* devices.
+
+But going beyond this, you are able to extend or reduce
+VGs _AND_ LVs at runtime!
+
+So...
+If, for example, the capacity of an LV gets too small and the VG containing
+this LV is full, you could add another PV to that VG and simply extend
+the LV afterwards.
+If you reduce or delete an LV, you can use the freed capacity for other
+LVs in the same VG.
+
+
+The above scenario looks like this:
+
+     /------------------------------------------\
+     |  /--PV 1--\      VG 1      /--PVn---\    |
+     |  |-VGDA---|                |-VGDA---|    |
+     |  |PE1PE2..|                |PE1PE2..|    |
+     |  |        |     ......     |        |    |
+     |  |        |                |        |    |
+     |  |    /-----------------------\     |    |
+     |  |    \-------LV 1------------/     |    |
+     |  |   ..PEn|                |   ..PEn|    |
+     |  \--------/                \--------/    |
+     \------------------------------------------/
+
+PV 1 could be /dev/sdc1 sized 3GB
+PV n could be /dev/sde1 sized 4GB
+VG 1 could be test_vg
+LV 1 could be /dev/test_vg/test_lv
+VGDA is the volume group descriptor area holding the LVM metadata
+PE1 up to PEn are the physical extents on each disk (partition)
+
+
+
+For installation steps see INSTALL; use insmod(1)/modprobe(1) or
+kmod/kerneld(8) to load the logical volume manager module if you did not
+build it into the kernel.
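+For example, if LVM was configured as a module ("m" for CONFIG_BLK_DEV_LVM),
+it can usually be loaded with "modprobe lvm-mod".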
+
+
+Configuration steps for getting the above scenario:
+
+1. Set the partition system id to 0xFE on /dev/sdc1 and /dev/sde1.
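+   This can be done, for example, with fdisk's "t" command (change a
+   partition's system id), entering the hex code "fe", and then "w" to
+   write the partition table.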
+
+2. do a "pvcreate /dev/sd[ce]1"
+   For testing purposes you can use more than one partition on a disk.
+   In production you should not, because a striped LV spanning two
+   partitions on the same disk would lead to a performance breakdown.
+
+3. do a "vgcreate test_vg /dev/sd[ce]1" to create the new VG named "test_vg"
+   which has the total capacity of both partitions.
+   vgcreate also activates the new volume group (i.e. transfers the metadata
+   into the LVM driver in the kernel) so that LVs can be created in the
+   next step.
+
+4. do a "lvcreate -L1500 -ntest_lv test_vg" to get a 1500MB linear LV named
+   "test_lv" and its block device special file "/dev/test_vg/test_lv".
+
+   Or do a "lvcreate -i2 -I4 -l100 -nanother_test_lv test_vg" to get a 100 LE
+   large logical volume with 2 stripes and a stripe size of 4 KB.
+
+5. For example generate a filesystem in one LV with
+   "mke2fs /dev/test_vg/test_lv" and mount it.
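+   (e.g. "mount /dev/test_vg/test_lv /mnt"; any existing mount point will do)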
+
+6. extend /dev/test_vg/test_lv to 1600MB with relative size by
+   "lvextend -L+100 /dev/test_vg/test_lv"
+   or with absolute size by
+   "lvextend -L1600 /dev/test_vg/test_lv"
+ 
+7. reduce /dev/test_vg/test_lv to 900 logical extents with relative extents by
+   "lvreduce -l-700 /dev/test_vg/test_lv"
+   or with absolute extents by
+   "lvreduce -l900 /dev/test_vg/test_lv"
+ 
+8. rename a VG by deactivating it with
+   "vgchange -an test_vg"   # only VGs with _no_ open LVs can be deactivated!
+   "vgrename test_vg whatever"
+   and reactivate it again by
+   "vgchange -ay whatever"
+
+9. rename a LV after closing it by
+   "lvchange -an /dev/whatever/test_lv" # only closed LVs can be deactivated
+   "lvrename  /dev/whatever/test_lv  /dev/whatever/whatvolume"
+   or by
+   "lvrename  whatever test_lv whatvolume"
+   and reactivate it again by
+   "lvchange -ay /dev/whatever/whatvolume"
+
+10. if you have the resize2fs program from e2fsprogs 1.19 or later and/or the
+    GNU ext2resize tools, you are able to resize the ext2 type filesystems
+    contained in logical volumes without destroying the data by
+    "e2fsadm -L+100 /dev/test_vg/another_test_lv"
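+
+You can inspect the result of the above steps at any time with the LVM
+user space tools, e.g. "pvdisplay /dev/sdc1", "vgdisplay test_vg" and
+"lvdisplay /dev/test_vg/test_lv", or, with CONFIG_LVM_PROC_FS enabled,
+by looking at /proc/lvm.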
--- linux/MAINTAINERS.lvm.orig	Fri Jun 16 17:01:50 2000
+++ linux/MAINTAINERS	Sun Jun 18 10:10:09 2000
@@ -541,6 +541,13 @@
 W:	http://people.redhat.com/zab/maestro/
 S:	Supported
 
+LOGICAL VOLUME MANAGER
+P:	Heinz Mauelshagen
+M:	linux-LVM@EZ-Darmstadt.Telekom.de
+L:	linux-LVM@sistina.com
+W:	http://www.sistina.com/lvm
+S:	Maintained
+
 M68K
 P:	Jes Sorensen
 M:	Jes.Sorensen@cern.ch
--- linux/drivers/block/Config.in.lvm.orig	Fri Jun 16 17:01:54 2000
+++ linux/drivers/block/Config.in	Sun Jun 18 10:10:09 2000
@@ -171,6 +171,10 @@
 
 comment 'Additional Block Devices'
 
+tristate 'Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM N
+if [ "$CONFIG_BLK_DEV_LVM" != "n" ]; then
+  bool '   LVM information in proc filesystem' CONFIG_LVM_PROC_FS Y
+fi
 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
 if [ "$CONFIG_BLK_DEV_LOOP" != "n" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then
   bool '   Loop filesystem debugging support (DANGEROUS)' CONFIG_LOOP_DISCARD
--- linux/drivers/block/Makefile.lvm.orig	Fri Jun 16 17:01:46 2000
+++ linux/drivers/block/Makefile	Sun Jun 18 10:10:09 2000
@@ -310,6 +310,14 @@
   endif
 endif
 
+ifeq ($(CONFIG_BLK_DEV_LVM),y)
+L_OBJS += lvm.o lvm-snap.o
+else
+   ifeq ($(CONFIG_BLK_DEV_LVM),m)
+   M_OBJS += lvm-mod.o
+   endif
+endif
+
 ifeq ($(CONFIG_BLK_DEV_MD),y)
 LX_OBJS += md.o
 
@@ -335,6 +339,9 @@
 endif
 
 include $(TOPDIR)/Rules.make
+
+lvm-mod.o: lvm.o lvm-snap.o
+	$(LD) -r -o $@ lvm.o lvm-snap.o
 
 ide-mod.o: ide.o $(IDE_OBJS)
 	$(LD) $(LD_RFLAG) -r -o $@ ide.o $(IDE_OBJS)
--- linux/drivers/block/README.lvm.lvm.orig	Sun Jun 18 10:10:09 2000
+++ linux/drivers/block/README.lvm	Sun Jun 18 10:10:09 2000
@@ -0,0 +1,8 @@
+
+This is the Logical Volume Manager driver for Linux.
+
+The tools and the library that manage logical volumes can be found
+at <http://www.sistina.com/lvm>.
+
+There you can also obtain current driver versions.
+
--- linux/drivers/block/genhd.c.lvm.orig	Fri Jun 16 17:01:46 2000
+++ linux/drivers/block/genhd.c	Sun Jun 18 10:10:09 2000
@@ -51,6 +51,11 @@
 				le32_to_cpu(__a); \
 			})
 
+#if defined CONFIG_BLK_DEV_LVM || defined CONFIG_BLK_DEV_LVM_MODULE
+#include <linux/lvm.h>
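+/* set up by the LVM driver/module at init time so that disk_name()
+   below can resolve names for LVM block device minors */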
+void ( *lvm_hd_name_ptr) ( char *, int) = NULL;
+#endif
+
 struct gendisk *gendisk_head = NULL;
 
 static int current_minor = 0;
@@ -126,6 +131,14 @@
 			break;
 		case MD_MAJOR:
 			unit -= 'a'-'0';
+			break;
+#if defined CONFIG_BLK_DEV_LVM || defined CONFIG_BLK_DEV_LVM_MODULE
+		case LVM_BLK_MAJOR:
+			*buf = 0;
+			if ( lvm_hd_name_ptr != NULL)
+				( lvm_hd_name_ptr) ( buf, minor);
+			return buf;
+#endif
 	}
 	part = minor & ((1 << hd->minor_shift) - 1);
 	if (hd->major >= SCSI_DISK1_MAJOR && hd->major <= SCSI_DISK7_MAJOR) {
--- linux/drivers/block/ll_rw_blk.c.lvm.orig	Fri Jun 16 17:01:47 2000
+++ linux/drivers/block/ll_rw_blk.c	Sun Jun 18 10:10:09 2000
@@ -27,6 +27,14 @@
 
 #include <linux/module.h>
 
+#if defined CONFIG_BLK_DEV_LVM || defined CONFIG_BLK_DEV_LVM_MODULE
+#include <linux/lvm.h>
+   /* function pointer to the LVM driver remapping function
+      which will be set up during driver/module init; necessary
+      to be able to load LVM as a module */
+int (*lvm_map_ptr) (struct buffer_head *, int) = NULL;
+#endif
+
 /*
  * The request-struct contains all necessary data
  * to load a nr of sectors into memory
@@ -731,6 +739,9 @@
 	if (!req) {
 		/* MD and loop can't handle plugging without deadlocking */
 		if (major != MD_MAJOR && major != LOOP_MAJOR && 
+#if defined CONFIG_BLK_DEV_LVM || defined CONFIG_BLK_DEV_LVM_MODULE
+		    major != LVM_BLK_MAJOR &&
+#endif
 		    major != DDV_MAJOR && major != NBD_MAJOR)
 			plug_device(blk_dev + major); /* is atomic */
 	} else switch (major) {
@@ -898,13 +909,34 @@
 			       correct_size, bh[i]->b_size);
 			goto sorry;
 		}
-
-		/* Md remaps blocks now */
+		/* LVM and MD remap blocks now */
+#if defined CONFIG_BLK_DEV_LVM || defined CONFIG_BLK_DEV_LVM_MODULE
+		major = MAJOR(bh[i]->b_dev);
+		if (major == LVM_BLK_MAJOR) {
+			if (lvm_map_ptr == NULL) {
+				printk(KERN_ERR
+				     "Bad lvm_map_ptr in ll_rw_block\n");
+				goto sorry;
+			}
+			if ((lvm_map_ptr) (bh[i], rw) != 0) {
+				printk(KERN_ERR
+				       "Bad lvm_map in ll_rw_block\n");
+				goto sorry;
+			}
+			/* remap major too ... */
+			major = MAJOR(bh[i]->b_rdev);
+		} else {
+			bh[i]->b_rdev = bh[i]->b_dev;
+			bh[i]->b_rsector = bh[i]->b_blocknr * (bh[i]->b_size >> 9);
+		}
+#else
 		bh[i]->b_rdev = bh[i]->b_dev;
 		bh[i]->b_rsector=bh[i]->b_blocknr*(bh[i]->b_size >> 9);
+#endif
 #ifdef CONFIG_BLK_DEV_MD
 		if (major==MD_MAJOR &&
-			md_map (bh[i]->b_dev, &bh[i]->b_rdev,
+		    /* changed       v   to allow LVM to remap */
+		    md_map (bh[i]->b_rdev, &bh[i]->b_rdev,
 			    &bh[i]->b_rsector, bh[i]->b_size >> 9)) {
 		        printk (KERN_ERR
 				"Bad md_map in ll_rw_block\n");
@@ -942,7 +955,8 @@
 		if (bh[i]) {
 			set_bit(BH_Req, &bh[i]->b_state);
 #ifdef CONFIG_BLK_DEV_MD
-			if (MAJOR(bh[i]->b_dev) == MD_MAJOR) {
+			/* changed         v  to allow LVM to remap */
+			if (MAJOR(bh[i]->b_rdev) == MD_MAJOR) {
 				md_make_request(bh[i], rw);
 				continue;
 			}
@@ -1109,6 +1141,9 @@
 #ifdef CONFIG_SJCD
 	sjcd_init();
 #endif CONFIG_SJCD
+#ifdef CONFIG_BLK_DEV_LVM
+	lvm_init();
+#endif
 #ifdef CONFIG_BLK_DEV_MD
 	md_init();
 #endif CONFIG_BLK_DEV_MD
--- linux/drivers/block/lvm-snap.c.lvm.orig	Sun Jun 18 10:10:09 2000
+++ linux/drivers/block/lvm-snap.c	Sun Jun 18 10:27:34 2000
@@ -0,0 +1,436 @@
+/*
+ * kernel/lvm-snap.c
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * LVM snapshot driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ * 
+ * LVM driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA. 
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/blkdev.h>
+#include <linux/smp_lock.h>
+#include <linux/types.h>
+#include <linux/iobuf.h>
+#include <linux/lvm.h>
+
+
+static char *lvm_snap_version __attribute__ ((unused)) = "LVM 0.8final (15/02/2000)\n";
+
+extern const char *const lvm_name;
+extern int lvm_blocksizes[];
+
+void lvm_snapshot_release(lv_t *);
+
+#define hashfn(dev,block,mask,chunk_size) \
+	((HASHDEV(dev)^((block)/(chunk_size))) & (mask))
+
+static inline lv_block_exception_t *
+lvm_find_exception_table(kdev_t org_dev, unsigned long org_start, lv_t * lv)
+{
+	struct list_head * hash_table = lv->lv_snapshot_hash_table, * next;
+	unsigned long mask = lv->lv_snapshot_hash_mask;
+	int chunk_size = lv->lv_chunk_size;
+	lv_block_exception_t * ret;
+	int i = 0;
+
+	hash_table = &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
+	ret = NULL;
+	for (next = hash_table->next; next != hash_table; next = next->next)
+	{
+		lv_block_exception_t * exception;
+
+		exception = list_entry(next, lv_block_exception_t, hash);
+		if (exception->rsector_org == org_start &&
+		    exception->rdev_org == org_dev)
+		{
+			if (i)
+			{
+				/* fun, isn't it? :) */
+				list_del(next);
+				list_add(next, hash_table);
+			}
+			ret = exception;
+			break;
+		}
+		i++;
+	}
+	return ret;
+}
+
+static inline void lvm_hash_link(lv_block_exception_t * exception,
+				 kdev_t org_dev, unsigned long org_start,
+				 lv_t * lv)
+{
+	struct list_head * hash_table = lv->lv_snapshot_hash_table;
+	unsigned long mask = lv->lv_snapshot_hash_mask;
+	int chunk_size = lv->lv_chunk_size;
+
+	hash_table = &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
+	list_add(&exception->hash, hash_table);
+}
+
+int lvm_snapshot_remap_block(kdev_t * org_dev, unsigned long * org_sector,
+			     unsigned long pe_start, lv_t * lv)
+{
+	int ret;
+	unsigned long pe_off, pe_adjustment, __org_start;
+	kdev_t __org_dev;
+	int chunk_size = lv->lv_chunk_size;
+	lv_block_exception_t * exception;
+
+	pe_off = pe_start % chunk_size;
+	pe_adjustment = (*org_sector-pe_off) % chunk_size;
+	__org_start = *org_sector - pe_adjustment;
+	__org_dev = *org_dev;
+
+	ret = 0;
+	exception = lvm_find_exception_table(__org_dev, __org_start, lv);
+	if (exception)
+	{
+		*org_dev = exception->rdev_new;
+		*org_sector = exception->rsector_new + pe_adjustment;
+		ret = 1;
+	}
+	return ret;
+}
+
+static void lvm_drop_snapshot(lv_t * lv_snap, const char * reason)
+{
+	kdev_t last_dev;
+	int i;
+
+	/* no exception storage space available for this snapshot
+	   or error on this snapshot --> release it */
+	invalidate_buffers(lv_snap->lv_dev);
+
+	last_dev = 0;
+	for (i = 0; i < lv_snap->lv_remap_ptr; i++) {
+		if ( lv_snap->lv_block_exception[i].rdev_new != last_dev) {
+			last_dev = lv_snap->lv_block_exception[i].rdev_new;
+			invalidate_buffers(last_dev);
+		}
+	}
+
+	lvm_snapshot_release(lv_snap);
+
+	printk(KERN_INFO
+	       "%s -- giving up to snapshot %s on %s due to %s\n",
+	       lvm_name, lv_snap->lv_snapshot_org->lv_name, lv_snap->lv_name,
+	       reason);
+}
+
+static inline void lvm_snapshot_prepare_blocks(unsigned long * blocks,
+					       unsigned long start,
+					       int nr_sectors,
+					       int blocksize)
+{
+	int i, sectors_per_block, nr_blocks;
+
+	sectors_per_block = blocksize >> 9;
+	nr_blocks = nr_sectors / sectors_per_block;
+	start /= sectors_per_block;
+
+	for (i = 0; i < nr_blocks; i++)
+		blocks[i] = start++;
+}
+
+static inline int get_blksize(kdev_t dev)
+{
+	int correct_size = BLOCK_SIZE, i, major;
+
+	major = MAJOR(dev);
+	if (blksize_size[major])
+	{
+		i = blksize_size[major][MINOR(dev)];
+		if (i)
+			correct_size = i;
+	}
+	return correct_size;
+}
+
+#ifdef DEBUG_SNAPSHOT
+static inline void invalidate_snap_cache(unsigned long start, unsigned long nr,
+					 kdev_t dev)
+{
+	struct buffer_head * bh;
+	int sectors_per_block, i, blksize, minor;
+
+	minor = MINOR(dev);
+	blksize = lvm_blocksizes[minor];
+	sectors_per_block = blksize >> 9;
+	nr /= sectors_per_block;
+	start /= sectors_per_block;
+
+	for (i = 0; i < nr; i++)
+	{
+		bh = get_hash_table(dev, start++, blksize);
+		if (bh)
+			bforget(bh);
+	}
+}
+#endif
+
+/*
+ * copy on write handler for one snapshot logical volume
+ *
+ * read the original blocks and store it/them on the new one(s).
+ * if there is no exception storage space free any longer --> release snapshot.
+ *
+ * this routine gets called for each _first_ write to a physical chunk.
+ */
+int lvm_snapshot_COW(kdev_t org_phys_dev,
+		     unsigned long org_phys_sector,
+		     unsigned long org_pe_start,
+		     unsigned long org_virt_sector,
+		     lv_t * lv_snap)
+{
+	const char * reason;
+	unsigned long org_start, snap_start, virt_start, pe_off;
+	int idx = lv_snap->lv_remap_ptr, chunk_size = lv_snap->lv_chunk_size;
+	kdev_t snap_phys_dev;
+	struct kiobuf * iobuf;
+	unsigned long blocks[KIO_MAX_SECTORS];
+	int blksize_snap, blksize_org, min_blksize, max_blksize;
+	int max_sectors, nr_sectors;
+
+	/* check if we are out of snapshot space */
+	if (idx >= lv_snap->lv_remap_end)
+		goto fail_out_of_space;
+
+	/* calculate physical boundaries of source chunk */
+	pe_off = org_pe_start % chunk_size;
+	org_start = org_phys_sector - ((org_phys_sector-pe_off) % chunk_size);
+	virt_start = org_virt_sector - (org_phys_sector - org_start);
+
+	/* calculate physical boundaries of destination chunk */
+	snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
+	snap_start = lv_snap->lv_block_exception[idx].rsector_new;
+
+#ifdef DEBUG_SNAPSHOT
+	printk(KERN_INFO
+	       "%s -- COW: "
+	       "org %02d:%02d faulting %lu start %lu, "
+	       "snap %02d:%02d start %lu, "
+	       "size %d, pe_start %lu pe_off %lu, virt_sec %lu\n",
+	       lvm_name,
+	       MAJOR(org_phys_dev), MINOR(org_phys_dev), org_phys_sector,
+	       org_start,
+	       MAJOR(snap_phys_dev), MINOR(snap_phys_dev), snap_start,
+	       chunk_size,
+	       org_pe_start, pe_off,
+	       org_virt_sector);
+#endif
+
+	iobuf = lv_snap->lv_iobuf;
+
+	blksize_org = get_blksize(org_phys_dev);
+	blksize_snap = get_blksize(snap_phys_dev);
+	max_blksize = max(blksize_org, blksize_snap);
+	min_blksize = min(blksize_org, blksize_snap);
+	max_sectors = KIO_MAX_SECTORS * (min_blksize>>9);
+
+	if (chunk_size % (max_blksize>>9))
+		goto fail_blksize;
+
+	while (chunk_size)
+	{
+		nr_sectors = min(chunk_size, max_sectors);
+		chunk_size -= nr_sectors;
+
+		iobuf->length = nr_sectors << 9;
+
+		lvm_snapshot_prepare_blocks(blocks, org_start,
+					    nr_sectors, blksize_org);
+		if (brw_kiovec(READ, 1, &iobuf, org_phys_dev,
+			       blocks, blksize_org, 0) != (nr_sectors<<9))
+			goto fail_raw_read;
+
+		lvm_snapshot_prepare_blocks(blocks, snap_start,
+					    nr_sectors, blksize_snap);
+		if (brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev,
+			       blocks, blksize_snap, 0) != (nr_sectors<<9))
+			goto fail_raw_write;
+	}
+
+#ifdef DEBUG_SNAPSHOT
+	/* invalidate the logical snapshot buffer cache */
+	invalidate_snap_cache(virt_start, lv_snap->lv_chunk_size,
+			      lv_snap->lv_dev);
+#endif
+
+	/* the original chunk is now stored on the snapshot volume
+	   so update the exception table */
+	lv_snap->lv_block_exception[idx].rdev_org = org_phys_dev;
+	lv_snap->lv_block_exception[idx].rsector_org = org_start;
+	lvm_hash_link(lv_snap->lv_block_exception + idx,
+		      org_phys_dev, org_start, lv_snap);
+	lv_snap->lv_remap_ptr = idx + 1;
+	return 0;
+
+	/* slow path */
+ out:
+	lvm_drop_snapshot(lv_snap, reason);
+	return -1;
+
+ fail_out_of_space:
+	reason = "out of space";
+	goto out;
+ fail_raw_read:
+	reason = "read error";
+	goto out;
+ fail_raw_write:
+	reason = "write error";
+	goto out;
+ fail_blksize:
+	reason = "blocksize error";
+	goto out;
+}
+
+static int lvm_snapshot_alloc_iobuf_pages(struct kiobuf * iobuf, int sectors)
+{
+	int bytes, nr_pages, err, i;
+
+	bytes = sectors << 9;
+	nr_pages = (bytes + ~PAGE_MASK) >> PAGE_SHIFT;
+	err = expand_kiobuf(iobuf, nr_pages);
+	if (err)
+		goto out;
+
+	err = -ENOMEM;
+	iobuf->locked = 1;
+	iobuf->nr_pages = 0;
+	for (i = 0; i < nr_pages; i++)
+	{
+		struct page * page;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,27)
+		page = alloc_page(GFP_KERNEL);
+		if (!page)
+			goto out;
+#else
+		{
+			unsigned long addr = __get_free_page(GFP_USER);
+			if (!addr)
+				goto out;
+			iobuf->pagelist[i] = addr;
+			page = mem_map + MAP_NR(addr);
+		}
+#endif
+
+		iobuf->maplist[i] = page;
+		/* the only point to lock the page here is to be allowed
+		   to share unmap_kiobuf() in the fail-path */
+#ifndef LockPage
+#define LockPage(map) set_bit(PG_locked, &(map)->flags)
+#endif
+		LockPage(page);
+		iobuf->nr_pages++;
+	}
+	iobuf->offset = 0;
+
+	err = 0;
+ out:
+	return err;
+}
+
+static int calc_max_buckets(void)
+{
+	unsigned long mem;
+
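+	/* allow at most ~2% of physical memory for the snapshot exception
+	   hash table */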
+	mem = num_physpages << PAGE_SHIFT;
+	mem /= 100;
+	mem *= 2;
+	mem /= sizeof(struct list_head);
+
+	return mem;
+}
+
+static int lvm_snapshot_alloc_hash_table(lv_t * lv)
+{
+	int err;
+	unsigned long buckets, max_buckets, size;
+	struct list_head * hash;
+
+	buckets = lv->lv_remap_end;
+	max_buckets = calc_max_buckets();
+	buckets = min(buckets, max_buckets);
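+	/* round the bucket count down to a power of two so that
+	   (buckets - 1) can be used as the hash mask */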
+	while (buckets & (buckets-1))
+		buckets &= (buckets-1);
+
+	size = buckets * sizeof(struct list_head);
+
+	err = -ENOMEM;
+	hash = vmalloc(size);
+	lv->lv_snapshot_hash_table = hash;
+
+	if (!hash)
+		goto out;
+
+	lv->lv_snapshot_hash_mask = buckets-1;
+	while (buckets--)
+		INIT_LIST_HEAD(hash+buckets);
+	err = 0;
+ out:
+	return err;
+}
+
+int lvm_snapshot_alloc(lv_t * lv_snap)
+{
+	int err, blocksize, max_sectors;
+
+	err = alloc_kiovec(1, &lv_snap->lv_iobuf);
+	if (err)
+		goto out;
+
+	blocksize = lvm_blocksizes[MINOR(lv_snap->lv_dev)];
+	max_sectors = KIO_MAX_SECTORS << (PAGE_SHIFT-9);
+
+	err = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_iobuf, max_sectors);
+	if (err)
+		goto out_free_kiovec;
+
+	err = lvm_snapshot_alloc_hash_table(lv_snap);
+	if (err)
+		goto out_free_kiovec;
+ out:
+	return err;
+
+ out_free_kiovec:
+	unmap_kiobuf(lv_snap->lv_iobuf);
+	free_kiovec(1, &lv_snap->lv_iobuf);
+	goto out;
+}
+
+void lvm_snapshot_release(lv_t * lv)
+{
+	if (lv->lv_block_exception)
+	{
+		vfree(lv->lv_block_exception);
+		lv->lv_block_exception = NULL;
+	}
+	if (lv->lv_snapshot_hash_table)
+	{
+		vfree(lv->lv_snapshot_hash_table);
+		lv->lv_snapshot_hash_table = NULL;
+	}
+	if (lv->lv_iobuf)
+	{
+		free_kiovec(1, &lv->lv_iobuf);
+		lv->lv_iobuf = NULL;
+	}
+}
--- linux/drivers/block/lvm.c.lvm.orig	Sun Jun 18 10:10:09 2000
+++ linux/drivers/block/lvm.c	Sun Jun 18 10:10:09 2000
@@ -0,0 +1,2561 @@
+/*
+ * kernel/lvm.c
+ *
+ * Copyright (C) 1997 - 2000  Heinz Mauelshagen, Germany
+ *
+ * February-November 1997
+ * April-May,July-August,November 1998
+ * January-March,May,July,September,October 1999
+ *
+ *
+ * LVM driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ * 
+ * LVM driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA. 
+ *
+ */
+
+/*
+ * Changelog
+ *
+ *    09/11/1997 - added chr ioctls VG_STATUS_GET_COUNT
+ *                 and VG_STATUS_GET_NAMELIST
+ *    18/01/1998 - change lvm_chr_open/close lock handling
+ *    30/04/1998 - changed LV_STATUS ioctl to LV_STATUS_BYNAME and
+ *               - added   LV_STATUS_BYINDEX ioctl
+ *               - used lvm_status_byname_req_t and
+ *                      lvm_status_byindex_req_t vars
+ *    04/05/1998 - added multiple device support
+ *    08/05/1998 - added support to set/clear extendable flag in volume group
+ *    09/05/1998 - changed output of lvm_proc_get_info() because of
+ *                 support for free (eg. longer) logical volume names
+ *    12/05/1998 - added spin_locks (thanks to Pascal van Dam
+ *                 <pascal@ramoth.xs4all.nl>)
+ *    25/05/1998 - fixed handling of locked PEs in lvm_map() and lvm_chr_ioctl()
+ *    26/05/1998 - reactivated verify_area by access_ok
+ *    07/06/1998 - used vmalloc/vfree instead of kmalloc/kfree to go
+ *                 beyond 128/256 KB max allocation limit per call
+ *               - #ifdef blocked spin_lock calls to avoid compile errors
+ *                 with 2.0.x
+ *    11/06/1998 - another enhancement to spinlock code in lvm_chr_open()
+ *                 and use of LVM_VERSION_CODE instead of my own macros
+ *                 (thanks to  Michael Marxmeier <mike@msede.com>)
+ *    07/07/1998 - added statistics in lvm_map()
+ *    08/07/1998 - saved statistics in do_lv_extend_reduce()
+ *    25/07/1998 - used __initfunc macro
+ *    02/08/1998 - changes for official char/block major numbers
+ *    07/08/1998 - avoided init_module() and cleanup_module() to be static
+ *    30/08/1998 - changed VG lv_open counter from sum of LV lv_open counters
+ *                 to sum of LVs open (no matter how often each is)
+ *    01/09/1998 - fixed lvm_gendisk.part[] index error
+ *    07/09/1998 - added copying of lv_current_pe-array
+ *                 in LV_STATUS_BYINDEX ioctl
+ *    17/11/1998 - added KERN_* levels to printk
+ *    13/01/1999 - fixed LV index bug in do_lv_create() which hit lvrename
+ *    07/02/1999 - fixed spinlock handling bug in case of LVM_RESET
+ *                 by moving spinlock code from lvm_chr_open()
+ *                 to lvm_chr_ioctl()
+ *               - added LVM_LOCK_LVM ioctl to lvm_chr_ioctl()
+ *               - allowed LVM_RESET and retrieval commands to go ahead;
+ *                 only other update ioctls are blocked now
+ *               - fixed pv->pe to NULL for pv_status
+ *               - using lv_req structure in lvm_chr_ioctl() now
+ *               - fixed NULL ptr reference bug in do_lv_extend_reduce()
+ *                 caused by uncontiguous PV array in lvm_chr_ioctl(VG_REDUCE)
+ *    09/02/1999 - changed BLKRASET and BLKRAGET in lvm_chr_ioctl() to
+ *                 handle logical volume private read ahead sector
+ *               - implemented LV read_ahead handling with lvm_blk_read()
+ *                 and lvm_blk_write()
+ *    10/02/1999 - implemented 2.[12].* support function lvm_hd_name()
+ *                 to be used in drivers/block/genhd.c by disk_name()
+ *    12/02/1999 - fixed index bug in lvm_blk_ioctl(), HDIO_GETGEO
+ *               - enhanced gendisk insert/remove handling
+ *    16/02/1999 - changed to dynamic block minor number allocation to
+ *                 have as many as 99 volume groups with 256 logical volumes
+ *                 as the grand total; this allows having 1 volume group with
+ *                 up to 256 logical volumes in it
+ *    21/02/1999 - added LV open count information to proc filesystem
+ *               - substituted redundant LVM_RESET code by calls
+ *                 to do_vg_remove()
+ *    22/02/1999 - used schedule_timeout() to be more responsive
+ *                 in case of do_vg_remove() with lots of logical volumes
+ *    19/03/1999 - fixed NULL pointer bug in module_init/lvm_init
+ *    17/05/1999 - used DECLARE_WAIT_QUEUE_HEAD macro (>2.3.0)
+ *               - enhanced lvm_hd_name support
+ *    03/07/1999 - avoided use of KERNEL_VERSION macro based ifdefs and
+ *                 memcpy_tofs/memcpy_fromfs macro redefinitions
+ *    06/07/1999 - corrected reads/writes statistic counter copy in case
+ *                 of striped logical volume
+ *    28/07/1999 - implemented snapshot logical volumes
+ *                 - lvm_chr_ioctl
+ *                   - LV_STATUS_BYINDEX
+ *                   - LV_STATUS_BYNAME
+ *                 - do_lv_create
+ *                 - do_lv_remove
+ *                 - lvm_map
+ *                 - new lvm_snapshot_remap_block
+ *                 - new lvm_snapshot_remap_new_block
+ *    08/10/1999 - implemented support for multiple snapshots per
+ *                 original logical volume
+ *    12/10/1999 - support for 2.3.19
+ *    11/11/1999 - support for 2.3.28
+ *    21/11/1999 - changed lvm_map() interface to buffer_head based
+ *    19/12/1999 - support for 2.3.33
+ *    01/01/2000 - changed locking concept in lvm_map(),
+ *                 do_vg_create() and do_lv_remove()
+ *    07/07/2000 - support for 2.2.16 (backport from 2.4) by Andreas Dilger
+ *                 <adilger@turbolinux.com>
+ *
+ */
+
+
+/*
+ * TODO
+ *
+ *   - implement special handling of unavailable physical volumes
+ *
+ */
+
+char *lvm_version = "LVM version 0.8final  by Heinz Mauelshagen (22/2/2000)\n";
+char *lvm_short_version = "version 0.8final  (22/02/2000)";
+
+#define MAJOR_NR	LVM_BLK_MAJOR
+#define DEVICE_OFF(device)
+#define DEVICE_REQUEST lvm_dummy_device_request
+#define LOCAL_END_REQUEST
+
+#include <linux/config.h>
+#include <linux/version.h>
+
+#ifdef MODVERSIONS
+#  undef MODULE
+#  define MODULE
+#    include <linux/modversions.h>
+#endif
+
+#ifdef MODULE
+#  include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+#include <linux/hdreg.h>
+#include <linux/stat.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/locks.h>
+#include <linux/smp_lock.h>
+#include <asm/ioctl.h>
+#include <asm/segment.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+
+#include <linux/blk.h>
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
+#include <linux/blkpg.h>
+#endif
+
+#include <linux/errno.h>
+#include <linux/lvm.h>
+
+#define	LVM_CORRECT_READ_AHEAD(a)		\
+do {						\
+	if ((a) < LVM_MIN_READ_AHEAD)		\
+		(a) =  LVM_MIN_READ_AHEAD;	\
+	if ((a) > LVM_MAX_READ_AHEAD)		\
+		(a) = LVM_MAX_READ_AHEAD;	\
+} while(0)
+
+#ifndef WRITEA
+#define WRITEA WRITE
+#endif
+
+
+/*
+ * External function prototypes
+ */
+#ifdef MODULE
+int init_module ( void);
+void cleanup_module ( void);
+#else
+extern int lvm_init ( void);
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 30)
+static void lvm_dummy_device_request ( request_queue_t*);
+#else
+static void lvm_dummy_device_request ( void);
+#endif
+static int lvm_blk_ioctl ( struct inode *, struct file *, uint, ulong);
+static int lvm_blk_open  ( struct inode *, struct file *);
+
+static int  lvm_chr_open  ( struct inode *, struct file *);
+
+static int lvm_chr_release ( struct inode *, struct file *);
+static int lvm_blk_release ( struct inode *, struct file *);
+
+static int  lvm_chr_ioctl ( struct inode *, struct file *, uint, ulong);
+
+#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 30)
+static int lvm_proc_get_info ( char *, char **, off_t, int);
+static int (*lvm_proc_get_info_ptr)(char *, char **, off_t, int) =
+   &lvm_proc_get_info;
+#else
+static int lvm_proc_get_info ( char *, char **, off_t, int, int);
+#endif
+#endif
+
+#ifdef LVM_HD_NAME
+void lvm_hd_name ( char*, int);
+#endif
+
+/* external snapshot calls */
+int lvm_snapshot_remap_block ( kdev_t*, ulong*, unsigned long, lv_t*);
+int lvm_snapshot_COW(kdev_t, unsigned long, unsigned long,
+		     unsigned long, lv_t *);
+int lvm_snapshot_alloc(lv_t *);
+void lvm_snapshot_release(lv_t *);
+
+/* End external function prototypes */
+
+
+/*
+ * Internal function prototypes
+ */
+static void lvm_init_vars ( void);
+extern int (*lvm_map_ptr) ( struct buffer_head*, int);
+
+
+#ifdef LVM_HD_NAME
+extern void (*lvm_hd_name_ptr) ( char*, int);
+#endif
+static int lvm_map ( struct buffer_head*, int);
+static int do_vg_create ( int, void *);
+static int do_vg_remove ( int);
+static int do_lv_create ( int, char *, lv_t *);
+static int do_lv_remove ( int, char *, int);
+static int do_lv_extend_reduce ( int, char *, lv_t *);
+static void lvm_geninit ( struct gendisk *);
+#ifdef LVM_GET_INODE
+   static struct inode *lvm_get_inode ( kdev_t);
+   void lvm_clear_inode ( struct inode *);
+#endif
+inline int  lvm_strlen ( char *);
+inline void lvm_memcpy ( char *, char *, int);
+inline int  lvm_strcmp ( char *, char *);
+inline char *lvm_strrchr ( char *, char c);
+/* END Internal function prototypes */
+
+
+/* volume group descriptor area pointers */
+static vg_t *vg[ABS_MAX_VG + 1];
+static pv_t *pvp  = NULL;
+static lv_t *lvp  = NULL;
+static pe_t *pep  = NULL;
+static pe_t *pep1 = NULL;
+
+
+/* map from block minor number to VG and LV numbers */
+typedef struct {
+   int vg_number;
+   int lv_number;
+} vg_lv_map_t;
+static vg_lv_map_t vg_lv_map[ABS_MAX_LV];
+
+
+/* Request structures (lvm_chr_ioctl()) */
+static pv_change_req_t pv_change_req;
+static pv_flush_req_t  pv_flush_req;
+static pv_status_req_t pv_status_req;
+static pe_lock_req_t   pe_lock_req;
+static le_remap_req_t  le_remap_req;
+static lv_req_t        lv_req;
+
+#ifdef LVM_TOTAL_RESET
+static int lvm_reset_spindown = 0;
+#endif
+
+static char pv_name[NAME_LEN];
+/* static char rootvg[NAME_LEN] = { 0, }; */
+static uint lv_open = 0;
+const char *const lvm_name = LVM_NAME;
+static int lock = 0;
+static int loadtime = 0;
+static uint vg_count = 0;
+static long lvm_chr_open_count = 0;
+static ushort lvm_iop_version = LVM_DRIVER_IOP_VERSION;
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
+static DECLARE_WAIT_QUEUE_HEAD ( lvm_wait);
+static DECLARE_WAIT_QUEUE_HEAD ( lvm_map_wait);
+#else
+struct wait_queue *lvm_wait = NULL;
+struct wait_queue *lvm_map_wait = NULL;
+#endif
+
+static spinlock_t lvm_lock = SPIN_LOCK_UNLOCKED;
+
+#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 31)
+static struct proc_dir_entry lvm_proc_entry = {
+   0, 3, LVM_NAME, S_IFREG | S_IRUGO,
+   1, 0, 0, 0,
+   NULL,
+   lvm_proc_get_info,
+   NULL, NULL, NULL, NULL, NULL,
+};
+#endif
+#endif
+
+static struct file_operations lvm_chr_fops = {
+	ioctl:		lvm_chr_ioctl,
+	open:		lvm_chr_open,
+	release:	lvm_chr_release,
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 38)
+static struct file_operations lvm_blk_fops = {
+	read:		block_read,
+	write:		block_write,
+	ioctl:		lvm_blk_ioctl,
+	open:		lvm_blk_open,
+	release:	lvm_blk_release,
+	fsync:		block_fsync,
+};
+#else
+static struct block_device_operations lvm_blk_fops =
+{
+	open:		lvm_blk_open,
+	release:	lvm_blk_release,
+	ioctl:		lvm_blk_ioctl,
+};
+#endif
+
+/* gendisk structures */
+static struct hd_struct lvm_hd_struct[MAX_LV];
+int lvm_blocksizes[MAX_LV] = { 0, };
+static int lvm_size[MAX_LV] = { 0, };
+static struct gendisk lvm_gendisk = {
+   MAJOR_NR,			/* major # */
+   LVM_NAME,			/* name of major */
+   0,				/* number of times minor is shifted
+				   to get real minor */
+   1,				/* maximum partitions per device */
+   MAX_LV,			/* maximum number of real devices */
+   lvm_geninit,			/* initialization called before we
+				   do other things */
+   lvm_hd_struct,		/* partition table */
+   lvm_size,			/* device size in blocks, copied
+				   to block_size[] */
+   MAX_LV,			/* number of real devices */
+   NULL,			/* internal */
+   NULL,			/* pointer to next gendisk struct (internal) */
+};
+
+
+#ifdef MODULE
+/*
+ * Module initialization...
+ */
+int init_module ( void)
+#else
+/*
+ * Driver initialization...
+ */
+#ifdef __initfunc
+__initfunc ( int lvm_init ( void))
+#else
+int __init lvm_init ( void)
+#endif
+#endif /* #ifdef MODULE */
+{
+   struct gendisk *gendisk_ptr = NULL;
+
+   lvm_init_vars ();
+
+   /* insert our gendisk at the corresponding major */
+   lvm_geninit ( &lvm_gendisk);
+   if ( gendisk_head != NULL) {
+      gendisk_ptr = gendisk_head;
+      while ( gendisk_ptr->next != NULL &&
+              gendisk_ptr->major > lvm_gendisk.major) {
+         gendisk_ptr = gendisk_ptr->next;
+      }
+      lvm_gendisk.next = gendisk_ptr->next;
+      gendisk_ptr->next = &lvm_gendisk;
+   } else {
+      gendisk_head = &lvm_gendisk;
+      lvm_gendisk.next = NULL;
+   }
+
+   /* reference from drivers/block/ll_rw_blk.c */
+   lvm_map_ptr = lvm_map;
+
+#ifdef LVM_HD_NAME
+   /* reference from drivers/block/genhd.c */
+   lvm_hd_name_ptr = lvm_hd_name;
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 30)
+   blk_init_queue ( BLK_DEFAULT_QUEUE ( MAJOR_NR), lvm_dummy_device_request);
+#else
+   blk_dev[MAJOR_NR].request_fn = lvm_dummy_device_request;
+   blk_dev[MAJOR_NR].current_request = NULL;
+#endif
+
+   /* optional read root VGDA */
+/*
+   if ( *rootvg != 0) {
+      vg_read_with_pv_and_lv ( rootvg, &vg);
+   }
+*/
+
+   if ( register_chrdev ( LVM_CHAR_MAJOR, lvm_name, &lvm_chr_fops) < 0) {
+      printk ( KERN_ERR "%s -- register_chrdev failed\n", lvm_name);
+      return -EIO;
+   }
+   if ( register_blkdev ( MAJOR_NR, lvm_name, &lvm_blk_fops) < 0) {
+      printk ( "%s -- register_blkdev failed\n", lvm_name);
+      if ( unregister_chrdev ( LVM_CHAR_MAJOR, lvm_name) < 0)
+         printk ( KERN_ERR "%s -- unregister_chrdev failed\n", lvm_name);
+      return -EIO;
+   }
+
+#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 25)
+    create_proc_info_entry ( LVM_NAME, S_IFREG | S_IRUGO,
+                             &proc_root, lvm_proc_get_info_ptr);
+#  else
+    proc_register ( &proc_root, &lvm_proc_entry);
+#  endif
+#endif
+
+   printk ( KERN_INFO
+            "%s%s -- "
+#ifdef MODULE
+            "Module"
+#else
+            "Driver"
+#endif
+            " successfully initialized\n",
+            lvm_version, lvm_name);
+
+   return 0;
+} /* init_module () / lvm_init () */
+
+
+#ifdef MODULE
+/*
+ * Module cleanup...
+ */
+void cleanup_module ( void) {
+   struct gendisk *gendisk_ptr = NULL, *gendisk_ptr_prev = NULL;
+
+   if ( unregister_chrdev ( LVM_CHAR_MAJOR, lvm_name) < 0) {
+      printk ( KERN_ERR "%s -- unregister_chrdev failed\n", lvm_name);
+   }
+   if ( unregister_blkdev ( MAJOR_NR, lvm_name) < 0) {
+      printk ( KERN_ERR "%s -- unregister_blkdev failed\n", lvm_name);
+   }
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 30)
+   blk_cleanup_queue ( BLK_DEFAULT_QUEUE ( MAJOR_NR));
+#else
+   blk_dev[MAJOR_NR].request_fn = NULL;
+   blk_dev[MAJOR_NR].current_request = NULL;
+#endif
+
+   gendisk_ptr = gendisk_ptr_prev = gendisk_head;
+   while ( gendisk_ptr != NULL) {
+      if ( gendisk_ptr == &lvm_gendisk) break;
+      gendisk_ptr_prev = gendisk_ptr;
+      gendisk_ptr = gendisk_ptr->next;
+   }
+   /* delete our gendisk from chain */
+   if ( gendisk_ptr == &lvm_gendisk) gendisk_ptr_prev->next = gendisk_ptr->next;
+
+   blk_size[MAJOR_NR] = NULL;
+   blksize_size[MAJOR_NR] = NULL;
+
+#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 30)
+   remove_proc_entry ( LVM_NAME, &proc_root);
+#  else
+   proc_unregister ( &proc_root, lvm_proc_entry.low_ino);
+#  endif 
+#endif
+
+   /* reference from linux/drivers/block/ll_rw_blk.c */
+   lvm_map_ptr = NULL;
+
+#ifdef LVM_HD_NAME
+   /* reference from linux/drivers/block/genhd.c */
+   lvm_hd_name_ptr = NULL;
+#endif
+
+   printk ( KERN_INFO "%s -- Module successfully deactivated\n", lvm_name);
+
+   return;
+} /* void cleanup_module () */
+#endif /* #ifdef MODULE */
+
+
+/*
+ * support function to initialize lvm variables
+ */
+#ifdef __initfunc
+__initfunc ( void lvm_init_vars ( void))
+#else
+void __init lvm_init_vars ( void)
+#endif
+{
+   int v;
+
+   loadtime = CURRENT_TIME;
+
+   lvm_lock = SPIN_LOCK_UNLOCKED;
+
+   pe_lock_req.lock = UNLOCK_PE;
+   pe_lock_req.data.lv_dev = pe_lock_req.data.pv_dev = 0;
+   pe_lock_req.data.pv_offset = 0;
+
+   /* Initialize VG pointers */
+   for ( v = 0; v <= ABS_MAX_VG; v++) vg[v] = NULL;
+
+   /* Initialize LV -> VG association */
+   for ( v = 0; v < ABS_MAX_LV; v++) {
+      /* index ABS_MAX_VG never used for real VG */
+      vg_lv_map[v].vg_number = ABS_MAX_VG;
+      vg_lv_map[v].lv_number = -1;
+   }
+
+   return;
+} /* lvm_init_vars () */
+
+
+/********************************************************************
+ *
+ * Character device functions
+ *
+ ********************************************************************/
+
+/*
+ * character device open routine
+ */
+static int lvm_chr_open ( struct inode *inode,
+                          struct file *file) {
+   int minor = MINOR ( inode->i_rdev);
+
+#ifdef DEBUG
+   printk ( KERN_DEBUG
+            "%s -- lvm_chr_open MINOR: %d  VG#: %d  mode: 0x%X  lock: %d\n",
+            lvm_name, minor, VG_CHR(minor), file->f_mode, lock);
+#endif
+
+   /* super user validation */
+   if ( ! capable(CAP_SYS_ADMIN)) return -EACCES;
+
+   /* Group special file open */
+   if ( VG_CHR(minor) > MAX_VG) return -ENXIO;
+
+#ifdef MODULE
+   MOD_INC_USE_COUNT;
+#endif
+
+   lvm_chr_open_count++;
+   return 0;
+} /* lvm_chr_open () */
+
+
+/*
+ * character device i/o-control routine
+ *
+ * Only one changing process can do ioctl at one time, others will block.
+ *
+ */
+static int lvm_chr_ioctl ( struct inode *inode, struct file *file,
+                           uint command, ulong a) {
+   int minor = MINOR ( inode->i_rdev);
+   int extendable;
+   ulong  l, le, p, v;
+   ulong size;
+   void  *arg = ( void*) a;
+#ifdef LVM_GET_INODE
+   struct inode *inode_sav;
+#endif
+   lv_status_byname_req_t lv_status_byname_req;
+   lv_status_byindex_req_t lv_status_byindex_req;
+   lv_t lv;
+
+   /* otherwise cc will complain about unused variables */
+   ( void) lvm_lock;
+   
+
+#ifdef DEBUG_IOCTL
+   printk ( KERN_DEBUG
+            "%s -- lvm_chr_ioctl: command: 0x%X  MINOR: %d  "
+            "VG#: %d  mode: 0x%X\n",
+            lvm_name, command, minor, VG_CHR(minor), file->f_mode);
+#endif
+
+#ifdef LVM_TOTAL_RESET
+   if ( lvm_reset_spindown > 0) return -EACCES;
+#endif
+
+
+   /* Main command switch */
+   switch ( command) {
+      /* lock the LVM */
+      case LVM_LOCK_LVM:
+lock_try_again:
+         spin_lock ( &lvm_lock);
+         if( lock != 0 && lock != current->pid ) {
+#ifdef DEBUG_IOCTL
+            printk ( KERN_INFO "lvm_chr_ioctl: %s is locked by pid %d ...\n",
+                               lvm_name, lock);
+#endif
+            spin_unlock ( &lvm_lock);
+            interruptible_sleep_on ( &lvm_wait);
+            if ( current->sigpending != 0) return -EINTR;
+#ifdef LVM_TOTAL_RESET
+            if ( lvm_reset_spindown > 0) return -EACCES;
+#endif
+            goto lock_try_again;
+         }
+         lock = current->pid;
+         spin_unlock ( &lvm_lock);
+         return 0;
+
+
+      /* check lvm version to ensure driver/tools+lib interoperability */
+      case LVM_GET_IOP_VERSION:
+         if ( copy_to_user ( arg, &lvm_iop_version, sizeof ( ushort)) != 0)
+            return -EFAULT;
+         return 0;
+
+
+#ifdef LVM_TOTAL_RESET
+      /* lock reset function */
+      case LVM_RESET:
+         lvm_reset_spindown = 1;
+         for ( v = 0; v < ABS_MAX_VG; v++) {
+            if ( vg[v] != NULL) {
+               do_vg_remove ( v);
+            }
+         }
+
+#ifdef MODULE
+         while ( GET_USE_COUNT ( &__this_module) < 1)
+            MOD_INC_USE_COUNT;
+         while ( GET_USE_COUNT ( &__this_module) > 1)
+            MOD_DEC_USE_COUNT;
+#endif /* MODULE */
+         lock = 0; /* release lock */
+         wake_up_interruptible ( &lvm_wait);
+         return 0;
+#endif /* LVM_TOTAL_RESET */
+
+
+      /* lock/unlock i/o to a physical extent to move it to another
+         physical volume (move's done in user space's pvmove) */
+      case PE_LOCK_UNLOCK:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( &pe_lock_req, arg, sizeof ( pe_lock_req_t)) != 0)
+            return -EFAULT;
+
+         switch ( pe_lock_req.lock) {
+            case LOCK_PE:
+               for ( p = 0; p < vg[VG_CHR(minor)]->pv_max; p++) {
+                  if ( vg[VG_CHR(minor)]->pv[p] != NULL &&
+                       pe_lock_req.data.pv_dev ==
+                       vg[VG_CHR(minor)]->pv[p]->pv_dev)
+                     break;
+               }
+      
+               if ( p == vg[VG_CHR(minor)]->pv_max) return -ENXIO;
+
+               pe_lock_req.lock = UNLOCK_PE;
+               fsync_dev ( pe_lock_req.data.lv_dev);
+               pe_lock_req.lock = LOCK_PE;
+               break;
+
+            case UNLOCK_PE:
+               pe_lock_req.lock = UNLOCK_PE;
+               pe_lock_req.data.lv_dev = pe_lock_req.data.pv_dev = 0;
+               pe_lock_req.data.pv_offset = 0;
+               wake_up ( &lvm_map_wait);
+               break;
+
+            default:
+               return -EINVAL;
+         }
+
+         return 0;
+
+
+      /* remap a logical extent (after moving the physical extent) */
+      case LE_REMAP:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( &le_remap_req, arg,
+                               sizeof ( le_remap_req_t)) != 0)
+            return -EFAULT;
+
+         for ( l = 0; l < vg[VG_CHR(minor)]->lv_max; l++) {
+            if ( vg[VG_CHR(minor)]->lv[l] != NULL &&
+                 lvm_strcmp ( vg[VG_CHR(minor)]->lv[l]->lv_name,
+                              le_remap_req.lv_name) == 0) {
+               for ( le = 0; le < vg[VG_CHR(minor)]->lv[l]->lv_allocated_le;
+                     le++) {
+                  if ( vg[VG_CHR(minor)]->lv[l]->lv_current_pe[le].dev ==
+                       le_remap_req.old_dev &&
+                       vg[VG_CHR(minor)]->lv[l]->lv_current_pe[le].pe ==
+                       le_remap_req.old_pe) {
+                     vg[VG_CHR(minor)]->lv[l]->lv_current_pe[le].dev =
+                        le_remap_req.new_dev;
+                     vg[VG_CHR(minor)]->lv[l]->lv_current_pe[le].pe =
+                        le_remap_req.new_pe;
+                     return 0;
+                  }
+               }
+               return -EINVAL;
+            }
+         }
+
+         return -ENXIO;
+
+
+      /* create a VGDA */
+      case VG_CREATE:
+         return do_vg_create ( minor, arg);
+
+
+      /* remove an inactive VGDA */
+      case VG_REMOVE:
+         return do_vg_remove ( minor);
+
+
+      /* extend a volume group */
+      case VG_EXTEND:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( vg[VG_CHR(minor)]->pv_cur < vg[VG_CHR(minor)]->pv_max) {
+            for ( p = 0; p < vg[VG_CHR(minor)]->pv_max; p++) {
+               if ( vg[VG_CHR(minor)]->pv[p] == NULL) {
+                  if ( ( vg[VG_CHR(minor)]->pv[p] =
+                         kmalloc ( sizeof ( pv_t), GFP_USER)) == NULL) {
+                     printk ( KERN_CRIT
+                              "%s -- VG_EXTEND: kmalloc error PV at line %d\n",
+                              lvm_name, __LINE__);
+                     return -ENOMEM;
+                  }
+                  if ( copy_from_user ( vg[VG_CHR(minor)]->pv[p], arg,
+                                        sizeof ( pv_t)) != 0)
+                     return -EFAULT;
+
+                  vg[VG_CHR(minor)]->pv[p]->pv_status = PV_ACTIVE;
+                  /* We don't need the PE list
+                     in kernel space like LVs pe_t list */
+                  vg[VG_CHR(minor)]->pv[p]->pe = NULL;
+                  vg[VG_CHR(minor)]->pv_cur++;
+                  vg[VG_CHR(minor)]->pv_act++;
+                  vg[VG_CHR(minor)]->pe_total +=
+                     vg[VG_CHR(minor)]->pv[p]->pe_total;
+#ifdef LVM_GET_INODE
+                  /* insert a dummy inode for fs_may_mount */
+                  vg[VG_CHR(minor)]->pv[p]->inode =
+                     lvm_get_inode ( vg[VG_CHR(minor)]->pv[p]->pv_dev);
+#endif
+                  return 0;
+               }
+            }
+         }
+         return -EPERM;
+
+
+      /* reduce a volume group */
+      case VG_REDUCE:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( pv_name, arg, sizeof ( pv_name)) != 0)
+            return -EFAULT;
+
+         for ( p = 0; p < vg[VG_CHR(minor)]->pv_max; p++) {
+            if ( vg[VG_CHR(minor)]->pv[p] != NULL && 
+                 lvm_strcmp ( vg[VG_CHR(minor)]->pv[p]->pv_name,
+                              pv_name) == 0) {
+               if ( vg[VG_CHR(minor)]->pv[p]->lv_cur > 0) return -EPERM;
+               vg[VG_CHR(minor)]->pe_total -=
+                  vg[VG_CHR(minor)]->pv[p]->pe_total;
+               vg[VG_CHR(minor)]->pv_cur--;
+               vg[VG_CHR(minor)]->pv_act--;
+#ifdef DEBUG_VFREE
+               printk ( KERN_DEBUG
+                        "%s -- kfree %d\n", lvm_name, __LINE__);
+#endif
+#ifdef LVM_GET_INODE
+               lvm_clear_inode ( vg[VG_CHR(minor)]->pv[p]->inode);
+#endif
+               kfree ( vg[VG_CHR(minor)]->pv[p]);
+               /* Make PV pointer array contiguous */
+               for ( ; p < vg[VG_CHR(minor)]->pv_max-1; p++)
+                  vg[VG_CHR(minor)]->pv[p] = vg[VG_CHR(minor)]->pv[p + 1];
+               vg[VG_CHR(minor)]->pv[p + 1] = NULL;
+               return 0;
+            }
+         }
+         return -ENXIO;
+
+
+      /* set/clear extendability flag of volume group */
+      case VG_SET_EXTENDABLE:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( &extendable, arg, sizeof ( extendable)) != 0)
+            return -EFAULT;
+
+         if ( extendable == VG_EXTENDABLE ||
+              extendable == ~VG_EXTENDABLE) {
+            if ( extendable == VG_EXTENDABLE)
+               vg[VG_CHR(minor)]->vg_status |= VG_EXTENDABLE;
+            else
+               vg[VG_CHR(minor)]->vg_status &= ~VG_EXTENDABLE;
+         } else return -EINVAL;
+         return 0;
+
+
+      /* get volume group data (only the vg_t struct) */
+      case VG_STATUS:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_to_user ( arg, vg[VG_CHR(minor)], sizeof ( vg_t)) != 0)
+            return -EFAULT;
+
+         return 0;
+
+
+      /* get volume group count */
+      case VG_STATUS_GET_COUNT:
+         if ( copy_to_user ( arg, &vg_count, sizeof ( vg_count)) != 0)
+            return -EFAULT;
+
+         return 0;
+
+
+      /* get volume group count */
+      case VG_STATUS_GET_NAMELIST:
+         for ( l = v = 0; v < ABS_MAX_VG; v++) {
+            if ( vg[v] != NULL) {
+               if ( copy_to_user ( arg + l++ * NAME_LEN,
+                                   vg[v]->vg_name,
+                                   NAME_LEN) != 0)
+                  return -EFAULT;
+            }
+         }
+         return 0;
+
+
+      /* create, remove, extend or reduce a logical volume */
+      case LV_CREATE:
+      case LV_REMOVE:
+      case LV_EXTEND:
+      case LV_REDUCE:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( &lv_req, arg, sizeof ( lv_req)) != 0)
+            return -EFAULT;
+
+         if ( command != LV_REMOVE) {
+            if ( copy_from_user ( &lv, lv_req.lv, sizeof ( lv_t)) != 0)
+               return -EFAULT;
+         }
+
+         switch ( command) {
+            case LV_CREATE:
+               return do_lv_create ( minor, lv_req.lv_name, &lv);
+
+            case LV_REMOVE:
+               return do_lv_remove ( minor, lv_req.lv_name, -1);
+
+            case LV_EXTEND:
+            case LV_REDUCE:
+               return do_lv_extend_reduce ( minor, lv_req.lv_name, &lv);
+         }
+
+
+      /* get status of a logical volume by name */
+      case LV_STATUS_BYNAME:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( &lv_status_byname_req, arg,
+                               sizeof ( lv_status_byname_req_t)) != 0)
+            return -EFAULT;
+
+         if ( lv_status_byname_req.lv == NULL) return -EINVAL;
+         if ( copy_from_user ( &lv, lv_status_byname_req.lv,
+                               sizeof ( lv_t)) != 0)
+            return -EFAULT;
+
+         for ( l = 0; l < vg[VG_CHR(minor)]->lv_max; l++) {
+            if ( vg[VG_CHR(minor)]->lv[l] != NULL &&
+                 lvm_strcmp ( vg[VG_CHR(minor)]->lv[l]->lv_name,
+                              lv_status_byname_req.lv_name) == 0) {
+               if ( copy_to_user ( lv_status_byname_req.lv,
+                                   vg[VG_CHR(minor)]->lv[l],
+                                   sizeof ( lv_t)) != 0)
+                  return -EFAULT;
+
+               if ( lv.lv_current_pe != NULL) {
+                  size = vg[VG_CHR(minor)]->lv[l]->lv_allocated_le *
+                         sizeof ( pe_t);
+                  if ( copy_to_user ( lv.lv_current_pe,
+                                      vg[VG_CHR(minor)]->lv[l]->lv_current_pe,
+                                      size) != 0)
+                     return -EFAULT;
+               }
+               return 0;
+            }
+         }
+         return -ENXIO;
+
+
+      /* get status of a logical volume by index */
+      case LV_STATUS_BYINDEX:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( &lv_status_byindex_req, arg,
+                               sizeof ( lv_status_byindex_req)) != 0)
+            return -EFAULT;
+
+         if ( ( lvp = lv_status_byindex_req.lv) == NULL) return -EINVAL;
+         l = lv_status_byindex_req.lv_index;
+         if ( vg[VG_CHR(minor)]->lv[l] == NULL) return -ENXIO;
+
+         if ( copy_from_user ( &lv, lvp, sizeof ( lv_t)) != 0)
+            return -EFAULT;
+
+         if ( copy_to_user ( lvp, vg[VG_CHR(minor)]->lv[l],
+                             sizeof ( lv_t)) != 0)
+            return -EFAULT;
+
+         if ( lv.lv_current_pe != NULL) {
+            size = vg[VG_CHR(minor)]->lv[l]->lv_allocated_le * sizeof ( pe_t);
+            if ( copy_to_user ( lv.lv_current_pe,
+                                vg[VG_CHR(minor)]->lv[l]->lv_current_pe,
+                                size) != 0)
+               return -EFAULT;
+         }
+         return 0;
+
+
+      /* change a physical volume */
+      case PV_CHANGE:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( &pv_change_req, arg,
+                               sizeof ( pv_change_req)) != 0)
+            return -EFAULT;
+
+         for ( p = 0; p < vg[VG_CHR(minor)]->pv_max; p++) {
+            if ( vg[VG_CHR(minor)]->pv[p] != NULL &&
+                 lvm_strcmp ( vg[VG_CHR(minor)]->pv[p]->pv_name,
+                              pv_change_req.pv_name) == 0) {
+#ifdef LVM_GET_INODE
+               inode_sav = vg[VG_CHR(minor)]->pv[p]->inode;
+#endif
+               if ( copy_from_user ( vg[VG_CHR(minor)]->pv[p],
+                                     pv_change_req.pv,
+                                     sizeof ( pv_t)) != 0)
+                  return -EFAULT;
+
+               /* We don't need the PE list
+                  in kernel space as with LVs pe_t list */
+               vg[VG_CHR(minor)]->pv[p]->pe = NULL;
+#ifdef LVM_GET_INODE
+               vg[VG_CHR(minor)]->pv[p]->inode = inode_sav;
+#endif
+               return 0;
+            }
+         }
+         return -ENXIO;
+
+
+      /* get physical volume data (pv_t structure only) */
+      case PV_STATUS:
+         if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+         if ( copy_from_user ( &pv_status_req, arg,
+                               sizeof ( pv_status_req)) != 0)
+            return -EFAULT;
+
+         for ( p = 0; p < vg[VG_CHR(minor)]->pv_max; p++) {
+            if ( vg[VG_CHR(minor)]->pv[p] != NULL) {
+               if ( lvm_strcmp ( vg[VG_CHR(minor)]->pv[p]->pv_name,
+                                 pv_status_req.pv_name) == 0) {
+                  if ( copy_to_user ( pv_status_req.pv,
+                                      vg[VG_CHR(minor)]->pv[p],
+                                      sizeof ( pv_t)) != 0)
+                     return -EFAULT;
+                  return 0;
+               }
+            }
+         }
+         return -ENXIO;
+
+
+      /* physical volume buffer flush/invalidate */
+      case PV_FLUSH:
+         if ( copy_from_user ( &pv_flush_req, arg, sizeof ( pv_flush_req)) != 0)
+            return -EFAULT;
+
+         for ( v = 0; v < ABS_MAX_VG; v++) {
+            if ( vg[v] == NULL) continue;
+            for ( p = 0; p < vg[v]->pv_max; p++) {
+               if ( vg[v]->pv[p] != NULL &&
+                    lvm_strcmp ( vg[v]->pv[p]->pv_name,
+                                 pv_flush_req.pv_name) == 0) {
+                  fsync_dev ( vg[v]->pv[p]->pv_dev);
+                  invalidate_buffers ( vg[v]->pv[p]->pv_dev);
+                  return 0;
+               }
+            }
+         }
+         return 0;
+
+
+      default:
+         printk ( KERN_WARNING
+                  "%s -- lvm_chr_ioctl: unknown command %x\n",
+                  lvm_name, command);
+         return -EINVAL;
+   }
+
+   return 0;
+} /* lvm_chr_ioctl */
+
+
+/*
+ * character device close routine
+ */
+static int lvm_chr_release ( struct inode *inode, struct file *file)
+{
+#ifdef DEBUG
+   int minor = MINOR ( inode->i_rdev);
+   printk ( KERN_DEBUG
+            "%s -- lvm_chr_release   VG#: %d\n", lvm_name, VG_CHR(minor));
+#endif
+
+#ifdef MODULE
+   if ( GET_USE_COUNT ( &__this_module) > 0) MOD_DEC_USE_COUNT;
+#endif
+
+#ifdef LVM_TOTAL_RESET
+   if ( lvm_reset_spindown > 0) {
+      lvm_reset_spindown = 0;
+      lvm_chr_open_count = 1;
+   }
+#endif
+
+   if ( lvm_chr_open_count > 0) lvm_chr_open_count--;
+   if ( lock == current->pid) {
+      lock = 0; /* release lock */
+      wake_up_interruptible ( &lvm_wait);
+   }
+
+   return 0;
+} /* lvm_chr_release () */
+
+
+
+/********************************************************************
+ *
+ * Block device functions
+ *
+ ********************************************************************/
+
+/*
+ * block device open routine
+ */
+static int lvm_blk_open ( struct inode *inode, struct file *file) {
+   int minor = MINOR ( inode->i_rdev);
+
+#ifdef DEBUG_LVM_BLK_OPEN
+   printk ( KERN_DEBUG
+            "%s -- lvm_blk_open MINOR: %d  VG#: %d  LV#: %d  mode: 0x%X\n",
+            lvm_name, minor, VG_BLK(minor), LV_BLK(minor), file->f_mode);
+#endif
+
+#ifdef LVM_TOTAL_RESET
+   if ( lvm_reset_spindown > 0) return -EPERM;
+#endif
+
+   if ( vg[VG_BLK(minor)] != NULL &&
+        ( vg[VG_BLK(minor)]->vg_status & VG_ACTIVE) &&
+        LV_BLK(minor) >= 0 &&
+        LV_BLK(minor) < vg[VG_BLK(minor)]->lv_max &&
+        vg[VG_BLK(minor)]->lv[LV_BLK(minor)] != NULL) {
+
+      /* Check parallel LV spindown (LV remove) */
+      if ( vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_status & LV_SPINDOWN)
+         return -EPERM;
+
+      /* Check inactive LV and open for read/write */
+      if ( file->f_mode & O_RDWR) {
+         if ( ! ( vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_status & LV_ACTIVE))
+            return -EPERM;
+         if ( ! ( vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_access & LV_WRITE))
+            return -EACCES;
+      }
+
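+      /* vg->lv_open counts LVs with at least one opener,
+         lv->lv_open counts opens of this particular LV */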
+      if ( vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_open == 0)
+         vg[VG_BLK(minor)]->lv_open++;
+      vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_open++;
+
+#ifdef MODULE
+      MOD_INC_USE_COUNT;
+#endif
+
+#ifdef DEBUG_LVM_BLK_OPEN
+      printk ( KERN_DEBUG
+               "%s -- lvm_blk_open MINOR: %d  VG#: %d  LV#: %d  size: %d\n",
+               lvm_name, minor, VG_BLK(minor), LV_BLK(minor),
+               vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_size);
+#endif
+
+      return 0;
+   }
+
+   return -ENXIO;
+} /* lvm_blk_open () */
+
+
+/*
+ * block device i/o-control routine
+ */
+static int lvm_blk_ioctl (struct inode *inode, struct file *file,
+                          uint command, ulong a) {
+   int minor = MINOR ( inode->i_rdev);
+   void *arg = ( void*) a;
+   struct hd_geometry *hd = ( struct hd_geometry *) a;
+
+#ifdef DEBUG_IOCTL
+   printk ( KERN_DEBUG
+            "%s -- lvm_blk_ioctl MINOR: %d  command: 0x%X  arg: %X  "
+            "VG#: %dl  LV#: %d\n",
+            lvm_name, minor, command, ( ulong) arg,
+            VG_BLK(minor), LV_BLK(minor));
+#endif
+
+   switch ( command) {
+      /* return device size */
+      case BLKGETSIZE:
+#ifdef DEBUG_IOCTL
+         printk ( KERN_DEBUG
+                  "%s -- lvm_blk_ioctl -- BLKGETSIZE: %u\n",
+                  lvm_name, vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_size);
+#endif
+         copy_to_user ( ( long*) arg, &vg[VG_BLK(minor)]->\
+                                      lv[LV_BLK(minor)]->lv_size,
+                        sizeof ( vg[VG_BLK(minor)]->\
+                                 lv[LV_BLK(minor)]->lv_size));
+         break;
+
+
+      /* flush buffer cache */
+      case BLKFLSBUF:
+         /* super user validation */
+         if ( ! capable (CAP_SYS_ADMIN)) return -EACCES;
+
+#ifdef DEBUG_IOCTL
+         printk ( KERN_DEBUG
+                  "%s -- lvm_blk_ioctl -- BLKFLSBUF\n", lvm_name);
+#endif
+         fsync_dev ( inode->i_rdev);
+	 invalidate_buffers(inode->i_rdev);
+         break;
+
+
+      /* set read ahead for block device */
+      case BLKRASET:
+         /* super user validation */
+         if ( ! capable (CAP_SYS_ADMIN)) return -EACCES;
+
+#ifdef DEBUG_IOCTL
+         printk ( KERN_DEBUG
+                  "%s -- lvm_blk_ioctl -- BLKRASET: %d sectors for %02X:%02X\n",
+                  lvm_name, ( long) arg, MAJOR( inode->i_rdev), minor);
+#endif
+         if ( ( long) arg < LVM_MIN_READ_AHEAD ||
+              ( long) arg > LVM_MAX_READ_AHEAD) return -EINVAL;
+         read_ahead[MAJOR_NR] =
+	 vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_read_ahead = ( long) arg;
+         break;
+
+
+      /* get current read ahead setting */
+      case BLKRAGET:
+#ifdef DEBUG_IOCTL
+         printk ( KERN_DEBUG
+                  "%s -- lvm_blk_ioctl -- BLKRAGET\n", lvm_name);
+#endif
+         copy_to_user ( ( long*) arg,
+                        &vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_read_ahead,
+                        sizeof ( vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->\
+                                 lv_read_ahead));
+         break;
+
+
+      /* get disk geometry */
+      case HDIO_GETGEO:
+#ifdef DEBUG_IOCTL
+         printk ( KERN_DEBUG
+                  "%s -- lvm_blk_ioctl -- HDIO_GETGEO\n", lvm_name);
+#endif
+         if ( hd == NULL) return -EINVAL;
+         {
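+            /* an LV has no real geometry; report a fixed fake one
+               (64 heads, 32 sectors/track) and derive the cylinder
+               count from the LV size in sectors */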
+            unsigned char heads = 64;
+            unsigned char sectors = 32;
+            long start = 0;
+            short cylinders = vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_size /
+                              heads / sectors;
+
+            if ( copy_to_user ( ( char*) &hd->heads, &heads,
+                                sizeof ( heads)) != 0 ||
+                 copy_to_user ( ( char*) &hd->sectors, &sectors,
+                                sizeof ( sectors)) != 0 ||
+                 copy_to_user ( ( short*) &hd->cylinders,
+                                &cylinders, sizeof ( cylinders)) != 0 ||
+                 copy_to_user ( ( long*) &hd->start, &start,
+                                sizeof ( start)) != 0)
+               return -EFAULT;
+
+#ifdef DEBUG_IOCTL
+            printk ( KERN_DEBUG
+                     "%s -- lvm_blk_ioctl -- cylinders: %d\n",
+                     lvm_name, cylinders);
+#endif
+         }
+         break;
+
+
+      /* set access flags of a logical volume */
+      case LV_SET_ACCESS:
+         /* super user validation */
+         if ( ! capable (CAP_SYS_ADMIN)) return -EACCES;
+         vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_access = ( ulong) arg;
+         if ( vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_access & LV_WRITE)
+            set_device_ro(vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_dev, 0);
+         else
+            set_device_ro(vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_dev, 1);
+         break;
+
+
+      /* set status flags of a logical volume */
+      case LV_SET_STATUS:
+         /* super user validation */
+         if ( ! capable (CAP_SYS_ADMIN)) return -EACCES;
+         if ( ! ( ( ulong) arg & LV_ACTIVE) &&
+              vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_open > 1) return -EPERM;
+         vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_status = ( ulong) arg;
+         break;
+
+
+      /* set allocation flags of a logical volume */
+      case LV_SET_ALLOCATION:
+         /* super user validation */
+         if ( ! capable (CAP_SYS_ADMIN)) return -EACCES;
+         vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_allocation = ( ulong) arg;
+         break;
+
+
+      default:
+         printk ( KERN_WARNING
+                  "%s -- lvm_blk_ioctl: unknown command %d\n",
+                  lvm_name, command);
+         return -EINVAL;
+   }
+
+   return 0;
+} /* lvm_blk_ioctl () */
+
+
+/*
+ * block device close routine
+ */
+static int lvm_blk_release ( struct inode *inode, struct file *file)
+{
+   int minor = MINOR ( inode->i_rdev);
+
+#ifdef DEBUG
+   printk ( KERN_DEBUG
+            "%s -- lvm_blk_release MINOR: %d  VG#: %d  LV#: %d\n",
+            lvm_name, minor, VG_BLK(minor), LV_BLK(minor));
+#endif
+
+   sync_dev ( inode->i_rdev);
+   if ( vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_open == 1)
+      vg[VG_BLK(minor)]->lv_open--;
+   vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_open--;
+
+#ifdef MODULE
+   MOD_DEC_USE_COUNT;
+#endif
+
+   return 0;
+} /* lvm_blk_release () */
+
+
+#if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS
+/*
+ * Support function for the /proc filesystem
+ */
+#define  LVM_PROC_BUF   ( i == 0 ? dummy_buf : &buf[sz])
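+/* first pass (i == 0) only counts bytes using the small scratch buffer;
+   the second pass writes at offset sz into the vmalloc'ed buffer */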
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 25)
+static int lvm_proc_get_info ( char *page, char **start, off_t pos, int count)
+#else
+static int lvm_proc_get_info ( char *page, char **start, off_t pos,
+                               int count, int whence)
+#endif
+{
+   int c, i, l, p, v, vg_counter, pv_counter, lv_counter, lv_open_counter,
+       lv_open_total, pe_t_bytes, lv_block_exception_t_bytes, seconds;
+   static off_t sz;
+   off_t sz_last;
+   char allocation_flag, inactive_flag, rw_flag, stripes_flag;
+   char *lv_name = NULL;
+   static char *buf = NULL;
+   static char dummy_buf[160]; /* sized for 2 lines */
+
+#ifdef DEBUG_LVM_PROC_GET_INFO
+   printk ( KERN_DEBUG
+            "%s - lvm_proc_get_info CALLED  pos: %lu  count: %d\n",
+            lvm_name, pos, count);
+#endif
+
+   if ( pos == 0 || buf == NULL) {
+      sz_last = vg_counter = pv_counter = lv_counter = lv_open_counter = \
+      lv_open_total = pe_t_bytes = lv_block_exception_t_bytes = 0;
+   
+      /* search for activity */
+      for ( v = 0; v < ABS_MAX_VG; v++) {
+         if ( vg[v] != NULL) {
+            vg_counter++;
+            pv_counter += vg[v]->pv_cur;
+            lv_counter += vg[v]->lv_cur;
+            if ( vg[v]->lv_cur > 0) {
+               for ( l = 0; l < vg[v]->lv_max; l++) {
+                  if ( vg[v]->lv[l] != NULL) {
+                     pe_t_bytes += vg[v]->lv[l]->lv_allocated_le;
+                     if ( vg[v]->lv[l]->lv_block_exception != NULL) {
+                        lv_block_exception_t_bytes +=
+                           vg[v]->lv[l]->lv_remap_end;
+                     }
+                     if ( vg[v]->lv[l]->lv_open > 0) {
+                        lv_open_counter++;
+                        lv_open_total += vg[v]->lv[l]->lv_open;
+                     }
+                  }
+               }
+            }
+         }
+      }
+      pe_t_bytes *= sizeof ( pe_t);
+      lv_block_exception_t_bytes *= sizeof ( lv_block_exception_t);
+   
+      if ( buf != NULL) {
+#ifdef DEBUG_VFREE
+         printk ( KERN_DEBUG
+                  "%s -- vfree %d\n", lvm_name, __LINE__);
+#endif
+         vfree ( buf);
+         buf = NULL;
+      }
+
+      /* two passes: the first to determine the buffer size to allocate,
+         the second to fill the vmalloc'ed buffer */
+      for ( i = 0; i < 2; i++) {
+         sz = 0;
+         sz += sprintf ( LVM_PROC_BUF,
+                         "LVM "
+#ifdef MODULE
+                         "module"
+#else
+                         "driver"
+#endif
+                         " %s\n\n"
+                         "Total:  %d VG%s  %d PV%s  %d LV%s ",
+                         lvm_short_version,
+                         vg_counter, vg_counter == 1 ? "" : "s",
+                         pv_counter, pv_counter == 1 ? "" : "s",
+                         lv_counter, lv_counter == 1 ? "" : "s");
+         sz += sprintf ( LVM_PROC_BUF,
+                         "(%d LV%s open",
+                         lv_open_counter,
+                         lv_open_counter == 1 ? "" : "s");
+         if ( lv_open_total > 0) sz += sprintf ( LVM_PROC_BUF,
+                                                 " %d times)\n",
+                                                 lv_open_total);
+         else                    sz += sprintf ( LVM_PROC_BUF, ")");
+         sz += sprintf ( LVM_PROC_BUF,
+                         "\nGlobal: %lu bytes vmalloced   IOP version: %d   ",
+                         vg_counter * sizeof ( vg_t) +
+                         pv_counter * sizeof ( pv_t) +
+                         lv_counter * sizeof ( lv_t) +
+                         pe_t_bytes + lv_block_exception_t_bytes + sz_last,
+                         lvm_iop_version);
+
+         seconds = CURRENT_TIME - loadtime;
+         if ( seconds < 0) loadtime = CURRENT_TIME + seconds;
+         if ( seconds / 86400 > 0) {
+            sz += sprintf ( LVM_PROC_BUF, "%d day%s ",
+                                          seconds / 86400,
+                                          seconds / 86400 == 0 ||
+                                          seconds / 86400 > 1 ? "s": "");
+         }
+         sz += sprintf ( LVM_PROC_BUF, "%d:%02d:%02d active\n",
+                                       ( seconds % 86400) / 3600,
+                                       ( seconds % 3600) / 60,
+                                       seconds % 60);
+
+         if ( vg_counter > 0) {
+            for ( v = 0; v < ABS_MAX_VG; v++) {
+               /* volume group */
+               if ( vg[v] != NULL) {
+                  inactive_flag = ' ';
+                  if ( ! ( vg[v]->vg_status & VG_ACTIVE))
+                     inactive_flag = 'I';
+                  sz += sprintf ( LVM_PROC_BUF,
+                                  "\nVG: %c%s  [%d PV, %d LV/%d open] "
+                                  " PE Size: %d KB\n"
+                                  "  Usage [KB/PE]: %d /%d total  "
+                                  "%d /%d used  %d /%d free",
+                                  inactive_flag,
+                                  vg[v]->vg_name,
+                                  vg[v]->pv_cur,
+                                  vg[v]->lv_cur,
+                                  vg[v]->lv_open,
+                                  vg[v]->pe_size >> 1,
+                                  vg[v]->pe_size * vg[v]->pe_total >> 1,
+                                  vg[v]->pe_total,
+                                  vg[v]->pe_allocated * vg[v]->pe_size >> 1,
+                                  vg[v]->pe_allocated,
+                                  ( vg[v]->pe_total - vg[v]->pe_allocated) *
+                                  vg[v]->pe_size >> 1,
+                                  vg[v]->pe_total - vg[v]->pe_allocated);
+
+                  /* physical volumes */
+                  sz += sprintf ( LVM_PROC_BUF,
+                                  "\n  PV%s ",
+                                  vg[v]->pv_cur == 1 ? ": " : "s:");
+                  c = 0;
+                  for ( p = 0; p < vg[v]->pv_max; p++) {
+                     if ( vg[v]->pv[p] != NULL) {
+                        inactive_flag = 'A';
+                        if ( ! ( vg[v]->pv[p]->pv_status & PV_ACTIVE))
+                           inactive_flag = 'I';
+                        allocation_flag = 'A';
+                        if ( ! ( vg[v]->pv[p]->pv_allocatable & PV_ALLOCATABLE))
+                           allocation_flag = 'N';
+                        sz += sprintf ( LVM_PROC_BUF,
+                                        "[%c%c] %-21s %8d /%-6d  "
+                                        "%8d /%-6d  %8d /%-6d",
+                                        inactive_flag,
+                                        allocation_flag,
+                                        vg[v]->pv[p]->pv_name,
+                                        vg[v]->pv[p]->pe_total *
+                                        vg[v]->pv[p]->pe_size >> 1,
+                                        vg[v]->pv[p]->pe_total,
+                                        vg[v]->pv[p]->pe_allocated *
+                                        vg[v]->pv[p]->pe_size >> 1,
+                                        vg[v]->pv[p]->pe_allocated,
+                                        ( vg[v]->pv[p]->pe_total -
+                                          vg[v]->pv[p]->pe_allocated) *
+                                        vg[v]->pv[p]->pe_size >> 1,
+                                        vg[v]->pv[p]->pe_total -
+                                        vg[v]->pv[p]->pe_allocated);
+                        c++;
+                        if ( c < vg[v]->pv_cur) sz += sprintf ( LVM_PROC_BUF,
+                                                                "\n       ");
+                     }
+                  }
+
+                  /* logical volumes */
+                  sz += sprintf ( LVM_PROC_BUF,
+                                  "\n    LV%s ",
+                                  vg[v]->lv_cur == 1 ? ": " : "s:");
+                  c = 0;
+                  for ( l = 0; l < vg[v]->lv_max; l++) {
+                     if ( vg[v]->lv[l] != NULL) {
+                        inactive_flag = 'A';
+                        if ( ! ( vg[v]->lv[l]->lv_status & LV_ACTIVE))
+                           inactive_flag = 'I';
+                        rw_flag = 'R';
+                        if ( vg[v]->lv[l]->lv_access & LV_WRITE) rw_flag = 'W';
+                        allocation_flag = 'D';
+                        if ( vg[v]->lv[l]->lv_allocation & LV_CONTIGUOUS)
+                           allocation_flag = 'C';
+                        stripes_flag = 'L';
+                        if ( vg[v]->lv[l]->lv_stripes > 1) stripes_flag = 'S';
+                        sz += sprintf ( LVM_PROC_BUF,
+                                        "[%c%c%c%c",
+                                        inactive_flag,
+                                        rw_flag,
+                                        allocation_flag,
+                                        stripes_flag);
+                        if ( vg[v]->lv[l]->lv_stripes > 1)
+                           sz += sprintf ( LVM_PROC_BUF, "%-2d",
+                                           vg[v]->lv[l]->lv_stripes);
+                        else
+                           sz += sprintf ( LVM_PROC_BUF, "  ");
+                        lv_name = lvm_strrchr ( vg[v]->lv[l]->lv_name, '/');
+                        if ( lv_name != NULL) lv_name++;
+                        else lv_name = vg[v]->lv[l]->lv_name;
+                        sz += sprintf ( LVM_PROC_BUF, "] %-25s", lv_name);
+                        if ( lvm_strlen ( lv_name) > 25)
+                           sz += sprintf ( LVM_PROC_BUF,
+                                           "\n                              ");
+                        sz += sprintf ( LVM_PROC_BUF, "%9d /%-6d   ",
+                                        vg[v]->lv[l]->lv_size >> 1,
+                                        vg[v]->lv[l]->lv_size / vg[v]->pe_size);
+
+                        if ( vg[v]->lv[l]->lv_open == 0)
+                           sz += sprintf ( LVM_PROC_BUF, "close");
+                        else
+                           sz += sprintf ( LVM_PROC_BUF, "%dx open",
+                                           vg[v]->lv[l]->lv_open);
+                        c++;
+                        if ( c < vg[v]->lv_cur) sz += sprintf ( LVM_PROC_BUF,
+                                                                "\n         ");
+                     }
+                  }
+                  if ( vg[v]->lv_cur == 0)
+                     sz += sprintf ( LVM_PROC_BUF, "none");
+                  sz += sprintf ( LVM_PROC_BUF, "\n");
+               }
+            }
+         }
+
+         if ( buf == NULL) {
+            if ( ( buf = vmalloc ( sz)) == NULL) {
+               sz = 0;
+               return sprintf ( page, "%s - vmalloc error at line %d\n",
+                                      lvm_name, __LINE__);
+            }
+         }
+         sz_last = sz;
+      }
+   }
+
+   if ( pos > sz - 1) {
+      vfree ( buf);
+      buf = NULL;
+      return 0;
+   }
+
+   *start = &buf[pos];
+   if ( sz - pos < count) return sz - pos;
+   else                   return count;
+} /* lvm_proc_get_info () */
+#endif /* #if defined CONFIG_LVM_PROC_FS && defined CONFIG_PROC_FS */
+
+
+/*
+ * block device support function for /usr/src/linux/drivers/block/ll_rw_blk.c
+ * (see init_module/lvm_init)
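+ * Maps a buffer head from the logical volume (b_dev/b_blocknr) to the
+ * underlying physical volume (b_rdev/b_rsector) via the LV's PE table.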
+ */
+static int lvm_map ( struct buffer_head *bh, int rw) {
+   int minor = MINOR ( bh->b_dev);
+   int ret = 0;
+   ulong index;
+   ulong size = bh->b_size >> 9;
+   ulong rsector_tmp = bh->b_blocknr * size;
+   ulong rsector_sav;
+   kdev_t rdev_tmp = bh->b_dev;
+   kdev_t rdev_sav;
+   lv_t *lv = vg[VG_BLK(minor)]->lv[LV_BLK(minor)];
+   unsigned long pe_start;
+   
+
+   if ( ! ( lv->lv_status & LV_ACTIVE)) {
+      printk ( KERN_ALERT
+               "%s - lvm_map: ll_rw_blk for inactive LV %s\n",
+               lvm_name, lv->lv_name);
+      return -1;
+   }
+
+/*
+if ( lv->lv_access & LV_SNAPSHOT)
+printk ( "%s -- %02d:%02d  block: %lu  rw: %d\n", lvm_name, MAJOR ( bh->b_dev), MINOR ( bh->b_dev), bh->b_blocknr, rw);
+*/
+
+   /* take care of snapshot chunk writes before
+      checking for a writable logical volume */
+   if ( ( lv->lv_access & LV_SNAPSHOT) &&
+        MAJOR ( bh->b_dev) != 0 &&
+        MAJOR ( bh->b_dev) != MAJOR_NR &&
+        ( rw == WRITEA || rw == WRITE))
+   {
+/*
+printk ( "%s -- doing snapshot write for %02d:%02d[%02d:%02d]  b_blocknr: %lu  b_rsector: %lu\n", lvm_name, MAJOR ( bh->b_dev), MINOR ( bh->b_dev), MAJOR ( bh->b_dev), MINOR ( bh->b_dev), bh->b_blocknr, bh->b_rsector);
+*/
+      return 0;
+   }
+
+   if ( ( rw == WRITE || rw == WRITEA) &&
+        ! ( lv->lv_access & LV_WRITE)) {
+      printk ( KERN_CRIT
+               "%s - lvm_map: ll_rw_blk write for readonly LV %s\n",
+               lvm_name, lv->lv_name);
+      return -1;
+   }
+
+
+#ifdef DEBUG_MAP
+   printk ( KERN_DEBUG
+            "%s - lvm_map minor:%d  *rdev: %02d:%02d  *rsector: %lu  "
+            "size:%lu\n",
+            lvm_name, minor,
+            MAJOR ( rdev_tmp),
+            MINOR ( rdev_tmp),
+            rsector_tmp, size);
+#endif
+
+   if ( rsector_tmp + size > lv->lv_size) {
+      printk ( KERN_ALERT
+               "%s - lvm_map *rsector: %lu or size: %lu wrong for"
+               " minor: %2d\n", lvm_name, rsector_tmp, size, minor);
+      return -1;
+   }
+
+   rsector_sav = rsector_tmp;
+   rdev_sav    = rdev_tmp;
+
+lvm_second_remap:
+   /* linear mapping */
+   if ( lv->lv_stripes < 2) {
+      index = rsector_tmp / vg[VG_BLK(minor)]->pe_size; /* get the index */
+      pe_start = lv->lv_current_pe[index].pe;
+      rsector_tmp = lv->lv_current_pe[index].pe +
+                    ( rsector_tmp % vg[VG_BLK(minor)]->pe_size);
+      rdev_tmp    = lv->lv_current_pe[index].dev;
+
+#ifdef DEBUG_MAP
+      printk ( KERN_DEBUG
+               "lv_current_pe[%ld].pe: %d  rdev: %02d:%02d  rsector:%ld\n",
+               index,
+               lv->lv_current_pe[index].pe,
+               MAJOR ( rdev_tmp),
+               MINOR ( rdev_tmp),
+               rsector_tmp);
+#endif
+
+   /* striped mapping */
+   } else {
+      ulong stripe_index;
+      ulong stripe_length;
+
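+      /*
+       * The PE array is split into lv_stripes contiguous groups of
+       * lv_allocated_le / lv_stripes extents each; data rotates over
+       * the stripes in chunks of lv_stripesize sectors.
+       * Example (assumed values): pe_size = 8192, lv_stripes = 2,
+       * lv_stripesize = 128 -> stripe_length = 16384; rsector 300 falls
+       * into chunk 2, i.e. stripe 0 at offset 172 of its first PE.
+       */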
+      stripe_length = vg[VG_BLK(minor)]->pe_size * lv->lv_stripes;
+      stripe_index = ( rsector_tmp % stripe_length) / lv->lv_stripesize;
+      index = rsector_tmp / stripe_length +
+              ( stripe_index % lv->lv_stripes) *
+              ( lv->lv_allocated_le / lv->lv_stripes);
+      pe_start = lv->lv_current_pe[index].pe;
+      rsector_tmp = lv->lv_current_pe[index].pe +
+                    ( rsector_tmp % stripe_length) -
+                    ( stripe_index % lv->lv_stripes) * lv->lv_stripesize -
+                    stripe_index / lv->lv_stripes *
+                    ( lv->lv_stripes - 1) * lv->lv_stripesize;
+      rdev_tmp = lv->lv_current_pe[index].dev;
+
+#ifdef DEBUG_MAP
+      printk(KERN_DEBUG
+	     "lv_current_pe[%ld].pe: %d  rdev: %02d:%02d  rsector:%ld\n"
+	     "stripe_length: %ld  stripe_index: %ld\n",
+	     index,
+	     lv->lv_current_pe[index].pe,
+	     MAJOR ( rdev_tmp),
+	     MINOR ( rdev_tmp),
+	     rsector_tmp,
+	     stripe_length,
+	     stripe_index);
+#endif
+   }
+
+   /* handle physical extents on the move */
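+   /* if user space (e.g. pvmove) has locked the PE this request maps to,
+      sleep until the move is done and then redo the remapping */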
+   if ( pe_lock_req.lock == LOCK_PE) {
+      if ( rdev_tmp == pe_lock_req.data.pv_dev &&
+           rsector_tmp >= pe_lock_req.data.pv_offset &&
+           rsector_tmp < ( pe_lock_req.data.pv_offset +
+                        vg[VG_BLK(minor)]->pe_size)) {
+         sleep_on ( &lvm_map_wait);
+         rsector_tmp = rsector_sav;
+         rdev_tmp    = rdev_sav;
+         goto lvm_second_remap;
+      }
+   }
+
+   /* statistic */
+   if ( rw == WRITE || rw == WRITEA)
+      lv->lv_current_pe[index].writes++;
+   else
+      lv->lv_current_pe[index].reads++;
+
+   /* snapshot volume exception handling on physical device address base */
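+   /* a write to a snapshot original first triggers a copy-on-write of the
+      affected chunk into every active snapshot; i/o to a snapshot volume
+      itself is redirected through its block exception table */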
+   if ( lv->lv_access & ( LV_SNAPSHOT | LV_SNAPSHOT_ORG)) {
+      /* original logical volume */
+      if ( lv->lv_access & LV_SNAPSHOT_ORG) {
+         if ( rw == WRITE || rw == WRITEA)
+         {
+            lv_t *lv_ptr;
+
+            /* start with the first snapshot and loop through all of them */
+            for ( lv_ptr = lv->lv_snapshot_next;
+                  lv_ptr != NULL;
+                  lv_ptr = lv_ptr->lv_snapshot_next) {
+	       down(&lv_ptr->lv_snapshot_sem);
+               /* is there still free exception storage for this snapshot? */
+               if ( lv_ptr->lv_block_exception != NULL) {
+			rdev_sav = rdev_tmp;
+			rsector_sav = rsector_tmp;
+			if (!lvm_snapshot_remap_block(&rdev_tmp,
+						      &rsector_tmp,
+						      pe_start,
+						      lv_ptr))
+				/* create a new mapping */
+				ret = lvm_snapshot_COW(rdev_tmp,
+						       rsector_tmp,
+						       pe_start,
+						       rsector_sav,
+						       lv_ptr);
+			rdev_tmp    = rdev_sav;
+			rsector_tmp = rsector_sav;
+               }
+	       up(&lv_ptr->lv_snapshot_sem);
+            }
+         }
+      } else {
+         /* remap snapshot logical volume */
+	 down(&lv->lv_snapshot_sem);
+         if ( lv->lv_block_exception != NULL)
+            lvm_snapshot_remap_block ( &rdev_tmp, &rsector_tmp, pe_start, lv);
+	 up(&lv->lv_snapshot_sem);
+      }
+   }
+
+   bh->b_rdev    = rdev_tmp;
+   bh->b_rsector = rsector_tmp;
+
+   return ret;
+} /* lvm_map () */
+
+
+/*
+ * lvm_map snapshot logical volume support functions
+ */
+
+/*
+ * end lvm_map snapshot logical volume support functions
+ */
+
+
+/*
+ * internal support functions
+ */
+
+#ifdef LVM_HD_NAME
+/*
+ * generate "hard disk" name
+ */
+void lvm_hd_name ( char *buf, int minor) {
+   int len = 0;
+
+   if ( vg[VG_BLK(minor)] == NULL ||
+        vg[VG_BLK(minor)]->lv[LV_BLK(minor)] == NULL) return;
+   len = lvm_strlen ( vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_name) - 5;
+   lvm_memcpy ( buf, &vg[VG_BLK(minor)]->lv[LV_BLK(minor)]->lv_name[5], len);
+   buf[len] = 0;
+   return;
+}
+#endif
+
+
+/*
+ * this one should never be called...
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 30)
+static void lvm_dummy_device_request ( request_queue_t *t)
+#else
+static void lvm_dummy_device_request ( void)
+#endif
+{
+  printk ( KERN_EMERG
+           "%s -- oops, got lvm request for %02d:%02d [sector: %lu]\n",
+           lvm_name,
+           MAJOR ( CURRENT->rq_dev),
+           MINOR ( CURRENT->rq_dev),
+           CURRENT->sector);
+  return;
+}
+
+
+/*
+ * character device support function VGDA create
+ */
+int do_vg_create ( int minor, void *arg) {
+   int snaporg_minor = 0;
+   ulong  l, p;
+   lv_t lv;
+   vg_t *vg_ptr;
+
+   if ( vg[VG_CHR(minor)] != NULL) return -EPERM;
+
+   if ( ( vg_ptr = kmalloc ( sizeof ( vg_t), GFP_USER)) == NULL) {
+      printk ( KERN_CRIT
+               "%s -- VG_CREATE: kmalloc error VG at line %d\n",
+               lvm_name, __LINE__);
+      return -ENOMEM;
+   }
+
+   /* get the volume group structure */
+   if ( copy_from_user ( vg_ptr, arg, sizeof ( vg_t)) != 0) {
+      kfree ( vg_ptr);
+      return -EFAULT;
+   }
+
+   /* we are not that active so far... */
+   vg_ptr->vg_status &= ~VG_ACTIVE;
+   vg[VG_CHR(minor)] = vg_ptr;
+
+   vg[VG_CHR(minor)]->pe_allocated = 0;
+   if ( vg[VG_CHR(minor)]->pv_max > ABS_MAX_PV) {
+      printk ( KERN_WARNING
+               "%s -- Can't activate VG: ABS_MAX_PV too small\n",
+               lvm_name);
+      kfree ( vg[VG_CHR(minor)]);
+      vg[VG_CHR(minor)] = NULL;
+      return -EPERM;
+   }
+   if ( vg[VG_CHR(minor)]->lv_max > ABS_MAX_LV) {
+      printk ( KERN_WARNING
+               "%s -- Can't activate VG: ABS_MAX_LV too small for %u\n",
+               lvm_name, vg[VG_CHR(minor)]->lv_max);
+      kfree ( vg[VG_CHR(minor)]);
+      vg[VG_CHR(minor)] = NULL;
+      return -EPERM;
+   }
+
+   /* get the physical volume structures */
+   vg[VG_CHR(minor)]->pv_act = vg[VG_CHR(minor)]->pv_cur = 0; 
+   for ( p = 0; p < vg[VG_CHR(minor)]->pv_max; p++) {
+      /* user space address */
+      if ( ( pvp = vg[VG_CHR(minor)]->pv[p]) != NULL) {
+         vg[VG_CHR(minor)]->pv[p] = kmalloc ( sizeof ( pv_t), GFP_USER);
+         if ( vg[VG_CHR(minor)]->pv[p] == NULL) {
+            printk ( KERN_CRIT
+                     "%s -- VG_CREATE: kmalloc error PV at line %d\n",
+                     lvm_name, __LINE__);
+            do_vg_remove ( minor);
+            return -ENOMEM;
+         }
+         if ( copy_from_user ( vg[VG_CHR(minor)]->pv[p], pvp,
+                               sizeof ( pv_t)) != 0) {
+            do_vg_remove ( minor);
+            return -EFAULT;
+         }
+
+         /* We don't need the PE list
+            in kernel space, unlike the LV pe_t list (see below) */
+         vg[VG_CHR(minor)]->pv[p]->pe = NULL;
+         vg[VG_CHR(minor)]->pv[p]->pe_allocated = 0;
+         vg[VG_CHR(minor)]->pv[p]->pv_status = PV_ACTIVE;
+         vg[VG_CHR(minor)]->pv_act++;
+         vg[VG_CHR(minor)]->pv_cur++;
+
+#ifdef LVM_GET_INODE
+         /* insert a dummy inode for fs_may_mount */
+         vg[VG_CHR(minor)]->pv[p]->inode =
+            lvm_get_inode ( vg[VG_CHR(minor)]->pv[p]->pv_dev);
+#endif
+      }
+   }
+
+   /* get the logical volume structures */
+   vg[VG_CHR(minor)]->lv_cur = 0;
+   for ( l = 0; l < vg[VG_CHR(minor)]->lv_max; l++) {
+      /* user space address */
+      if ( ( lvp = vg[VG_CHR(minor)]->lv[l]) != NULL) {
+         if ( copy_from_user ( &lv, lvp, sizeof ( lv_t)) != 0) {
+            do_vg_remove ( minor);
+            return -EFAULT;
+         }
+         vg[VG_CHR(minor)]->lv[l] = NULL;
+	 {
+		 int err;
+
+		 err = do_lv_create(minor, lv.lv_name, &lv);
+		 if (err)
+		 {
+			 do_vg_remove(minor);
+			 return err;
+		 }
+         }
+      }
+   }
+
+   /* Second pass to correct snapshot logical volumes which were not
+      yet in place during the first pass above */
+   for ( l = 0; l < vg[VG_CHR(minor)]->lv_max; l++) {
+      if ( vg[VG_CHR(minor)]->lv[l] != NULL &&
+           vg[VG_CHR(minor)]->lv[l]->lv_access & LV_SNAPSHOT) {
+         snaporg_minor = vg[VG_CHR(minor)]->lv[l]->lv_snapshot_minor;
+         if ( vg[VG_CHR(minor)]->lv[LV_BLK(snaporg_minor)] != NULL) {
+            /* get pointer to original logical volume */
+            lv_t *lv_ptr = vg[VG_CHR(minor)]->lv[l]->lv_snapshot_org =
+                           vg[VG_CHR(minor)]->lv[LV_BLK(snaporg_minor)];
+
+            /* set necessary fields of original logical volume */
+            lv_ptr->lv_access |= LV_SNAPSHOT_ORG;
+            lv_ptr->lv_snapshot_minor = 0;
+            lv_ptr->lv_snapshot_org = lv_ptr;
+            lv_ptr->lv_snapshot_prev = NULL;
+
+            /* find last snapshot logical volume in the chain */
+            while ( lv_ptr->lv_snapshot_next != NULL)
+               lv_ptr = lv_ptr->lv_snapshot_next;
+
+            /* set back pointer to this last one in our new logical volume */
+            vg[VG_CHR(minor)]->lv[l]->lv_snapshot_prev = lv_ptr;
+
+            /* last logical volume now points to our new snapshot volume */
+            lv_ptr->lv_snapshot_next = vg[VG_CHR(minor)]->lv[l];
+
+            /* now point to the new one */
+            lv_ptr = lv_ptr->lv_snapshot_next;
+
+            /* set necessary fields of new snapshot logical volume */
+            lv_ptr->lv_snapshot_next = NULL;
+            lv_ptr->lv_current_pe =
+               vg[VG_CHR(minor)]->lv[LV_BLK(snaporg_minor)]->lv_current_pe;
+            lv_ptr->lv_allocated_le =
+               vg[VG_CHR(minor)]->lv[LV_BLK(snaporg_minor)]->lv_allocated_le;
+            lv_ptr->lv_current_le =
+               vg[VG_CHR(minor)]->lv[LV_BLK(snaporg_minor)]->lv_current_le;
+            lv_ptr->lv_size =
+               vg[VG_CHR(minor)]->lv[LV_BLK(snaporg_minor)]->lv_size;
+         }
+      }
+   }
+
+   vg_count++;
+
+   /* let's go active */
+   vg[VG_CHR(minor)]->vg_status |= VG_ACTIVE;
+
+#ifdef MODULE
+   MOD_INC_USE_COUNT;
+#endif
+   return 0;
+} /* do_vg_create () */
+
+
+/*
+ * character device support function VGDA remove
+ */
+static int do_vg_remove ( int minor) {
+   int i;
+
+   if ( vg[VG_CHR(minor)] == NULL) return -ENXIO;
+
+#ifdef LVM_TOTAL_RESET
+   if ( vg[VG_CHR(minor)]->lv_open > 0 && lvm_reset_spindown == 0)
+#else
+   if ( vg[VG_CHR(minor)]->lv_open > 0)
+#endif
+      return -EPERM;
+
+   /* let's go inactive */
+   vg[VG_CHR(minor)]->vg_status &= ~VG_ACTIVE;
+
+   /* free LVs */
+   /* first free snapshot logical volumes */
+   for ( i = 0; i < vg[VG_CHR(minor)]->lv_max; i++) {
+      if ( vg[VG_CHR(minor)]->lv[i] != NULL &&
+           vg[VG_CHR(minor)]->lv[i]->lv_access & LV_SNAPSHOT) {
+         do_lv_remove ( minor, NULL, i);
+         current->state = TASK_INTERRUPTIBLE;
+         schedule_timeout ( 1);
+      }
+   }
+   /* then free the rest */
+   for ( i = 0; i < vg[VG_CHR(minor)]->lv_max; i++) {
+      if ( vg[VG_CHR(minor)]->lv[i] != NULL) {
+         do_lv_remove ( minor, NULL, i);
+         current->state = TASK_INTERRUPTIBLE;
+         schedule_timeout ( 1);
+      }
+   }
+
+   /* free PVs */
+   for ( i = 0; i < vg[VG_CHR(minor)]->pv_max; i++) {
+      if ( vg[VG_CHR(minor)]->pv[i] != NULL) {
+#ifdef DEBUG_VFREE
+         printk ( KERN_DEBUG
+                  "%s -- kfree %d\n", lvm_name, __LINE__);
+#endif
+#ifdef LVM_GET_INODE
+         lvm_clear_inode ( vg[VG_CHR(minor)]->pv[i]->inode);
+#endif
+         kfree ( vg[VG_CHR(minor)]->pv[i]);
+         vg[VG_CHR(minor)]->pv[i] = NULL;
+      }
+   }
+
+#ifdef DEBUG_VFREE
+   printk ( KERN_DEBUG "%s -- kfree %d\n", lvm_name, __LINE__);
+#endif
+   kfree ( vg[VG_CHR(minor)]);
+   vg[VG_CHR(minor)] = NULL;
+
+   vg_count--;
+
+#ifdef MODULE
+   MOD_DEC_USE_COUNT;
+#endif
+   return 0;
+} /* do_vg_remove () */
+
+
+/*
+ * character device support function logical volume create
+ */
+static int do_lv_create ( int minor, char *lv_name, lv_t *lv) {
+   int l, le, l_new, p, size;
+   ulong lv_status_save;
+   lv_block_exception_t *lvbe = lv->lv_block_exception;
+   lv_t *lv_ptr = NULL;
+
+   if ( ( pep = lv->lv_current_pe) == NULL) return -EINVAL;
+   if ( lv->lv_chunk_size > LVM_SNAPSHOT_MAX_CHUNK) return -EINVAL;
+
+   for ( l = 0; l < vg[VG_CHR(minor)]->lv_max; l++) {
+      if ( vg[VG_CHR(minor)]->lv[l] != NULL && 
+           lvm_strcmp ( vg[VG_CHR(minor)]->lv[l]->lv_name, lv_name) == 0)
+         return -EEXIST;
+   }
+
+   /* in case of an lv_remove()/lv_create() pair; e.g. lvrename does this */
+   l_new = -1;
+   if ( vg[VG_CHR(minor)]->lv[lv->lv_number] == NULL) l_new = lv->lv_number;
+   else {
+      for ( l = 0; l < vg[VG_CHR(minor)]->lv_max; l++) {
+         if ( vg[VG_CHR(minor)]->lv[l] == NULL) if ( l_new == -1) l_new = l;
+      }
+   }
+   if ( l_new == -1) return -EPERM;
+   l = l_new;
+
+   if ( ( lv_ptr = kmalloc ( sizeof ( lv_t), GFP_USER)) == NULL) {
+      printk ( KERN_CRIT "%s -- LV_CREATE: kmalloc error LV at line %d\n",
+                         lvm_name, __LINE__);
+      return -ENOMEM;
+   }
+
+   /* copy preloaded LV */
+   lvm_memcpy ( ( char*) lv_ptr, ( char *) lv, sizeof ( lv_t));
+
+   lv_status_save = lv_ptr->lv_status;
+   lv_ptr->lv_status &= ~LV_ACTIVE;
+   lv_ptr->lv_snapshot_org =  \
+   lv_ptr->lv_snapshot_prev = \
+   lv_ptr->lv_snapshot_next = NULL;
+   lv_ptr->lv_block_exception = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 4)
+   lv_ptr->lv_snapshot_sem = MUTEX;
+#else
+   init_MUTEX(&lv_ptr->lv_snapshot_sem);
+#endif
+   vg[VG_CHR(minor)]->lv[l] = lv_ptr;
+
+   /* get the PE structures from user space if this
+      is not a snapshot logical volume */
+   if ( ! ( lv_ptr->lv_access & LV_SNAPSHOT)) {
+      size = lv_ptr->lv_allocated_le * sizeof ( pe_t);
+      if ( ( lv_ptr->lv_current_pe = vmalloc ( size)) == NULL) {
+         printk ( KERN_CRIT
+                  "%s -- LV_CREATE: vmalloc error LV_CURRENT_PE of %d Byte "
+                  "at line %d\n",
+                  lvm_name, size, __LINE__);
+#ifdef DEBUG_VFREE
+         printk ( KERN_DEBUG "%s -- vfree %d\n", lvm_name, __LINE__);
+#endif
+         kfree ( lv_ptr);
+         vg[VG_CHR(minor)]->lv[l] = NULL;
+         return -ENOMEM;
+      }
+   
+      if ( copy_from_user ( lv_ptr->lv_current_pe, pep, size)) {
+         vfree ( lv_ptr->lv_current_pe);
+         kfree ( lv_ptr);
+         vg[VG_CHR(minor)]->lv[l] = NULL;
+         return -EFAULT;
+      }
+
+      /* correct the PE count in PVs */
+      for ( le = 0; le < lv_ptr->lv_allocated_le; le++) {
+         vg[VG_CHR(minor)]->pe_allocated++;
+         for ( p = 0; p < vg[VG_CHR(minor)]->pv_cur; p++) {
+            if ( vg[VG_CHR(minor)]->pv[p]->pv_dev ==
+                 lv_ptr->lv_current_pe[le].dev)
+               vg[VG_CHR(minor)]->pv[p]->pe_allocated++;
+         }
+      }
+   } else {
+      /* Get snapshot exception data and block list */
+      if ( lvbe != NULL) {
+         lv_ptr->lv_snapshot_org =
+            vg[VG_CHR(minor)]->lv[LV_BLK(lv_ptr->lv_snapshot_minor)];
+         if ( lv_ptr->lv_snapshot_org != NULL) {
+            size = lv_ptr->lv_remap_end * sizeof ( lv_block_exception_t);
+            if ( ( lv_ptr->lv_block_exception = vmalloc ( size)) == NULL) {
+               printk ( KERN_CRIT
+                        "%s -- do_lv_create: vmalloc error LV_BLOCK_EXCEPTION "
+                        "of %d byte at line %d\n",
+                        lvm_name, size, __LINE__);
+#ifdef DEBUG_VFREE
+               printk ( KERN_DEBUG "%s -- vfree %d\n", lvm_name, __LINE__);
+#endif
+               kfree ( lv_ptr);
+               vg[VG_CHR(minor)]->lv[l] = NULL;
+               return -ENOMEM;
+            }
+      
+            if ( copy_from_user ( lv_ptr->lv_block_exception, lvbe, size)) {
+               vfree ( lv_ptr->lv_block_exception);
+               kfree ( lv_ptr);
+               vg[VG_CHR(minor)]->lv[l] = NULL;
+               return -EFAULT;
+            }
+
+            /* get pointer to original logical volume */
+            lv_ptr = lv_ptr->lv_snapshot_org;
+
+            lv_ptr->lv_snapshot_minor = 0;
+            lv_ptr->lv_snapshot_org = lv_ptr;
+            lv_ptr->lv_snapshot_prev = NULL;
+            /* walk through the snapshot list */
+            while ( lv_ptr->lv_snapshot_next != NULL)
+               lv_ptr = lv_ptr->lv_snapshot_next;
+            /* now lv_ptr points to the last existing snapshot in the chain */
+            vg[VG_CHR(minor)]->lv[l]->lv_snapshot_prev = lv_ptr;
+            /* our new one now points back to the previous last in the chain */
+            lv_ptr = vg[VG_CHR(minor)]->lv[l];
+            /* now lv_ptr points to our new last snapshot logical volume */
+            lv_ptr->lv_snapshot_org = lv_ptr->lv_snapshot_prev->lv_snapshot_org;
+            lv_ptr->lv_snapshot_next = NULL;
+            lv_ptr->lv_current_pe = lv_ptr->lv_snapshot_org->lv_current_pe;
+            lv_ptr->lv_allocated_le = lv_ptr->lv_snapshot_org->lv_allocated_le;
+            lv_ptr->lv_current_le = lv_ptr->lv_snapshot_org->lv_current_le;
+            lv_ptr->lv_size = lv_ptr->lv_snapshot_org->lv_size;
+            lv_ptr->lv_stripes = lv_ptr->lv_snapshot_org->lv_stripes;
+            lv_ptr->lv_stripesize = lv_ptr->lv_snapshot_org->lv_stripesize;
+	    {
+		int err;
+
+		err = lvm_snapshot_alloc(lv_ptr);
+		if (err)
+		{
+			vfree(lv_ptr->lv_block_exception);
+			kfree(lv_ptr);
+			vg[VG_CHR(minor)]->lv[l] = NULL;
+			return err;
+		}
+	    }
+         } else {
+            vfree ( lv_ptr->lv_block_exception);
+            kfree ( lv_ptr);
+            vg[VG_CHR(minor)]->lv[l] = NULL;
+            return -EFAULT;
+         }
+      } else {
+         kfree ( vg[VG_CHR(minor)]->lv[l]);
+         vg[VG_CHR(minor)]->lv[l] = NULL;
+         return -EINVAL;
+      }
+   } /* if ( vg[VG_CHR(minor)]->lv[l]->lv_access & LV_SNAPSHOT) */
+
+   lv_ptr = vg[VG_CHR(minor)]->lv[l];
+   lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].start_sect = 0;
+   lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].nr_sects = lv_ptr->lv_size;
+   lvm_size[MINOR(lv_ptr->lv_dev)] = lv_ptr->lv_size >> 1;
+   vg_lv_map[MINOR(lv_ptr->lv_dev)].vg_number = vg[VG_CHR(minor)]->vg_number;
+   vg_lv_map[MINOR(lv_ptr->lv_dev)].lv_number = lv_ptr->lv_number;
+   LVM_CORRECT_READ_AHEAD ( lv_ptr->lv_read_ahead);
+   read_ahead[MAJOR_NR] = lv_ptr->lv_read_ahead;
+   vg[VG_CHR(minor)]->lv_cur++;
+   lv_ptr->lv_status = lv_status_save;
+
+   /* optionally add our new snapshot LV */
+   if ( lv_ptr->lv_access & LV_SNAPSHOT) {
+      /* sync the original logical volume */
+      fsync_dev ( lv_ptr->lv_snapshot_org->lv_dev);
+      /* put ourselves into the chain */
+      lv_ptr->lv_snapshot_prev->lv_snapshot_next = lv_ptr;
+      lv_ptr->lv_snapshot_org->lv_access |= LV_SNAPSHOT_ORG;
+   }
+   lv_ptr->lv_status |= LV_ACTIVE;
+   if ( lv_ptr->lv_access & LV_WRITE)
+      set_device_ro(lv_ptr->lv_dev, 0);
+   else
+      set_device_ro(lv_ptr->lv_dev, 1);
+
+   return 0;
+} /* do_lv_create () */
+
+
+/*
+ * character device support function logical volume remove
+ */
+static int do_lv_remove ( int minor, char *lv_name, int l) {
+   uint le, p;
+   lv_t *lv_ptr;
+
+   if ( l == -1) {
+      for ( l = 0; l < vg[VG_CHR(minor)]->lv_max; l++) {
+         if ( vg[VG_CHR(minor)]->lv[l] != NULL &&
+              lvm_strcmp ( vg[VG_CHR(minor)]->lv[l]->lv_name, lv_name) == 0) {
+            break;
+         }
+      }
+   }
+
+   lv_ptr = vg[VG_CHR(minor)]->lv[l];
+   if ( l < vg[VG_CHR(minor)]->lv_max) {
+#ifdef LVM_TOTAL_RESET
+      if ( lv_ptr->lv_open > 0 && lvm_reset_spindown == 0)
+#else
+      if ( lv_ptr->lv_open > 0)
+#endif
+         return -EBUSY;
+
+      /* check for deletion of snapshot source while
+         snapshot volume still exists */
+      if ( ( lv_ptr->lv_access & LV_SNAPSHOT_ORG) &&
+           lv_ptr->lv_snapshot_next != NULL)
+         return -EPERM;
+
+      lv_ptr->lv_status |= LV_SPINDOWN;
+
+      /* sync the buffers */
+      fsync_dev ( lv_ptr->lv_dev);
+
+      lv_ptr->lv_status &= ~LV_ACTIVE;
+
+      /* invalidate the buffers */
+      invalidate_buffers ( lv_ptr->lv_dev);
+
+      /* reset generic hd */
+      lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].start_sect = -1;
+      lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].nr_sects = 0;
+      lvm_size[MINOR(lv_ptr->lv_dev)] = 0;
+
+      /* reset VG/LV mapping */
+      vg_lv_map[MINOR(lv_ptr->lv_dev)].vg_number = ABS_MAX_VG;
+      vg_lv_map[MINOR(lv_ptr->lv_dev)].lv_number = -1;
+   
+      /* correct the PE count in PVs if this is not a snapshot logical volume */
+      if ( ! ( lv_ptr->lv_access & LV_SNAPSHOT)) {
+         /* only if this is not a snapshot logical volume, because we share
+            the lv_current_pe[] structs with the original logical volume */
+         for ( le = 0; le < lv_ptr->lv_allocated_le; le++) {
+            vg[VG_CHR(minor)]->pe_allocated--;
+            for ( p = 0; p < vg[VG_CHR(minor)]->pv_cur; p++) {
+               if (  vg[VG_CHR(minor)]->pv[p]->pv_dev ==
+                     lv_ptr->lv_current_pe[le].dev)
+                  vg[VG_CHR(minor)]->pv[p]->pe_allocated--;
+            }
+         }
+         vfree ( lv_ptr->lv_current_pe);
+      /* LV_SNAPSHOT */
+      } else {
+/*
+         if ( lv_ptr->lv_block_exception != NULL) {
+            int i;
+            kdev_t last_dev;
+            for ( i = last_dev = 0; i < lv_ptr->lv_remap_ptr; i++) {
+               if ( lv_ptr->lv_block_exception[i].rdev_new != last_dev) {
+                  last_dev = lv_ptr->lv_block_exception[i].rdev_new;
+                  invalidate_buffers ( last_dev);
+                  current->state = TASK_INTERRUPTIBLE;
+                  schedule_timeout ( 1);
+               }
+            }
+         }
+*/
+         /* remove this snapshot logical volume from the chain */
+         lv_ptr->lv_snapshot_prev->lv_snapshot_next = lv_ptr->lv_snapshot_next;
+         if ( lv_ptr->lv_snapshot_next != NULL) {
+            lv_ptr->lv_snapshot_next->lv_snapshot_prev =
+               lv_ptr->lv_snapshot_prev;
+         }
+         /* no more snapshots? */
+         if ( lv_ptr->lv_snapshot_org->lv_snapshot_next == NULL)
+            lv_ptr->lv_snapshot_org->lv_access &= ~LV_SNAPSHOT_ORG;
+	 lvm_snapshot_release(lv_ptr);
+      }
+
+#ifdef DEBUG_VFREE
+      printk ( KERN_DEBUG "%s -- kfree %d\n", lvm_name, __LINE__);
+#endif
+      kfree ( lv_ptr);
+      vg[VG_CHR(minor)]->lv[l] = NULL;
+      vg[VG_CHR(minor)]->lv_cur--;
+      return 0;
+   }
+
+   return -ENXIO;
+} /* do_lv_remove () */
+
+
+/*
+ * character device support function logical volume extend / reduce
+ */
+static int do_lv_extend_reduce ( int minor, char *lv_name, lv_t *lv) {
+   int l, le, p, size, old_allocated_le;
+   uint32_t end, lv_status_save;
+   pe_t *pe;
+
+   if ( ( pep = lv->lv_current_pe) == NULL) return -EINVAL;
+
+   for ( l = 0; l < vg[VG_CHR(minor)]->lv_max; l++) {
+      if ( vg[VG_CHR(minor)]->lv[l] != NULL &&
+           lvm_strcmp ( vg[VG_CHR(minor)]->lv[l]->lv_name, lv_name) == 0)
+         break;
+   }
+   if ( l == vg[VG_CHR(minor)]->lv_max) return -ENXIO;
+
+   /* check for active snapshot */
+   if ( lv->lv_access & ( LV_SNAPSHOT|LV_SNAPSHOT_ORG)) return -EPERM;
+
+   if ( ( pe = vmalloc ( size = lv->lv_current_le * sizeof ( pe_t))) == NULL) {
+      printk ( KERN_CRIT
+               "%s -- do_lv_extend_reduce: vmalloc error LV_CURRENT_PE "
+               "of %d Byte at line %d\n",
+               lvm_name, size, __LINE__);
+      return -ENOMEM;
+   }
+
+   /* get the PE structures from user space */
+   if ( copy_from_user ( pe, pep, size)) {
+      vfree ( pe);
+      return -EFAULT;
+   }
+
+#ifdef DEBUG
+   printk ( KERN_DEBUG
+            "%s -- fsync_dev and "
+            "invalidate_buffers for %s [%s] in %s\n",
+            lvm_name, vg[VG_CHR(minor)]->lv[l]->lv_name,
+            kdevname ( vg[VG_CHR(minor)]->lv[l]->lv_dev),
+            vg[VG_CHR(minor)]->vg_name);
+#endif
+
+   vg[VG_CHR(minor)]->lv[l]->lv_status |= LV_SPINDOWN;
+   fsync_dev ( vg[VG_CHR(minor)]->lv[l]->lv_dev);
+   vg[VG_CHR(minor)]->lv[l]->lv_status &= ~LV_ACTIVE;
+   invalidate_buffers ( vg[VG_CHR(minor)]->lv[l]->lv_dev);
+
+   /* reduce allocation counters on PV(s) */
+   for ( le = 0; le < vg[VG_CHR(minor)]->lv[l]->lv_allocated_le; le++) {
+      vg[VG_CHR(minor)]->pe_allocated--;
+      for ( p = 0; p < vg[VG_CHR(minor)]->pv_cur; p++) {
+         if (  vg[VG_CHR(minor)]->pv[p]->pv_dev ==
+               vg[VG_CHR(minor)]->lv[l]->lv_current_pe[le].dev) {
+            vg[VG_CHR(minor)]->pv[p]->pe_allocated--;
+            break;
+         }
+      }
+   }
+
+#ifdef DEBUG_VFREE
+   printk ( KERN_DEBUG "%s -- vfree %d\n", lvm_name, __LINE__);
+#endif
+
+   /* save pointer to "old" lv/pe pointer array */
+   pep1 = vg[VG_CHR(minor)]->lv[l]->lv_current_pe;
+   end  = vg[VG_CHR(minor)]->lv[l]->lv_current_le;
+
+   /* save open counter */
+   lv_open = vg[VG_CHR(minor)]->lv[l]->lv_open;
+
+   /* save # of old allocated logical extents */
+   old_allocated_le = vg[VG_CHR(minor)]->lv[l]->lv_allocated_le;
+
+   /* copy preloaded LV */
+   lv_status_save = lv->lv_status;
+   lv->lv_status |= LV_SPINDOWN;
+   lv->lv_status &= ~LV_ACTIVE;
+   lvm_memcpy ( ( char*) vg[VG_CHR(minor)]->lv[l], ( char*) lv, sizeof ( lv_t));
+   vg[VG_CHR(minor)]->lv[l]->lv_current_pe = pe;
+   vg[VG_CHR(minor)]->lv[l]->lv_open = lv_open;
+
+   /* save available i/o statistics */
+   /* linear logical volume */
+   if ( vg[VG_CHR(minor)]->lv[l]->lv_stripes < 2) {
+      /* Check what last LE shall be used */
+      if ( end > vg[VG_CHR(minor)]->lv[l]->lv_current_le)
+         end = vg[VG_CHR(minor)]->lv[l]->lv_current_le;
+      for ( le = 0; le < end; le++) {
+         vg[VG_CHR(minor)]->lv[l]->lv_current_pe[le].reads  = pep1[le].reads;
+         vg[VG_CHR(minor)]->lv[l]->lv_current_pe[le].writes = pep1[le].writes;
+      }
+   /* striped logical volume */
+   } else {
+      uint i, j, source, dest, end, old_stripe_size, new_stripe_size;
+
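+      /* copy the per-PE i/o counters stripe by stripe; only the first
+         min(old, new) extents of each stripe keep their statistics */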
+      old_stripe_size = old_allocated_le / vg[VG_CHR(minor)]->lv[l]->lv_stripes;
+      new_stripe_size = vg[VG_CHR(minor)]->lv[l]->lv_allocated_le /
+                        vg[VG_CHR(minor)]->lv[l]->lv_stripes;
+      end = old_stripe_size;
+      if ( end > new_stripe_size) end = new_stripe_size;
+      for ( i = source = dest = 0;
+            i < vg[VG_CHR(minor)]->lv[l]->lv_stripes; i++) {
+         for ( j = 0; j < end; j++) {
+            vg[VG_CHR(minor)]->lv[l]->lv_current_pe[dest+j].reads =
+               pep1[source+j].reads;
+            vg[VG_CHR(minor)]->lv[l]->lv_current_pe[dest+j].writes =
+               pep1[source+j].writes;
+         }
+         source += old_stripe_size;
+         dest   += new_stripe_size;
+      }
+   }
+   vfree ( pep1); pep1 = NULL;
+
+
+   /* extend the PE count in PVs */
+   for ( le = 0; le < vg[VG_CHR(minor)]->lv[l]->lv_allocated_le; le++) {
+      vg[VG_CHR(minor)]->pe_allocated++;
+      for ( p = 0; p < vg[VG_CHR(minor)]->pv_cur; p++) {
+         if ( vg[VG_CHR(minor)]->pv[p]->pv_dev ==
+              vg[VG_CHR(minor)]->lv[l]->lv_current_pe[le].dev) {
+            vg[VG_CHR(minor)]->pv[p]->pe_allocated++;
+            break;
+         }
+      }
+   }
+
+   lvm_gendisk.part[MINOR(vg[VG_CHR(minor)]->lv[l]->lv_dev)].start_sect = 0;
+   lvm_gendisk.part[MINOR(vg[VG_CHR(minor)]->lv[l]->lv_dev)].nr_sects =
+      vg[VG_CHR(minor)]->lv[l]->lv_size;
+   lvm_size[MINOR(vg[VG_CHR(minor)]->lv[l]->lv_dev)] =
+      vg[VG_CHR(minor)]->lv[l]->lv_size >> 1;
+   /* vg_lv_map array doesn't have to be changed here */
+
+   LVM_CORRECT_READ_AHEAD ( vg[VG_CHR(minor)]->lv[l]->lv_read_ahead);
+   read_ahead[MAJOR_NR] = vg[VG_CHR(minor)]->lv[l]->lv_read_ahead;
+   vg[VG_CHR(minor)]->lv[l]->lv_status = lv_status_save;
+
+   return 0;
+} /* do_lv_extend_reduce () */
+
+
+/*
+ * support function initialize gendisk variables
+ */
+#ifdef __initfunc
+__initfunc ( void lvm_geninit ( struct gendisk *lvm_gdisk))
+#else
+void __init lvm_geninit ( struct gendisk *lvm_gdisk)
+#endif
+{
+   int i = 0;
+
+#ifdef DEBUG_GENDISK
+   printk ( KERN_DEBUG "%s -- lvm_gendisk\n", lvm_name);
+#endif
+
+   for ( i = 0; i < MAX_LV; i++) {
+      lvm_gendisk.part[i].start_sect = -1; /* avoid partition check */
+      lvm_size[i] = lvm_gendisk.part[i].nr_sects = 0;
+      lvm_blocksizes[i] = BLOCK_SIZE;
+   }
+
+   blksize_size[MAJOR_NR] = lvm_blocksizes;
+   blk_size[MAJOR_NR] = lvm_size;
+
+   return;
+} /* lvm_gen_init () */
+
+
+#ifdef LVM_GET_INODE
+/*
+ * support function to get an empty inode
+ *
+ * Gets an empty inode to be inserted into the inode hash,
+ * so that a physical volume can't be mounted.
+ * This is analogous to drivers/block/md.c
+ *
+ * Is this the real thing?
+ *
+ */
+struct inode *lvm_get_inode ( kdev_t dev) {
+   struct inode *inode_this = NULL;
+
+   /* Lock the device by inserting a dummy inode. */
+   inode_this = get_empty_inode ();
+   inode_this->i_dev = dev;
+   insert_inode_hash ( inode_this);
+   return inode_this;
+}
+
+
+/*
+ * support function to clear an inode
+ *
+ */
+void lvm_clear_inode ( struct inode *inode) {
+#ifdef I_FREEING
+   inode->i_state |= I_FREEING;
+#endif
+   clear_inode ( inode);
+   return;
+}
+#endif /* #ifdef LVM_GET_INODE */
+
+
+/* my strlen */
+inline int lvm_strlen ( char *s1) {
+   int len = 0;
+
+   while ( s1[len] != 0) len++;
+   return len;
+}
+
+
+/* my strcmp */
+inline int lvm_strcmp ( char *s1, char *s2) {
+   while ( *s1 != 0 && *s2 != 0) {
+      if ( *s1 != *s2) return -1;
+      s1++; s2++;
+   }
+   if ( *s1 == 0 && *s2 == 0) return 0;
+   return -1;
+}
+
+
+/* my strrchr */
+inline char *lvm_strrchr ( char *s1, char c) {
+   char *s2 = NULL;
+
+   while ( *s1 != 0) {
+      if ( *s1 == c) s2 = s1;
+      s1++;
+   }
+   return s2;
+}
+
+
+/* my memcpy */
+inline void lvm_memcpy ( char *dest, char *source, int size) {
+   for ( ;size > 0; size--) *dest++ = *source++;
+}
--- linux/drivers/block/md.c.lvm.orig	Sun Jun 18 10:10:09 2000
+++ linux/drivers/block/md.c	Sun Jun 18 10:13:02 2000
@@ -2898,7 +2898,8 @@
 int md_make_request (struct buffer_head * bh, int rw)
 {
 	int err;
-	mddev_t *mddev = kdev_to_mddev(bh->b_dev);
+	/* changed                           v  to allow LVM to remap */
+	mddev_t *mddev = kdev_to_mddev(bh->b_rdev);
 
 	if (!mddev || !mddev->pers) {
 		err = -ENXIO;
--- linux/include/linux/lvm.h.lvm.orig	Sun Jun 18 10:10:09 2000
+++ linux/include/linux/lvm.h	Sun Jun 18 10:13:03 2000
@@ -0,0 +1,771 @@
+/*
+ * include/linux/lvm.h
+ * kernel/lvm.h
+ *
+ * Copyright (C) 1997 - 2000  Heinz Mauelshagen, Germany
+ *
+ * February-November 1997
+ * May-July 1998
+ * January-March, July, September, October, December 1999
+ * January,February 2000
+ *
+ * lvm is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ * 
+ * lvm is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA. 
+ *
+ */
+
+/*
+ * Changelog
+ *
+ *    10/10/1997 - beginning of new structure creation
+ *    12/05/1998 - incorporated structures from lvm_v1.h and deleted lvm_v1.h
+ *    07/06/1998 - avoided LVM_KMALLOC_MAX define by using vmalloc/vfree
+ *                 instead of kmalloc/kfree
+ *    01/07/1998 - fixed wrong LVM_MAX_SIZE
+ *    07/07/1998 - extended pe_t structure by ios member (for statistic)
+ *    02/08/1998 - changes for official char/block major numbers
+ *    07/08/1998 - avoided init_module() and cleanup_module() to be static
+ *    29/08/1998 - separated core and disk structure type definitions
+ *    01/09/1998 - merged kernel integration version (mike)
+ *    20/01/1999 - added LVM_PE_DISK_OFFSET macro for use in
+ *                 vg_read_with_pv_and_lv(), pv_move_pe(), pv_show_pe_text()...
+ *    18/02/1999 - added definition of time_disk_t structure for;
+ *                 keeps time stamps on disk for nonatomic writes (future)
+ *    15/03/1999 - corrected LV() and VG() macro definition to use argument
+ *                 instead of minor
+ *    03/07/1999 - define for genhd.c name handling
+ *    23/07/1999 - implemented snapshot part
+ *    08/12/1999 - changed LVM_LV_SIZE_MAX macro to reflect current 1TB limit
+ *    01/01/2000 - extended lv_v2 core structure by wait_queue member
+ *    12/02/2000 - integrated Andrea Arcangeli's snapshot work
+ *    18/02/2000 - separated user and kernel space parts by
+ *                 #ifdef'ing them with __KERNEL__
+ *
+ */
+
+
+#ifndef _LVM_H_INCLUDE
+#define _LVM_H_INCLUDE
+
+#define	_LVM_H_VERSION	"LVM 0.8final (22/02/2000)"
+
+#include <linux/version.h>
+
+/*
+ * preprocessor definitions
+ */
+/* if you like emergency reset code in the driver */
+#define	LVM_TOTAL_RESET
+
+#ifdef __KERNEL__
+#define LVM_GET_INODE
+#define	LVM_HD_NAME
+
+/* lots of debugging output (see driver source)
+   #define DEBUG_LVM_GET_INFO
+   #define DEBUG
+   #define DEBUG_MAP
+   #define DEBUG_MAP_SIZE
+   #define DEBUG_IOCTL
+   #define DEBUG_READ
+   #define DEBUG_GENDISK
+   #define DEBUG_VG_CREATE
+   #define DEBUG_LVM_BLK_OPEN
+   #define DEBUG_KFREE
+ */
+#endif /* #ifdef __KERNEL__ */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#include <linux/kdev_t.h>
+#include <linux/list.h>
+#undef __KERNEL__
+#else
+#include <linux/kdev_t.h>
+#include <linux/list.h>
+#endif /* #ifndef __KERNEL__ */
+
+#include <asm/types.h>
+#include <linux/major.h>
+
+#ifdef __KERNEL__
+#if LINUX_VERSION_CODE >= KERNEL_VERSION ( 2, 3, 0)
+#include <linux/spinlock.h>
+#else
+#include <asm/spinlock.h>
+#endif
+
+#include <asm/semaphore.h>
+#endif /* #ifdef __KERNEL__ */
+
+#include <asm/page.h>
+
+#if !defined ( LVM_BLK_MAJOR) || !defined ( LVM_CHAR_MAJOR)
+#error Bad include/linux/major.h - LVM MAJOR undefined
+#endif
+
+
+#define LVM_STRUCT_VERSION	1	/* structure version */
+
+#ifndef min
+#define min(a,b) (((a)<(b))?(a):(b))
+#endif
+#ifndef max
+#define max(a,b) (((a)>(b))?(a):(b))
+#endif
+
+/* set the default structure version */
+#if ( LVM_STRUCT_VERSION == 1)
+#define pv_t pv_v1_t
+#define lv_t lv_v2_t
+#define vg_t vg_v1_t
+#define pv_disk_t pv_disk_v1_t
+#define lv_disk_t lv_disk_v1_t
+#define vg_disk_t vg_disk_v1_t
+#define lv_block_exception_t lv_block_exception_v1_t
+#endif
+
+
+/*
+ * i/o protocol version
+ *
+ * defined here for the driver and defined separately in the
+ * user-land tools/lib/liblvm.h
+ *
+ */
+#define	LVM_DRIVER_IOP_VERSION	        6
+
+#define LVM_NAME        "lvm"
+
+/*
+ * VG/LV indexing macros
+ */
+/* character minor maps directly to volume group */
+#define	VG_CHR(a) ( a)
+
+/* block minor indexes into a volume group/logical volume indirection table */
+#define	VG_BLK(a)	( vg_lv_map[a].vg_number)
+#define LV_BLK(a)	( vg_lv_map[a].lv_number)
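+
+/* illustrative sketch only (the vg[] pointer array and the vg_lv_map[] table
+ * live in the driver source and are assumed here): for a block device minor
+ * 'minor' the owning structures would be reached as
+ *
+ *    vg_t *vg_ptr = vg[VG_BLK(minor)];
+ *    lv_t *lv_ptr = vg_ptr->lv[LV_BLK(minor)];
+ *
+ * i.e. the minor number is only an index into the indirection table, not the
+ * VG or LV number itself.
+ */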
+
+/*
+ * absolute limits for VGs, PVs per VG and LVs per VG
+ */
+#define ABS_MAX_VG	99
+#define ABS_MAX_PV	256
+#define ABS_MAX_LV	256	/* caused by 8 bit minor */
+
+#define MAX_VG  ABS_MAX_VG
+#define MAX_LV	ABS_MAX_LV
+#define	MAX_PV	ABS_MAX_PV
+
+#if ( MAX_VG > ABS_MAX_VG)
+#undef MAX_VG
+#define MAX_VG ABS_MAX_VG
+#endif
+
+#if ( MAX_LV > ABS_MAX_LV)
+#undef MAX_LV
+#define MAX_LV ABS_MAX_LV
+#endif
+
+
+/*
+ * VGDA: default disk spaces and offsets
+ *
+ *   there's space after the structures for later extensions.
+ *
+ *   offset            what                                size
+ *   ---------------   ----------------------------------  ------------
+ *   0                 physical volume structure           ~500 byte
+ *
+ *   1K                volume group structure              ~200 byte
+ *
+ *   5K                time stamp structure                ~
+ *
+ *   6K                namelist of physical volumes        128 byte each
+ *
+ *   6K + n * 128 byte n logical volume structures         ~300 byte each
+ *
+ *   + m * 328 byte    m physical extent alloc. structs    4 byte each
+ *
+ *   End of disk -     first physical extent               typical 4 megabyte
+ *   PE total *
+ *   PE size
+ *
+ *
+ */
+
+/* DONT TOUCH THESE !!! */
+/* base of PV structure in disk partition */
+#define	LVM_PV_DISK_BASE  	0L
+
+/* size reserved for PV structure on disk */
+#define	LVM_PV_DISK_SIZE  	1024L
+
+/* base of VG structure in disk partition */
+#define	LVM_VG_DISK_BASE  	LVM_PV_DISK_SIZE
+
+/* size reserved for VG structure */
+#define	LVM_VG_DISK_SIZE  	( 9 * 512L)
+
+/* base and size reserved for timekeeping */
+#define	LVM_TIMESTAMP_DISK_BASE	( LVM_VG_DISK_BASE +  LVM_VG_DISK_SIZE)
+#define	LVM_TIMESTAMP_DISK_SIZE	512L	/* reserved for timekeeping */
+
+/* name list of physical volumes on disk */
+#define	LVM_PV_NAMELIST_DISK_BASE ( LVM_TIMESTAMP_DISK_BASE + \
+                                    LVM_TIMESTAMP_DISK_SIZE)
+
+/* now for the dynamically calculated parts of the VGDA */
+#define	LVM_LV_DISK_OFFSET(a, b) ( (a)->lv_on_disk.base + sizeof ( lv_disk_t) * b)
+#define	LVM_DISK_SIZE(pv) 	 ( (pv)->pe_on_disk.base + \
+                                   (pv)->pe_on_disk.size)
+#define	LVM_PE_DISK_OFFSET(pe, pv)	( pe * pv->pe_size + \
+					  ( LVM_DISK_SIZE ( pv) / SECTOR_SIZE))
+#define	LVM_PE_ON_DISK_BASE(pv) \
+   { int rest; \
+     pv->pe_on_disk.base = pv->lv_on_disk.base + pv->lv_on_disk.size; \
+     if ( ( rest = pv->pe_on_disk.base % SECTOR_SIZE) != 0) \
+        pv->pe_on_disk.base += ( SECTOR_SIZE - rest); \
+   }
+/* END default disk spaces and offsets for PVs */
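+
+/* worked example of the static part of the layout above, derived from the
+ * defines (byte offsets into the partition):
+ *
+ *    PV structure          at    0, 1024 bytes reserved
+ *    VG structure          at 1024, 9 * 512 = 4608 bytes reserved
+ *    timestamp structure   at 5632,  512 bytes reserved
+ *    PV name list          at 6144
+ *
+ * LVM_PE_ON_DISK_BASE() then rounds the start of the PE allocation
+ * structures up to the next SECTOR_SIZE boundary.
+ */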
+
+
+/*
+ * LVM_PE_T_MAX corresponds to:
+ *
+ * 8KB PE size can map a ~512 MB logical volume at the cost of 1MB memory,
+ *
+ * 128MB PE size can map a 8TB logical volume at the same cost of memory.
+ *
+ * Default PE size of 4 MB gives a maximum logical volume size of 256 GB.
+ *
+ * Maximum PE size of 16GB gives a maximum logical volume size of 1024 TB.
+ *
+ * AFAIK, current kernels limit this to 1 TB.
+ *
+ * Should be a sufficient spectrum ;*)
+ */
+
+/* This is the usable size of disk_pe_t.le_num !!!        v     v */
+#define	LVM_PE_T_MAX		( ( 1 << ( sizeof ( __u16) * 8)) - 2)
+
+#define	LVM_LV_SIZE_MAX(a)	( ( long long) LVM_PE_T_MAX * (a)->pe_size > ( long long) 2*1024*1024*1024 ? ( long long) 2*1024*1024*1024 : ( long long) LVM_PE_T_MAX * (a)->pe_size)
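+/* worked example: LVM_PE_T_MAX = 2^16 - 2 = 65534 extents per LV; with the
+   default PE size of 4 MB (8192 sectors) this gives 65534 * 8192 sectors,
+   i.e. roughly 256 GB, well below the 2^31 sector (1 TB) clamp above */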
+#define	LVM_MIN_PE_SIZE		( 8L * 2)	/* 8 KB in sectors */
+#define	LVM_MAX_PE_SIZE		( 16L * 1024L * 1024L * 2)	/* 16GB in sectors */
+#define	LVM_DEFAULT_PE_SIZE	( 4096L * 2)	/* 4 MB in sectors */
+#define	LVM_DEFAULT_STRIPE_SIZE	16L	/* 16 KB  */
+#define	LVM_MIN_STRIPE_SIZE	( PAGE_SIZE>>9)	/* PAGE_SIZE in sectors */
+#define	LVM_MAX_STRIPE_SIZE	( 512L * 2)	/* 512 KB in sectors */
+#define	LVM_MAX_STRIPES		128	/* max # of stripes */
+#define	LVM_MAX_SIZE            ( 1024LU * 1024 * 1024 * 2)	/* 1TB[sectors] */
+#define	LVM_MAX_MIRRORS    	2	/* future use */
+#define	LVM_MIN_READ_AHEAD	2	/* minimum read ahead sectors */
+#define	LVM_MAX_READ_AHEAD	120	/* maximum read ahead sectors */
+#define	LVM_MAX_LV_IO_TIMEOUT	60	/* seconds I/O timeout (future use) */
+#define	LVM_PARTITION           0xfe	/* LVM partition id */
+#define	LVM_NEW_PARTITION       0x8e	/* new LVM partition id (10/09/1999) */
+#define	LVM_PE_SIZE_PV_SIZE_REL	5	/* max relation PV size and PE size */
+
+#define	LVM_SNAPSHOT_MAX_CHUNK	1024	/* 1024 KB */
+#define	LVM_SNAPSHOT_DEF_CHUNK	64	/* 64  KB */
+#define	LVM_SNAPSHOT_MIN_CHUNK	1	/* 1   KB */
+
+#define	UNDEF	-1
+#define FALSE	0
+#define TRUE	1
+
+
+/*
+ * ioctls
+ */
+/* volume group */
+#define	VG_CREATE               _IOW ( 0xfe, 0x00, 1)
+#define	VG_REMOVE               _IOW ( 0xfe, 0x01, 1)
+
+#define	VG_EXTEND               _IOW ( 0xfe, 0x03, 1)
+#define	VG_REDUCE               _IOW ( 0xfe, 0x04, 1)
+
+#define	VG_STATUS               _IOWR ( 0xfe, 0x05, 1)
+#define	VG_STATUS_GET_COUNT     _IOWR ( 0xfe, 0x06, 1)
+#define	VG_STATUS_GET_NAMELIST  _IOWR ( 0xfe, 0x07, 1)
+
+#define	VG_SET_EXTENDABLE       _IOW ( 0xfe, 0x08, 1)
+
+
+/* logical volume */
+#define	LV_CREATE               _IOW ( 0xfe, 0x20, 1)
+#define	LV_REMOVE               _IOW ( 0xfe, 0x21, 1)
+
+#define	LV_ACTIVATE             _IO ( 0xfe, 0x22)
+#define	LV_DEACTIVATE           _IO ( 0xfe, 0x23)
+
+#define	LV_EXTEND               _IOW ( 0xfe, 0x24, 1)
+#define	LV_REDUCE               _IOW ( 0xfe, 0x25, 1)
+
+#define	LV_STATUS_BYNAME        _IOWR ( 0xfe, 0x26, 1)
+#define	LV_STATUS_BYINDEX       _IOWR ( 0xfe, 0x27, 1)
+
+#define LV_SET_ACCESS           _IOW ( 0xfe, 0x28, 1)
+#define LV_SET_ALLOCATION       _IOW ( 0xfe, 0x29, 1)
+#define LV_SET_STATUS           _IOW ( 0xfe, 0x2a, 1)
+
+#define LE_REMAP                _IOW ( 0xfe, 0x2b, 1)
+
+
+/* physical volume */
+#define	PV_STATUS               _IOWR ( 0xfe, 0x40, 1)
+#define	PV_CHANGE               _IOWR ( 0xfe, 0x41, 1)
+#define	PV_FLUSH                _IOW ( 0xfe, 0x42, 1)
+
+/* physical extent */
+#define	PE_LOCK_UNLOCK          _IOW ( 0xfe, 0x50, 1)
+
+/* i/o protocol version */
+#define	LVM_GET_IOP_VERSION     _IOR ( 0xfe, 0x98, 1)
+
+#ifdef LVM_TOTAL_RESET
+/* special reset function for testing purposes */
+#define	LVM_RESET               _IO ( 0xfe, 0x99)
+#endif
+
+/* lock the logical volume manager */
+#define	LVM_LOCK_LVM            _IO ( 0xfe, 0x100)
+/* END ioctls */
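+
+/*
+ * illustrative sketch only (not part of the driver): user-space tools are
+ * expected to issue these ioctls on the LVM character device.  The device
+ * node name and the exact type of the version argument below are assumptions
+ * made for this example.
+ *
+ *    unsigned short iop = 0;
+ *    int fd = open("/dev/lvm", O_RDONLY);
+ *
+ *    if (fd >= 0 && ioctl(fd, LVM_GET_IOP_VERSION, &iop) == 0 &&
+ *        iop != LVM_DRIVER_IOP_VERSION)
+ *            fprintf(stderr, "tools and driver protocol versions differ\n");
+ */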
+
+
+/*
+ * Status flags
+ */
+/* volume group */
+#define	VG_ACTIVE            0x01	/* vg_status */
+#define	VG_EXPORTED          0x02	/*     "     */
+#define	VG_EXTENDABLE        0x04	/*     "     */
+
+#define	VG_READ              0x01	/* vg_access */
+#define	VG_WRITE             0x02	/*     "     */
+
+/* logical volume */
+#define	LV_ACTIVE            0x01	/* lv_status */
+#define	LV_SPINDOWN          0x02	/*     "     */
+
+#define	LV_READ              0x01	/* lv_access */
+#define	LV_WRITE             0x02	/*     "     */
+#define	LV_SNAPSHOT          0x04	/*     "     */
+#define	LV_SNAPSHOT_ORG      0x08	/*     "     */
+
+#define	LV_BADBLOCK_ON       0x01	/* lv_badblock */
+
+#define	LV_STRICT            0x01	/* lv_allocation */
+#define	LV_CONTIGUOUS        0x02	/*       "       */
+
+/* physical volume */
+#define	PV_ACTIVE            0x01	/* pv_status */
+#define	PV_ALLOCATABLE       0x02	/* pv_allocatable */
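+
+/* the flags of each group above are or-ed together in the field named in the
+   trailing comment; a sketch of a typical test would be, for instance,
+   (lv->lv_access & LV_SNAPSHOT) to recognise a snapshot logical volume */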
+
+
+/*
+ * Structure definitions core/disk follow
+ *
+ * conditional conversion takes place on big endian architectures
+ * in the functions pv_copy_*(), vg_copy_*() and lv_copy_*()
+ *
+ */
+
+#define	NAME_LEN		128	/* don't change!!! */
+#define	UUID_LEN		16	/* don't change!!! */
+
+/* remap physical sector/rdev pairs including hash */
+typedef struct {
+    struct list_head hash;
+    ulong rsector_org;
+    kdev_t rdev_org;
+    ulong rsector_new;
+    kdev_t rdev_new;
+} lv_block_exception_v1_t;
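+/* roughly: during snapshot copy-on-write an original (rdev_org, rsector_org)
+   location that has already been copied is found via the hash list and the
+   request is redirected to (rdev_new, rsector_new) on the snapshot space */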
+
+/* disk stored pe information */
+typedef struct {
+    __u16 lv_num;
+    __u16 le_num;
+} disk_pe_t;
+
+/* disk stored PV, VG, LV and PE size and offset information */
+typedef struct {
+    __u32 base;
+    __u32 size;
+} lvm_disk_data_t;
+
+
+/*
+ * Structure Physical Volume (PV) Version 1
+ */
+
+/* core */
+typedef struct {
+    __u8 id[2];		/* Identifier */
+    __u16 version;		/* HM lvm version */
+    lvm_disk_data_t pv_on_disk;
+    lvm_disk_data_t vg_on_disk;
+    lvm_disk_data_t pv_namelist_on_disk;
+    lvm_disk_data_t lv_on_disk;
+    lvm_disk_data_t pe_on_disk;
+    __u8 pv_name[NAME_LEN];
+    __u8 vg_name[NAME_LEN];
+    __u8 system_id[NAME_LEN];	/* for vgexport/vgimport */
+    kdev_t pv_dev;
+    __u32 pv_number;
+    __u32 pv_status;
+    __u32 pv_allocatable;
+    __u32 pv_size;		/* HM */
+    __u32 lv_cur;
+    __u32 pe_size;
+    __u32 pe_total;
+    __u32 pe_allocated;
+    __u32 pe_stale;		/* for future use */
+
+    disk_pe_t *pe;		/* HM */
+    struct inode *inode;	/* HM */
+} pv_v1_t;
+
+/* disk */
+typedef struct {
+    __u8 id[2];			/* Identifier */
+    __u16 version;		/* HM lvm version */
+    lvm_disk_data_t pv_on_disk;
+    lvm_disk_data_t vg_on_disk;
+    lvm_disk_data_t pv_namelist_on_disk;
+    lvm_disk_data_t lv_on_disk;
+    lvm_disk_data_t pe_on_disk;
+    __u8 pv_name[NAME_LEN];
+    __u8 vg_name[NAME_LEN];
+    __u8 system_id[NAME_LEN];	/* for vgexport/vgimport */
+    __u32 pv_major;
+    __u32 pv_number;
+    __u32 pv_status;
+    __u32 pv_allocatable;
+    __u32 pv_size;		/* HM */
+    __u32 lv_cur;
+    __u32 pe_size;
+    __u32 pe_total;
+    __u32 pe_allocated;
+} pv_disk_v1_t;
+
+
+/*
+ * Structure Physical Volume (PV) Version 2 (future!)
+ */
+
+typedef struct {
+    __u8 id[2];		/* Identifier */
+    __u16 version;		/* HM lvm version */
+    lvm_disk_data_t pv_on_disk;
+    lvm_disk_data_t vg_on_disk;
+    lvm_disk_data_t pv_uuid_on_disk;
+    lvm_disk_data_t lv_on_disk;
+    lvm_disk_data_t pe_on_disk;
+    __u8 pv_name[NAME_LEN];
+    __u8 vg_name[NAME_LEN];
+    __u8 system_id[NAME_LEN];	/* for vgexport/vgimport */
+    kdev_t pv_dev;
+    __u32 pv_number;
+    __u32 pv_status;
+    __u32 pv_allocatable;
+    __u32 pv_size;		/* HM */
+    __u32 lv_cur;
+    __u32 pe_size;
+    __u32 pe_total;
+    __u32 pe_allocated;
+    __u32 pe_stale;		/* for future use */
+    disk_pe_t *pe;		/* HM */
+    struct inode *inode;	/* HM */
+    /* delta to version 1 starts here */
+    __u8 pv_uuid[UUID_LEN];
+    __u32 pv_atime;		/* PV access time */
+    __u32 pv_ctime;		/* PV creation time */
+    __u32 pv_mtime;		/* PV modification time */
+} pv_v2_t;
+
+
+/*
+ * Structures for Logical Volume (LV)
+ */
+
+/* core PE information */
+typedef struct {
+    kdev_t dev;
+    __u32 pe;		/* to be changed if > 2TB */
+    __u32 reads;
+    __u32 writes;
+} pe_t;
+
+typedef struct {
+    __u8 lv_name[NAME_LEN];
+    kdev_t old_dev;
+    kdev_t new_dev;
+    __u32 old_pe;
+    __u32 new_pe;
+} le_remap_req_t;
+
+/*
+ * Structure Logical Volume (LV) Version 1
+ */
+
+/*
+ * Structure Logical Volume (LV) Version 2
+ */
+
+/* core */
+typedef struct lv_v2 {
+    __u8 lv_name[NAME_LEN];
+    __u8 vg_name[NAME_LEN];
+    __u32 lv_access;
+    __u32 lv_status;
+    __u32 lv_open;		/* HM */
+    kdev_t lv_dev;		/* HM */
+    __u32 lv_number;		/* HM */
+    __u32 lv_mirror_copies;	/* for future use */
+    __u32 lv_recovery;		/*       "        */
+    __u32 lv_schedule;		/*       "        */
+    __u32 lv_size;
+    pe_t *lv_current_pe;	/* HM */
+    __u32 lv_current_le;	/* for future use */
+    __u32 lv_allocated_le;
+    __u32 lv_stripes;
+    __u32 lv_stripesize;
+    __u32 lv_badblock;		/* for future use */
+    __u32 lv_allocation;
+    __u32 lv_io_timeout;	/* for future use */
+    __u32 lv_read_ahead;
+
+    /* delta to version 1 starts here */
+    struct lv_v2 *lv_snapshot_org;
+    struct lv_v2 *lv_snapshot_prev;
+    struct lv_v2 *lv_snapshot_next;
+    lv_block_exception_t *lv_block_exception;
+    __u8 __unused;
+    __u32 lv_remap_ptr;
+    __u32 lv_remap_end;
+    __u32 lv_chunk_size;
+    __u32 lv_snapshot_minor;
+#ifdef __KERNEL__
+    struct kiobuf *lv_iobuf;
+    struct semaphore lv_snapshot_sem;
+    struct list_head *lv_snapshot_hash_table;
+    ulong  lv_snapshot_hash_mask;
+#else
+    char  dummy[200];
+#endif
+} lv_v2_t;
+
+/* disk */
+typedef struct {
+    __u8 lv_name[NAME_LEN];
+    __u8 vg_name[NAME_LEN];
+    __u32 lv_access;
+    __u32 lv_status;
+    __u32 lv_open;		/* HM */
+    __u32 lv_dev;		/* HM */
+    __u32 lv_number;		/* HM */
+    __u32 lv_mirror_copies;	/* for future use */
+    __u32 lv_recovery;		/*       "        */
+    __u32 lv_schedule;		/*       "        */
+    __u32 lv_size;
+    __u32 dummy;
+    __u32 lv_current_le;	/* for future use */
+    __u32 lv_allocated_le;
+    __u32 lv_stripes;
+    __u32 lv_stripesize;
+    __u32 lv_badblock;		/* for future use */
+    __u32 lv_allocation;
+    __u32 lv_io_timeout;	/* for future use */
+    __u32 lv_read_ahead;	/* HM, for future use */
+} lv_disk_v1_t;
+
+/*
+ * Structure Volume Group (VG) Version 1
+ */
+
+/* core */
+typedef struct {
+    __u8 vg_name[NAME_LEN];	/* volume group name */
+    __u32 vg_number;		/* volume group number */
+    __u32 vg_access;		/* read/write */
+    __u32 vg_status;		/* active or not */
+    __u32 lv_max;		/* maximum logical volumes */
+    __u32 lv_cur;		/* current logical volumes */
+    __u32 lv_open;		/* open    logical volumes */
+    __u32 pv_max;		/* maximum physical volumes */
+    __u32 pv_cur;		/* current physical volumes FU */
+    __u32 pv_act;		/* active physical volumes */
+    __u32 dummy;		/* was obsolete max_pe_per_pv */
+    __u32 vgda;		/* volume group descriptor arrays FU */
+    __u32 pe_size;		/* physical extent size in sectors */
+    __u32 pe_total;		/* total of physical extents */
+    __u32 pe_allocated;	/* allocated physical extents */
+    __u32 pvg_total;		/* physical volume groups FU */
+    struct proc_dir_entry *proc;
+    pv_t *pv[ABS_MAX_PV + 1];	/* physical volume struct pointers */
+    lv_t *lv[ABS_MAX_LV + 1];	/* logical  volume struct pointers */
+} vg_v1_t;
+
+typedef struct {
+    __u8 vg_name[NAME_LEN];	/* volume group name */
+    __u32 vg_number;		/* volume group number */
+    __u32 vg_access;		/* read/write */
+    __u32 vg_status;		/* active or not */
+    __u32 lv_max;		/* maximum logical volumes */
+    __u32 lv_cur;		/* current logical volumes */
+    __u32 lv_open;		/* open    logical volumes */
+    __u32 pv_max;		/* maximum physical volumes */
+    __u32 pv_cur;		/* current physical volumes FU */
+    __u32 pv_act;		/* active physical volumes */
+    __u32 dummy;
+    __u32 vgda;		/* volume group descriptor arrays FU */
+    __u32 pe_size;		/* physical extent size in sectors */
+    __u32 pe_total;		/* total of physical extents */
+    __u32 pe_allocated;	/* allocated physical extents */
+    __u32 pvg_total;		/* physical volume groups FU */
+} vg_disk_v1_t;
+
+/*
+ * Structure Volume Group (VG) Version 2
+ */
+
+typedef struct {
+    __u8 vg_name[NAME_LEN];	/* volume group name */
+    __u32 vg_number;		/* volume group number */
+    __u32 vg_access;		/* read/write */
+    __u32 vg_status;		/* active or not */
+    __u32 lv_max;		/* maximum logical volumes */
+    __u32 lv_cur;		/* current logical volumes */
+    __u32 lv_open;		/* open    logical volumes */
+    __u32 pv_max;		/* maximum physical volumes */
+    __u32 pv_cur;		/* current physical volumes FU */
+    __u32 pv_act;		/* future: active physical volumes */
+    __u32 max_pe_per_pv;	/* OBSOLETE maximum PE/PV */
+    __u32 vgda;		/* volume group descriptor arrays FU */
+    __u32 pe_size;		/* physical extent size in sectors */
+    __u32 pe_total;		/* total of physical extents */
+    __u32 pe_allocated;	/* allocated physical extents */
+    __u32 pvg_total;		/* physical volume groups FU */
+    struct proc_dir_entry *proc;
+    pv_t *pv[ABS_MAX_PV + 1];	/* physical volume struct pointers */
+    lv_t *lv[ABS_MAX_LV + 1];	/* logical  volume struct pointers */
+    /* delta to version 1 starts here */
+    __u8 vg_uuid[UUID_LEN];	/*  volume group UUID */
+    time_t vg_atime;		/* VG access time */
+    time_t vg_ctime;		/* VG creation time */
+    time_t vg_mtime;		/* VG modification time */
+} vg_v2_t;
+
+
+/*
+ * Timekeeping structure on disk (future)
+ *
+ * Holds several timestamps for start/stop time of non
+ * atomic VGDA disk i/o operations
+ *
+ */
+
+typedef struct {
+    __u32 seconds;		/* seconds since the epoch */
+    __u32 jiffies;		/* micro timer */
+} lvm_time_t;
+
+#define	TIMESTAMP_ID_SIZE	2
+typedef struct {
+    __u8 id[TIMESTAMP_ID_SIZE];	/* Identifier */
+    lvm_time_t pv_vg_lv_pe_io_begin;
+    lvm_time_t pv_vg_lv_pe_io_end;
+    lvm_time_t pv_io_begin;
+    lvm_time_t pv_io_end;
+    lvm_time_t vg_io_begin;
+    lvm_time_t vg_io_end;
+    lvm_time_t lv_io_begin;
+    lvm_time_t lv_io_end;
+    lvm_time_t pe_io_begin;
+    lvm_time_t pe_io_end;
+    lvm_time_t pe_move_io_begin;
+    lvm_time_t pe_move_io_end;
+    __u8 dummy[LVM_TIMESTAMP_DISK_SIZE -
+		  TIMESTAMP_ID_SIZE -
+		  12 * sizeof(lvm_time_t)];
+    /* ATTENTION  ^^ */
+} timestamp_disk_t;
+
+/* same on disk and in core so far */
+typedef timestamp_disk_t timestamp_t;
+
+/* function identifiers for timestamp actions */
+typedef enum {
+    PV_VG_LV_PE_IO_BEGIN,
+    PV_VG_LV_PE_IO_END,
+    PV_IO_BEGIN,
+    PV_IO_END,
+    VG_IO_BEGIN,
+    VG_IO_END,
+    LV_IO_BEGIN,
+    LV_IO_END,
+    PE_IO_BEGIN,
+    PE_IO_END,
+    PE_MOVE_IO_BEGIN,
+    PE_MOVE_IO_END
+} ts_fct_id_t;
+
+
+/*
+ * Request structures for ioctls
+ */
+
+/* Request structure PV_STATUS */
+typedef struct {
+    char pv_name[NAME_LEN];
+    pv_t *pv;
+} pv_status_req_t, pv_change_req_t;
+
+/* Request structure PV_FLUSH */
+typedef struct {
+    char pv_name[NAME_LEN];
+    kdev_t pv_dev;
+} pv_flush_req_t;
+
+
+/* Request structure PE_LOCK_UNLOCK (PE locking while moving extents) */
+typedef struct {
+    enum {
+        LOCK_PE, UNLOCK_PE
+    } lock;
+    struct {
+        kdev_t lv_dev;
+        kdev_t pv_dev;
+        __u32 pv_offset;
+    } data;
+} pe_lock_req_t;
+
+
+/* Request structure LV_STATUS_BYNAME */
+typedef struct {
+    char lv_name[NAME_LEN];
+    lv_t *lv;
+} lv_status_byname_req_t, lv_req_t;
+
+/* Request structure LV_STATUS_BYINDEX */
+typedef struct {
+    __u32 lv_index;
+    lv_t *lv;
+} lv_status_byindex_req_t;
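+
+/*
+ * illustrative sketch only: a tool querying a single LV by name could fill a
+ * request like the one below and pass it to the character device.  The exact
+ * name format and the way the driver fills the user-supplied lv buffer are
+ * assumptions made for this example.
+ *
+ *    lv_t lv_buf;
+ *    lv_status_byname_req_t req;
+ *
+ *    strncpy(req.lv_name, "/dev/vg00/lvol1", NAME_LEN);
+ *    req.lv = &lv_buf;
+ *    ioctl(fd, LV_STATUS_BYNAME, &req);
+ */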
+
+#endif				/* #ifndef _LVM_H_INCLUDE */
--- linux/kernel/ksyms.c.lvm.orig	Fri Jun 16 17:01:53 2000
+++ linux/kernel/ksyms.c	Sun Jun 18 10:10:09 2000
@@ -75,6 +75,14 @@
 #endif
 EXPORT_SYMBOL(get_options);
 
+#ifdef CONFIG_BLK_DEV_LVM_MODULE
+   extern int (*lvm_map_ptr) ( int, kdev_t *, unsigned long *,
+                               unsigned long, int);
+   extern void (*lvm_hd_name_ptr) ( char*, int);
+   EXPORT_SYMBOL(lvm_map_ptr);
+   EXPORT_SYMBOL(lvm_hd_name_ptr);
+#endif
+
 /* process memory management */
 EXPORT_SYMBOL(do_mmap);
 EXPORT_SYMBOL(do_munmap);
@@ -110,6 +117,7 @@
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(remap_page_range);
 EXPORT_SYMBOL(max_mapnr);
+EXPORT_SYMBOL(num_physpages);
 EXPORT_SYMBOL(high_memory);
 EXPORT_SYMBOL(update_vm_cache);
 EXPORT_SYMBOL(update_vm_cache_conditional);
