path: root/drivers/char/ftape/compressor
Diffstat (limited to 'drivers/char/ftape/compressor')
-rw-r--r--  drivers/char/ftape/compressor/Makefile           |   31
-rw-r--r--  drivers/char/ftape/compressor/lzrw3.c             |  743
-rw-r--r--  drivers/char/ftape/compressor/lzrw3.h             |  253
-rw-r--r--  drivers/char/ftape/compressor/zftape-compress.c   | 1203
-rw-r--r--  drivers/char/ftape/compressor/zftape-compress.h   |   83
5 files changed, 2313 insertions, 0 deletions
diff --git a/drivers/char/ftape/compressor/Makefile b/drivers/char/ftape/compressor/Makefile
new file mode 100644
index 000000000000..1fbd6c4019db
--- /dev/null
+++ b/drivers/char/ftape/compressor/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 1997 Claus-Justus Heine.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to
+# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+# $Source: /homes/cvs/ftape-stacked/ftape/compressor/Makefile,v $
+# $Revision: 1.1 $
+# $Date: 1997/10/05 19:12:28 $
+#
+# Makefile for the optional compressor for the zftape VFS
+# interface to the QIC-40/80/3010/3020 floppy-tape driver for
+# Linux.
+#
+
+obj-$(CONFIG_ZFT_COMPRESSOR) += zft-compressor.o
+
+zft-compressor-objs := zftape-compress.o lzrw3.o
+
+CFLAGS_lzrw3.o := -O6 -funroll-all-loops
diff --git a/drivers/char/ftape/compressor/lzrw3.c b/drivers/char/ftape/compressor/lzrw3.c
new file mode 100644
index 000000000000..a032a0ee2a99
--- /dev/null
+++ b/drivers/char/ftape/compressor/lzrw3.c
@@ -0,0 +1,743 @@
+/*
+ * $Source: /homes/cvs/ftape-stacked/ftape/compressor/lzrw3.c,v $
+ * $Revision: 1.1 $
+ * $Date: 1997/10/05 19:12:29 $
+ *
+ * Implementation of Ross Williams' lzrw3 algorithm. Adaptation for zftape.
+ *
+ */
+
+#include "../compressor/lzrw3.h" /* Defines single exported function "compress". */
+
+/******************************************************************************/
+/* */
+/* LZRW3.C */
+/* */
+/******************************************************************************/
+/* */
+/* Author : Ross Williams. */
+/* Date : 30-Jun-1991. */
+/* Release : 1. */
+/* */
+/******************************************************************************/
+/* */
+/* This file contains an implementation of the LZRW3 data compression */
+/* algorithm in C. */
+/* */
+/* The algorithm is a general purpose compression algorithm that runs fast */
+/* and gives reasonable compression. The algorithm is a member of the Lempel */
+/* Ziv family of algorithms and bases its compression on the presence in the */
+/* data of repeated substrings. */
+/* */
+/* This algorithm is unpatented and the code is public domain. As the */
+/* algorithm is based on the LZ77 class of algorithms, it is unlikely to be */
+/* the subject of a patent challenge. */
+/* */
+/* Unlike the LZRW1 and LZRW1-A algorithms, the LZRW3 algorithm is */
+/* deterministic and is guaranteed to yield the same compressed */
+/* representation for a given file each time it is run. */
+/* */
+/* The LZRW3 algorithm was originally designed and implemented */
+/* by Ross Williams on 31-Dec-1990. */
+/* */
+/* Here are the results of applying this code, compiled under THINK C 4.0 */
+/* and running on a Mac-SE (8MHz 68000), to the standard calgary corpus. */
+/* */
+/* +----------------------------------------------------------------+ */
+/* | DATA COMPRESSION TEST | */
+/* | ===================== | */
+/* | Time of run : Sun 30-Jun-1991 09:31PM | */
+/* | Timing accuracy : One part in 100 | */
+/* | Context length : 262144 bytes (= 256.0000K) | */
+/* | Test suite : Calgary Corpus Suite | */
+/* | Files in suite : 14 | */
+/* | Algorithm : LZRW3 | */
+/* | Note: All averages are calculated from the un-rounded values. | */
+/* +----------------------------------------------------------------+ */
+/* | File Name Length CxB ComLen %Remn Bits Com K/s Dec K/s | */
+/* | ---------- ------ --- ------ ----- ---- ------- ------- | */
+/* | rpus:Bib.D 111261 1 55033 49.5 3.96 19.46 32.27 | */
+/* | us:Book1.D 768771 3 467962 60.9 4.87 17.03 31.07 | */
+/* | us:Book2.D 610856 3 317102 51.9 4.15 19.39 34.15 | */
+/* | rpus:Geo.D 102400 1 82424 80.5 6.44 11.65 18.18 | */
+/* | pus:News.D 377109 2 205670 54.5 4.36 17.14 27.47 | */
+/* | pus:Obj1.D 21504 1 13027 60.6 4.85 13.40 18.95 | */
+/* | pus:Obj2.D 246814 1 116286 47.1 3.77 19.31 30.10 | */
+/* | s:Paper1.D 53161 1 27522 51.8 4.14 18.60 31.15 | */
+/* | s:Paper2.D 82199 1 45160 54.9 4.40 18.45 32.84 | */
+/* | rpus:Pic.D 513216 2 122388 23.8 1.91 35.29 51.05 | */
+/* | us:Progc.D 39611 1 19669 49.7 3.97 18.87 30.64 | */
+/* | us:Progl.D 71646 1 28247 39.4 3.15 24.34 40.66 | */
+/* | us:Progp.D 49379 1 19377 39.2 3.14 23.91 39.23 | */
+/* | us:Trans.D 93695 1 33481 35.7 2.86 25.48 40.37 | */
+/* +----------------------------------------------------------------+ */
+/* | Average 224401 1 110953 50.0 4.00 20.17 32.72 | */
+/* +----------------------------------------------------------------+ */
+/* */
+/******************************************************************************/
+
+/******************************************************************************/
+
+/* The following structure is returned by the "compress" function below when */
+/* the user asks the function to return identifying information. */
+/* The most important field in the record is the working memory field which */
+/* tells the calling program how much working memory should be passed to */
+/* "compress" when it is called to perform a compression or decompression. */
+/* LZRW3 uses the same amount of memory during compression and decompression. */
+/* For more information on this structure see "compress.h". */
+
+#define U(X) ((ULONG) X)
+#define SIZE_P_BYTE (U(sizeof(UBYTE *)))
+#define SIZE_WORD (U(sizeof(UWORD )))
+#define ALIGNMENT_FUDGE (U(16))
+#define MEM_REQ ( U(4096)*(SIZE_P_BYTE) + ALIGNMENT_FUDGE )
+
+static struct compress_identity identity =
+{
+ U(0x032DDEA8), /* Algorithm identification number. */
+ MEM_REQ, /* Working memory (bytes) required. */
+ "LZRW3", /* Name of algorithm. */
+ "1.0", /* Version number of algorithm. */
+ "31-Dec-1990", /* Date of algorithm. */
+ "Public Domain", /* Copyright notice. */
+ "Ross N. Williams", /* Author of algorithm. */
+ "Renaissance Software", /* Affiliation of author. */
+ "Public Domain" /* Vendor of algorithm. */
+};
+
+LOCAL void compress_compress (UBYTE *,UBYTE *,ULONG,UBYTE *, LONG *);
+LOCAL void compress_decompress(UBYTE *,UBYTE *,LONG, UBYTE *, ULONG *);
+
+/******************************************************************************/
+
+/* This function is the only function exported by this module. */
+/* Depending on its first parameter, the function can be requested to */
+/* compress a block of memory, decompress a block of memory, or to identify */
+/* itself. For more information, see the specification file "compress.h". */
+
+EXPORT void lzrw3_compress(
+ UWORD action, /* Action to be performed. */
+ UBYTE *wrk_mem, /* Address of working memory we can use.*/
+ UBYTE *src_adr, /* Address of input data. */
+ LONG src_len, /* Length of input data. */
+ UBYTE *dst_adr, /* Address to put output data. */
+ void *p_dst_len /* Address of longword for length of output data.*/
+)
+{
+ switch (action)
+ {
+ case COMPRESS_ACTION_IDENTITY:
+ *((struct compress_identity **)p_dst_len)= &identity;
+ break;
+ case COMPRESS_ACTION_COMPRESS:
+ compress_compress(wrk_mem,src_adr,src_len,dst_adr,(LONG *)p_dst_len);
+ break;
+ case COMPRESS_ACTION_DECOMPRESS:
+ compress_decompress(wrk_mem,src_adr,src_len,dst_adr,(LONG *)p_dst_len);
+ break;
+ }
+}
+
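+/* Added usage sketch -- not part of the original source. It shows one way   */
+/* to drive the dispatcher above for a compress/decompress round trip. The   */
+/* negative-length convention for uncompressed data is the zftape change     */
+/* visible at the end of compress_compress() below.                          */
+#if 0
+static void lzrw3_round_trip(UBYTE *wrk_mem,  /* MEM_REQ bytes of work space */
+                             UBYTE *src, ULONG src_len,
+                             UBYTE *cmp,      /* src_len+MAX_CMP_GROUP bytes */
+                             UBYTE *out)      /* src_len bytes               */
+{
+ LONG  cmp_len;
+ ULONG out_len = src_len; /* capacity of 'out', used by the corruption guard */
+
+ lzrw3_compress(COMPRESS_ACTION_COMPRESS,   wrk_mem, src, src_len, cmp, &cmp_len);
+ lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, wrk_mem, cmp, cmp_len, out, &out_len);
+ /* cmp_len<0 means the data was stored uncompressed; out_len is the number  */
+ /* of bytes recovered (0 indicates corrupt compressed input).                */
+}
+#endif
+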
+/******************************************************************************/
+/* */
+/* BRIEF DESCRIPTION OF THE LZRW3 ALGORITHM */
+/* ======================================== */
+/* The LZRW3 algorithm is identical to the LZRW1-A algorithm except that */
+/* instead of transmitting history offsets, it transmits hash table indexes. */
+/* In order to decode the indexes, the decompressor must maintain an */
+/* identical hash table. Copy items are straightforward: when the decompressor */
+/* receives a copy item, it simply looks up the hash table to translate the */
+/* index into a pointer into the data already decompressed. To update the */
+/* hash table, it replaces the same table entry with a pointer to the start */
+/* of the newly decoded phrase. The tricky part is with literal items, for at */
+/* the time that the decompressor receives a literal item the decompressor */
+/* does not have the three bytes in the Ziv (that the compressor has) to */
+/* perform the three-byte hash. To solve this problem, in LZRW3, both the */
+/* compressor and decompressor are wired up so that they "buffer" these */
+/* literals and update their hash tables only when three bytes are available. */
+/* This makes the maximum buffering 2 bytes. */
+/* */
+/* Replacement of offsets by hash table indexes yields a few percent extra */
+/* compression at the cost of some speed. LZRW3 is slower than LZRW1, LZRW1-A */
+/* and LZRW2, but yields better compression. */
+/* */
+/* Extra compression could be obtained by using a hash table of depth two. */
+/* However, increasing the depth above one incurs a significant decrease in */
+/* compression speed which was not considered worthwhile. Another reason for */
+/* keeping the depth down to one was to allow easy comparison with the */
+/* LZRW1-A and LZRW2 algorithms so as to demonstrate the exact effect of the */
+/* use of direct hash indexes. */
+/* */
+/* +---+ */
+/* |___|4095 */
+/* |___| */
+/* +---------------------*_|<---+ /----+---\ */
+/* | |___| +---|Hash | */
+/* | |___| |Function| */
+/* | |___| \--------/ */
+/* | |___|0 ^ */
+/* | +---+ | */
+/* | Hash +-----+ */
+/* | Table | */
+/* | --- */
+/* v ^^^ */
+/* +-------------------------------------|----------------+ */
+/* |||||||||||||||||||||||||||||||||||||||||||||||||||||||| */
+/* +-------------------------------------|----------------+ */
+/* | |1......18| | */
+/* |<------- Lempel=History ------------>|<--Ziv-->| | */
+/* | (=bytes already processed) |<-Still to go-->| */
+/* |<-------------------- INPUT BLOCK ------------------->| */
+/* */
+/* The diagram above for LZRW3 looks almost identical to the diagram for */
+/* LZRW1. The difference is that in LZRW3, the compressor transmits hash */
+/* table indices instead of Lempel offsets. For this to work, the */
+/* decompressor must maintain a hash table as well as the compressor and both */
+/* compressor and decompressor must "buffer" literals, as the decompressor */
+/* cannot hash phrases commencing with a literal until another two bytes have */
+/* arrived. */
+/* */
+/* LZRW3 Algorithm Execution Summary */
+/* --------------------------------- */
+/* 1. Hash the first three bytes of the Ziv to yield a hash table index h. */
+/* 2. Look up the hash table yielding history pointer p. */
+/* 3. Match where p points with the Ziv. If there is a match of three or */
+/* more bytes, code those bytes (in the Ziv) as a copy item, otherwise */
+/* code the next byte in the Ziv as a literal item. */
+/* 4. Update the hash table as possible subject to the constraint that only */
+/* phrases commencing three bytes back from the Ziv can be hashed and */
+/* entered into the hash table. (This enables the decompressor to keep */
+/* pace). See the description and code for more details. */
+/* */
+/******************************************************************************/
+/* */
+/* DEFINITION OF COMPRESSED FILE FORMAT */
+/* ==================================== */
+/* * A compressed file consists of a COPY FLAG followed by a REMAINDER. */
+/* * The copy flag CF uses up four bytes with the first byte being the */
+/* least significant. */
+/* * If CF=1, then the compressed file represents the remainder of the file */
+/* exactly. Otherwise CF=0 and the remainder of the file consists of zero */
+/* or more GROUPS, each of which represents one or more bytes. */
+/* * Each group consists of two bytes of CONTROL information followed by */
+/* sixteen ITEMs except for the last group which can contain from one */
+/* to sixteen items. */
+/* * An item can be either a LITERAL item or a COPY item. */
+/* * Each item corresponds to a bit in the control bytes. */
+/* * The first control byte corresponds to the first 8 items in the group */
+/* with bit 0 corresponding to the first item in the group and bit 7 to */
+/* the eighth item in the group. */
+/* * The second control byte corresponds to the second 8 items in the group */
+/* with bit 0 corresponding to the ninth item in the group and bit 7 to */
+/* the sixteenth item in the group. */
+/* * A zero bit in a control word means that the corresponding item is a */
+/* literal item. A one bit corresponds to a copy item. */
+/* * A literal item consists of a single byte which represents itself. */
+/* * A copy item consists of two bytes that represent from 3 to 18 bytes. */
+/* * The first byte in a copy item will be denoted C1. */
+/* * The second byte in a copy item will be denoted C2. */
+/* * Bits will be selected using square brackets. */
+/*      For example: C1[0..3] is the low nibble of the first byte            */
+/*      of copy item C1.                                                     */
+/* * The LENGTH of a copy item is defined to be C1[0..3]+3 which is a number */
+/* in the range [3,18]. */
+/* * The INDEX of a copy item is defined to be C1[4..7]*256+C2[0..7] which   */
+/* is a number in the range [0,4095]. */
+/* * A copy item represents the sequence of bytes */
+/* text[POS-OFFSET..POS-OFFSET+LENGTH-1] where */
+/* text is the entire text of the uncompressed string. */
+/* POS is the index in the text of the character following the */
+/*                 string represented by all the items preceding the item    */
+/* being defined. */
+/* OFFSET is obtained from INDEX by looking up the hash table. */
+/* */
+/******************************************************************************/
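+
+/* Added illustrative sketch -- not part of the original LZRW3 sources: how  */
+/* the LENGTH and INDEX of a copy item are recovered from its two bytes C1   */
+/* and C2, following the format definition above.                            */
+#if 0
+static void decode_copy_item(UBYTE c1, UBYTE c2, UWORD *length, UWORD *index)
+{
+ *length = (c1 & 0x0F) + 3;           /* C1[0..3]+3, in the range [3,18]  */
+ *index  = ((c1 & 0xF0) << 4) | c2;   /* C1[4..7]*256+C2, in [0,4095]     */
+}
+#endif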
+
+/* The following #define defines the length of the copy flag that appears at */
+/* the start of the compressed file. The value of four bytes was chosen */
+/* because the fast_copy routine on my Macintosh runs faster if the source */
+/* and destination blocks are relatively longword aligned. */
+/* The actual flag data appears in the first byte. The rest are zeroed so as */
+/* to normalize the compressed representation (i.e. not non-deterministic). */
+#define FLAG_BYTES 4
+
+/* The following #defines define the meaning of the values of the copy */
+/* flag at the start of the compressed file. */
+#define FLAG_COMPRESS 0 /* Signals that output was result of compression. */
+#define FLAG_COPY 1 /* Signals that output was simply copied over. */
+
+/* The 68000 microprocessor (on which this algorithm was originally developed) */
+/* is fussy about non-aligned arrays of words. To avoid these problems the */
+/* following macro can be used to "waste" from 0 to 3 bytes so as to align */
+/* the argument pointer. */
+#define ULONG_ALIGN_UP(X) ((((ULONG)X)+sizeof(ULONG)-1)&~(sizeof(ULONG)-1))
+
+
+/* The following constant defines the maximum length of an uncompressed item. */
+/* This definition must not be changed; its value is hardwired into the code. */
+/* The longest number of bytes that can be spanned by a single item is 18 */
+/* for the longest copy item. */
+#define MAX_RAW_ITEM (18)
+
+/* The following constant defines the maximum length of an uncompressed group.*/
+/* This definition must not be changed; its value is hardwired into the code. */
+/* A group contains at most 16 items which explains this definition. */
+#define MAX_RAW_GROUP (16*MAX_RAW_ITEM)
+
+/* The following constant defines the maximum length of a compressed group. */
+/* This definition must not be changed; its value is hardwired into the code. */
+/* A compressed group consists of two control bytes followed by up to 16 */
+/* compressed items each of which can have a maximum length of two bytes. */
+#define MAX_CMP_GROUP (2+16*2)
+
+/* The following constant defines the number of entries in the hash table. */
+/* This definition must not be changed; its value is hardwired into the code. */
+#define HASH_TABLE_LENGTH (4096)
+
+/* LZRW3, unlike LZRW1(-A), must initialize its hash table so as to enable */
+/* the compressor and decompressor to stay in step maintaining identical hash */
+/* tables. In an early version of the algorithm, the tables were simply */
+/* initialized to zero and a check for zero was included just before the */
+/* matching code. However, this test costs time. A better solution is to */
+/* initialize all the entries in the hash table to point to a constant */
+/* string. The decompressor does the same. This solution requires no extra */
+/* test. The contents of the string do not matter so long as the string is */
+/* the same for the compressor and decompressor and contains at least */
+/* MAX_RAW_ITEM bytes. I chose consecutive decimal digits because they do not */
+/* have white space problems (e.g. there is no chance that the compiler will */
+/* replace more than one space by a TAB) and because they make the length of */
+/* the string obvious by inspection. */
+#define START_STRING_18 ((UBYTE *) "123456789012345678")
+
+/* In this algorithm, hash values have to be calculated at more than one */
+/* point. The following macro neatens the code up for this. */
+#define HASH(PTR) \
+ (((40543*(((*(PTR))<<8)^((*((PTR)+1))<<4)^(*((PTR)+2))))>>4) & 0xFFF)
+
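+/* Added note -- not in the original source: both compress_compress() and    */
+/* compress_decompress() below use this same macro, so identical byte        */
+/* triples always map to the same table slot, and the final "& 0xFFF" keeps  */
+/* every index below HASH_TABLE_LENGTH. A minimal sketch of that property:   */
+#if 0
+static int hash_index_in_range(const UBYTE *p)
+{
+ return HASH(p) < HASH_TABLE_LENGTH;  /* always true: the index is 12 bits */
+}
+#endif
+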
+/******************************************************************************/
+
+/* Input : Hand over the required amount of working memory in p_wrk_mem. */
+/* Input : Specify input block using p_src_first and src_len. */
+/* Input : Point p_dst_first to the start of the output zone (OZ). */
+/* Input : Point p_dst_len to a ULONG to receive the output length. */
+/* Input : Input block and output zone must not overlap. */
+/* Output : Length of output block written to *p_dst_len. */
+/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. May */
+/* Output : write in OZ=Mem[p_dst_first..p_dst_first+src_len+MAX_CMP_GROUP-1].*/
+/* Output : Upon completion guaranteed *p_dst_len<=src_len+FLAG_BYTES. */
+LOCAL void compress_compress(UBYTE *p_wrk_mem,
+ UBYTE *p_src_first, ULONG src_len,
+ UBYTE *p_dst_first, LONG *p_dst_len)
+{
+ /* p_src and p_dst step through the source and destination blocks. */
+ register UBYTE *p_src = p_src_first;
+ register UBYTE *p_dst = p_dst_first;
+
+ /* The following variables are never modified and are used in the */
+ /* calculations that determine when the main loop terminates. */
+ UBYTE *p_src_post = p_src_first+src_len;
+ UBYTE *p_dst_post = p_dst_first+src_len;
+ UBYTE *p_src_max1 = p_src_first+src_len-MAX_RAW_ITEM;
+ UBYTE *p_src_max16 = p_src_first+src_len-MAX_RAW_ITEM*16;
+
+ /* The variables 'p_control' and 'control' are used to buffer control bits. */
+ /* Before each group is processed, the next two bytes of the output block */
+ /* are set aside for the control word for the group about to be processed. */
+ /* 'p_control' is set to point to the first byte of that word. Meanwhile, */
+ /* 'control' buffers the control bits being generated during the processing */
+ /* of the group. Instead of having a counter to keep track of how many items */
+ /* have been processed (=the number of bits in the control word), at the */
+ /* start of each group, the top word of 'control' is filled with 1 bits. */
+ /* As 'control' is shifted for each item, the 1 bits in the top word are */
+ /* absorbed or destroyed. When they all run out (i.e. when the top word is */
+/* all zero bits), we know that we are at the end of a group.                */
+# define TOPWORD 0xFFFF0000
+ UBYTE *p_control;
+ register ULONG control=TOPWORD;
+
+ /* The variable 'hash' always points to the first element of the hash table. */
+ UBYTE **hash= (UBYTE **) ULONG_ALIGN_UP(p_wrk_mem);
+
+ /* The following two variables represent the literal buffer. p_h1 points to */
+ /* the hash table entry corresponding to the youngest literal. p_h2 points */
+ /* to the hash table entry corresponding to the second youngest literal. */
+ /* Note: p_h1=0=>p_h2=0 because zero values denote absence of a pending */
+ /* literal. The variables are initialized to zero meaning an empty "buffer". */
+ UBYTE **p_h1=NULL;
+ UBYTE **p_h2=NULL;
+
+ /* To start, we write the flag bytes. Being optimistic, we set the flag to */
+ /* FLAG_COMPRESS. The remaining flag bytes are zeroed so as to keep the */
+ /* algorithm deterministic. */
+ *p_dst++=FLAG_COMPRESS;
+ {UWORD i; for (i=2;i<=FLAG_BYTES;i++) *p_dst++=0;}
+
+ /* Reserve the first word of output as the control word for the first group. */
+ /* Note: This is undone at the end if the input block is empty. */
+ p_control=p_dst; p_dst+=2;
+
+ /* Initialize all elements of the hash table to point to a constant string. */
+ /* Use of an unrolled loop speeds this up considerably. */
+ {UWORD i; UBYTE **p_h=hash;
+# define ZH *p_h++=START_STRING_18
+ for (i=0;i<256;i++) /* 256=HASH_TABLE_LENGTH/16. */
+ {ZH;ZH;ZH;ZH;
+ ZH;ZH;ZH;ZH;
+ ZH;ZH;ZH;ZH;
+ ZH;ZH;ZH;ZH;}
+ }
+
+ /* The main loop processes either 1 or 16 items per iteration. As its */
+ /* termination logic is complicated, I have opted for an infinite loop */
+ /* structure containing 'break' and 'goto' statements. */
+ while (TRUE)
+ {/* Begin main processing loop. */
+
+ /* Note: All the variables here except unroll should be defined within */
+ /* the inner loop. Unfortunately the loop hasn't got a block. */
+ register UBYTE *p; /* Scans through targ phrase during matching. */
+ register UBYTE *p_ziv= NULL ; /* Points to first byte of current Ziv. */
+ register UWORD unroll; /* Loop counter for unrolled inner loop. */
+ register UWORD index; /* Index of current hash table entry. */
+ register UBYTE **p_h0 = NULL ; /* Pointer to current hash table entry. */
+
+ /* Test for overrun and jump to overrun code if necessary. */
+ if (p_dst>p_dst_post)
+ goto overrun;
+
+ /* The following cascade of if statements efficiently catches and deals */
+ /* with varying degrees of closeness to the end of the input block. */
+ /* When we get very close to the end, we stop updating the table and */
+ /* code the remaining bytes as literals. This makes the code simpler. */
+ unroll=16;
+ if (p_src>p_src_max16)
+ {
+ unroll=1;
+ if (p_src>p_src_max1)
+ {
+ if (p_src==p_src_post)
+ break;
+ else
+ goto literal;
+ }
+ }
+
+ /* This inner unrolled loop processes 'unroll' (whose value is either 1 */
+ /* or 16) items. I have chosen to implement this loop with labels and */
+ /* gotos to heighten the ease with which the loop may be implemented with */
+ /* a single decrement and branch instruction in assembly language and */
+ /* also because the labels act as highly readable place markers. */
+ /* (Also because we jump into the loop for endgame literals (see above)). */
+
+ begin_unrolled_loop:
+
+ /* To process the next phrase, we hash the next three bytes and use */
+ /* the resultant hash table index to look up the hash table. A pointer */
+ /* to the entry is stored in p_h0 so as to avoid an array lookup. The */
+ /* hash table entry *p_h0 is looked up yielding a pointer p to a */
+ /* potential match of the Ziv in the history. */
+ index=HASH(p_src);
+ p_h0=&hash[index];
+ p=*p_h0;
+
+ /* Having looked up the candidate position, we are in a position to */
+ /* attempt a match. The match loop has been unrolled using the PS */
+ /* macro so that failure within the first three bytes automatically */
+ /* results in the literal branch being taken. The coding is simple. */
+ /* p_ziv saves p_src so we can let p_src wander. */
+# define PS *p++!=*p_src++
+ p_ziv=p_src;
+ if (PS || PS || PS)
+ {
+ /* Literal. */
+
+ /* Code the literal byte as itself and a zero control bit. */
+ p_src=p_ziv; literal: *p_dst++=*p_src++; control&=0xFFFEFFFF;
+
+ /* We have just coded a literal. If we had two pending ones, that */
+ /* makes three and we can update the hash table. */
+ if (p_h2!=0)
+ {*p_h2=p_ziv-2;}
+
+ /* In any case, rotate the hash table pointers for next time. */
+ p_h2=p_h1; p_h1=p_h0;
+
+ }
+ else
+ {
+ /* Copy */
+
+ /* Match up to 15 remaining bytes using an unrolled loop and code. */
+#if 0
+ PS || PS || PS || PS || PS || PS || PS || PS ||
+ PS || PS || PS || PS || PS || PS || PS || p_src++;
+#else
+ if (
+ !( PS || PS || PS || PS || PS || PS || PS || PS ||
+ PS || PS || PS || PS || PS || PS || PS )
+ ) p_src++;
+#endif
+ *p_dst++=((index&0xF00)>>4)|(--p_src-p_ziv-3);
+ *p_dst++=index&0xFF;
+
+ /* As we have just coded three bytes, we are now in a position to */
+ /* update the hash table with the literal bytes that were pending */
+ /* upon the arrival of extra context bytes. */
+ if (p_h1!=0)
+ {
+ if (p_h2)
+ {*p_h2=p_ziv-2; p_h2=NULL;}
+ *p_h1=p_ziv-1; p_h1=NULL;
+ }
+
+ /* In any case, we can update the hash table based on the current */
+      /* position as we just coded at least three bytes in a copy item.     */
+ *p_h0=p_ziv;
+
+ }
+ control>>=1;
+
+ /* This loop is all set up for a decrement and jump instruction! */
+#ifndef linux
+` end_unrolled_loop: if (--unroll) goto begin_unrolled_loop;
+#else
+ /* end_unrolled_loop: */ if (--unroll) goto begin_unrolled_loop;
+#endif
+
+ /* At this point it will nearly always be the end of a group in which */
+ /* case, we have to do some control-word processing. However, near the */
+ /* end of the input block, the inner unrolled loop is only executed once. */
+ /* This necessitates the 'if' test. */
+ if ((control&TOPWORD)==0)
+ {
+ /* Write the control word to the place we saved for it in the output. */
+ *p_control++= control &0xFF;
+ *p_control = (control>>8) &0xFF;
+
+ /* Reserve the next word in the output block for the control word */
+ /* for the group about to be processed. */
+ p_control=p_dst; p_dst+=2;
+
+ /* Reset the control bits buffer. */
+ control=TOPWORD;
+ }
+
+ } /* End main processing loop. */
+
+ /* After the main processing loop has executed, all the input bytes have */
+ /* been processed. However, the control word has still to be written to the */
+ /* word reserved for it in the output at the start of the most recent group. */
+ /* Before writing, the control word has to be shifted so that all the bits */
+ /* are in the right place. The "empty" bit positions are filled with 1s */
+ /* which partially fill the top word. */
+ while(control&TOPWORD) control>>=1;
+ *p_control++= control &0xFF;
+ *p_control++=(control>>8) &0xFF;
+
+ /* If the last group contained no items, delete the control word too. */
+ if (p_control==p_dst) p_dst-=2;
+
+ /* Write the length of the output block to the dst_len parameter and return. */
+ *p_dst_len=p_dst-p_dst_first;
+ return;
+
+ /* Jump here as soon as an overrun is detected. An overrun is defined to */
+ /* have occurred if p_dst>p_dst_first+src_len. That is, the moment the */
+ /* length of the output written so far exceeds the length of the input block.*/
+ /* The algorithm checks for overruns at least at the end of each group */
+ /* which means that the maximum overrun is MAX_CMP_GROUP bytes. */
+ /* Once an overrun occurs, the only thing to do is to set the copy flag and */
+ /* copy the input over. */
+ overrun:
+#if 0
+ *p_dst_first=FLAG_COPY;
+ fast_copy(p_src_first,p_dst_first+FLAG_BYTES,src_len);
+ *p_dst_len=src_len+FLAG_BYTES;
+#else
+ fast_copy(p_src_first,p_dst_first,src_len);
+ *p_dst_len= -src_len; /* return a negative number to indicate uncompressed data */
+#endif
+}
+
+/******************************************************************************/
+
+/* Input : Hand over the required amount of working memory in p_wrk_mem. */
+/* Input : Specify input block using p_src_first and src_len. */
+/* Input : Point p_dst_first to the start of the output zone. */
+/* Input : Point p_dst_len to a ULONG to receive the output length. */
+/* Input : Input block and output zone must not overlap. User knows */
+/* Input : upperbound on output block length from earlier compression. */
+/* Input : In any case, maximum expansion possible is nine times. */
+/* Output : Length of output block written to *p_dst_len. */
+/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */
+/* Output : Writes only in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */
+LOCAL void compress_decompress( UBYTE *p_wrk_mem,
+ UBYTE *p_src_first, LONG src_len,
+ UBYTE *p_dst_first, ULONG *p_dst_len)
+{
+ /* Byte pointers p_src and p_dst scan through the input and output blocks. */
+ register UBYTE *p_src = p_src_first+FLAG_BYTES;
+ register UBYTE *p_dst = p_dst_first;
+ /* we need to avoid a SEGV when trying to uncompress corrupt data */
+ register UBYTE *p_dst_post = p_dst_first + *p_dst_len;
+
+ /* The following two variables are never modified and are used to control */
+ /* the main loop. */
+ UBYTE *p_src_post = p_src_first+src_len;
+ UBYTE *p_src_max16 = p_src_first+src_len-(MAX_CMP_GROUP-2);
+
+ /* The hash table is the only resident of the working memory. The hash table */
+ /* contains HASH_TABLE_LENGTH=4096 pointers to positions in the history. To */
+ /* keep Macintoshes happy, it is longword aligned. */
+ UBYTE **hash = (UBYTE **) ULONG_ALIGN_UP(p_wrk_mem);
+
+ /* The variable 'control' is used to buffer the control bits which appear in */
+ /* groups of 16 bits (control words) at the start of each compressed group. */
+ /* When each group is read, bit 16 of the register is set to one. Whenever */
+ /* a new bit is needed, the register is shifted right. When the value of the */
+ /* register becomes 1, we know that we have reached the end of a group. */
+ /* Initializing the register to 1 thus instructs the code to follow that it */
+ /* should read a new control word immediately. */
+ register ULONG control=1;
+
+ /* The value of 'literals' is always in the range 0..3. It is the number of */
+ /* consecutive literal items just seen. We have to record this number so as */
+ /* to know when to update the hash table. When literals gets to 3, there */
+ /* have been three consecutive literals and we can update at the position of */
+ /* the oldest of the three. */
+ register UWORD literals=0;
+
+ /* Check the leading copy flag to see if the compressor chose to use a copy */
+ /* operation instead of a compression operation. If a copy operation was */
+ /* used, then all we need to do is copy the data over, set the output length */
+ /* and return. */
+#if 0
+ if (*p_src_first==FLAG_COPY)
+ {
+ fast_copy(p_src_first+FLAG_BYTES,p_dst_first,src_len-FLAG_BYTES);
+ *p_dst_len=src_len-FLAG_BYTES;
+ return;
+ }
+#else
+ if ( src_len < 0 )
+ {
+ fast_copy(p_src_first,p_dst_first,-src_len );
+ *p_dst_len = (ULONG)-src_len;
+ return;
+ }
+#endif
+
+ /* Initialize all elements of the hash table to point to a constant string. */
+ /* Use of an unrolled loop speeds this up considerably. */
+ {UWORD i; UBYTE **p_h=hash;
+# define ZJ *p_h++=START_STRING_18
+ for (i=0;i<256;i++) /* 256=HASH_TABLE_LENGTH/16. */
+ {ZJ;ZJ;ZJ;ZJ;
+ ZJ;ZJ;ZJ;ZJ;
+ ZJ;ZJ;ZJ;ZJ;
+ ZJ;ZJ;ZJ;ZJ;}
+ }
+
+ /* The outer loop processes either 1 or 16 items per iteration depending on */
+ /* how close p_src is to the end of the input block. */
+ while (p_src!=p_src_post)
+ {/* Start of outer loop */
+
+ register UWORD unroll; /* Counts unrolled loop executions. */
+
+ /* When 'control' has the value 1, it means that the 16 buffered control */
+ /* bits that were read in at the start of the current group have all been */
+ /* shifted out and that all that is left is the 1 bit that was injected */
+ /* into bit 16 at the start of the current group. When we reach the end */
+ /* of a group, we have to load a new control word and inject a new 1 bit. */
+ if (control==1)
+ {
+ control=0x10000|*p_src++;
+ control|=(*p_src++)<<8;
+ }
+
+ /* If it is possible that we are within 16 groups from the end of the */
+ /* input, execute the unrolled loop only once, else process a whole group */
+ /* of 16 items by looping 16 times. */
+ unroll= p_src<=p_src_max16 ? 16 : 1;
+
+ /* This inner loop processes one phrase (item) per iteration. */
+ while (unroll--)
+ { /* Begin unrolled inner loop. */
+
+ /* Process a literal or copy item depending on the next control bit. */
+ if (control&1)
+ {
+ /* Copy item. */
+
+ register UBYTE *p; /* Points to place from which to copy. */
+ register UWORD lenmt; /* Length of copy item minus three. */
+ register UBYTE **p_hte; /* Pointer to current hash table entry.*/
+ register UBYTE *p_ziv=p_dst; /* Pointer to start of current Ziv. */
+
+ /* Read and dismantle the copy word. Work out from where to copy. */
+ lenmt=*p_src++;
+ p_hte=&hash[((lenmt&0xF0)<<4)|*p_src++];
+ p=*p_hte;
+ lenmt&=0xF;
+
+ /* Now perform the copy using a half unrolled loop. */
+ *p_dst++=*p++;
+ *p_dst++=*p++;
+ *p_dst++=*p++;
+ while (lenmt--)
+ *p_dst++=*p++;
+
+ /* Because we have just received 3 or more bytes in a copy item */
+ /* (whose bytes we have just installed in the output), we are now */
+ /* in a position to flush all the pending literal hashings that had */
+ /* been postponed for lack of bytes. */
+ if (literals>0)
+ {
+ register UBYTE *r=p_ziv-literals;
+ hash[HASH(r)]=r;
+ if (literals==2)
+ {r++; hash[HASH(r)]=r;}
+ literals=0;
+ }
+
+ /* In any case, we can immediately update the hash table with the */
+ /* current position. We don't need to do a HASH(...) to work out */
+ /* where to put the pointer, as the compressor just told us!!! */
+ *p_hte=p_ziv;
+
+ }
+ else
+ {
+ /* Literal item. */
+
+ /* Copy over the literal byte. */
+ *p_dst++=*p_src++;
+
+ /* If we now have three literals waiting to be hashed into the hash */
+ /* table, we can do one of them now (because there are three). */
+ if (++literals == 3)
+ {register UBYTE *p=p_dst-3; hash[HASH(p)]=p; literals=2;}
+ }
+
+ /* Shift the control buffer so the next control bit is in bit 0. */
+ control>>=1;
+#if 1
+ if (p_dst > p_dst_post)
+ {
+ /* Shit: we tried to decompress corrupt data */
+ *p_dst_len = 0;
+ return;
+ }
+#endif
+ } /* End unrolled inner loop. */
+
+ } /* End of outer loop */
+
+ /* Write the length of the decompressed data before returning. */
+ *p_dst_len=p_dst-p_dst_first;
+}
+
+/******************************************************************************/
+/* End of LZRW3.C */
+/******************************************************************************/
diff --git a/drivers/char/ftape/compressor/lzrw3.h b/drivers/char/ftape/compressor/lzrw3.h
new file mode 100644
index 000000000000..533feba47526
--- /dev/null
+++ b/drivers/char/ftape/compressor/lzrw3.h
@@ -0,0 +1,253 @@
+#ifndef _LZRW3_H
+#define _LZRW3_H
+/*
+ * $Source: /homes/cvs/ftape-stacked/ftape/compressor/lzrw3.h,v $
+ * $Revision: 1.1 $
+ * $Date: 1997/10/05 19:12:30 $
+ *
+ * include files for lzrw3. Only slightly modified from the original
+ * version. Assembles the three include files compress.h, port.h and
+ * fastcopy.h from the original lzrw3 package.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+/******************************************************************************/
+/* */
+/* COMPRESS.H */
+/* */
+/******************************************************************************/
+/* */
+/* Author : Ross Williams. */
+/* Date : December 1989. */
+/* */
+/* This header file defines the interface to a set of functions called */
+/* 'compress', each member of which implements a particular data compression */
+/* algorithm. */
+/* */
+/* Normally in C programming, for each .H file, there is a corresponding .C */
+/* file that implements the functions promised in the .H file. */
+/* Here, there are many .C files corresponding to this header file. */
+/* Each conforming implementation file contains a single function            */
+/* called 'compress' that implements a single data compression */
+/* algorithm that conforms with the interface specified in this header file. */
+/* Only one algorithm can be linked in at a time in this organization. */
+/* */
+/******************************************************************************/
+/* */
+/* DEFINITION OF FUNCTION COMPRESS */
+/* =============================== */
+/* */
+/* Summary of Function Compress */
+/* ---------------------------- */
+/* The action that 'compress' takes depends on its first argument called */
+/* 'action'. The function provides three actions: */
+/* */
+/* - Return information about the algorithm. */
+/* - Compress a block of memory. */
+/* - Decompress a block of memory. */
+/* */
+/* Parameters */
+/* ---------- */
+/* See the formal C definition later for a description of the parameters. */
+/* */
+/* Constants */
+/* --------- */
+/* COMPRESS_OVERRUN: The constant COMPRESS_OVERRUN defines by how many bytes */
+/* an algorithm is allowed to expand a block during a compression operation. */
+/* */
+/* Although compression algorithms usually compress data, there will always */
+/* be data that a given compressor will expand (this can be proven). */
+/* Fortunately, the degree of expansion can be limited to a single bit, by */
+/* copying over the input data if the data gets bigger during compression. */
+/* To allow for this possibility, the first bit of a compressed */
+/* representation can be used as a flag indicating whether the */
+/* input data was copied over, or truly compressed. In practice, the first */
+/* byte would be used to store this bit so as to maintain byte alignment. */
+/* */
+/* Unfortunately, in general, the only way to tell if an algorithm will */
+/* expand a particular block of data is to run the algorithm on the data. */
+/* If the algorithm does not continuously monitor how many output bytes it */
+/* has written, it might write an output block far larger than the input */
+/* block before realizing that it has done so. */
+/* On the other hand, continuous checks on output length are inefficient. */
+/* */
+/* To cater for all these problems, this interface definition: */
+/* > Allows a compression algorithm to return an output block that is up to */
+/* COMPRESS_OVERRUN bytes longer than the input block. */
+/* > Allows a compression algorithm to write up to COMPRESS_OVERRUN bytes */
+/* more than the length of the input block to the memory of the output */
+/* block regardless of the length of the output block eventually returned. */
+/* This allows an algorithm to overrun the length of the input block in the */
+/* output block by up to COMPRESS_OVERRUN bytes between expansion checks. */
+/* */
+/* The problem does not arise for decompression. */
+/* */
+/* Identity Action */
+/* --------------- */
+/* > action must be COMPRESS_ACTION_IDENTITY. */
+/* > p_dst_len must point to a longword to receive a longword address. */
+/* > The value of the other parameters does not matter. */
+/* > After execution, the longword that p_dst_len points to will be a pointer */
+/* to a structure of type compress_identity. */
+/* Thus, for example, after the call, (*p_dst_len)->memory will return the */
+/* number of bytes of working memory that the algorithm requires to run. */
+/* > The values of the identity structure returned are fixed constant */
+/* attributes of the algorithm and must not vary from call to call. */
+/* */
+/* Common Requirements for Compression and Decompression Actions */
+/* ------------------------------------------------------------- */
+/* > wrk_mem must point to an unused block of memory of a length specified in */
+/* the algorithm's identity block. The identity block can be obtained by */
+/* making a separate call to compress, specifying the identity action. */
+/* > The INPUT BLOCK is defined to be Memory[src_addr,src_addr+src_len-1]. */
+/* > dst_len will be used to denote *p_dst_len. */
+/* > dst_len is not read by compress, only written. */
+/* > The value of dst_len is defined only upon termination. */
+/* > The OUTPUT BLOCK is defined to be Memory[dst_addr,dst_addr+dst_len-1]. */
+/* */
+/* Compression Action */
+/* ------------------ */
+/* > action must be COMPRESS_ACTION_COMPRESS. */
+/* > src_len must be in the range [0,COMPRESS_MAX_ORG]. */
+/* > The OUTPUT ZONE is defined to be */
+/* Memory[dst_addr,dst_addr+src_len-1+COMPRESS_OVERRUN]. */
+/* > The function can modify any part of the output zone regardless of the */
+/* final length of the output block. */
+/* > The input block and the output zone must not overlap. */
+/* > dst_len will be in the range [0,src_len+COMPRESS_OVERRUN]. */
+/* > dst_len will be in the range [0,COMPRESS_MAX_COM] (from prev fact). */
+/* > The output block will consist of a representation of the input block. */
+/* */
+/* Decompression Action */
+/* -------------------- */
+/* > action must be COMPRESS_ACTION_DECOMPRESS. */
+/* > The input block must be the result of an earlier compression operation. */
+/* > If the previous fact is true, the following facts must also be true: */
+/* > src_len will be in the range [0,COMPRESS_MAX_COM]. */
+/* > dst_len will be in the range [0,COMPRESS_MAX_ORG]. */
+/* > The input and output blocks must not overlap. */
+/* > Only the output block is modified. */
+/* > Upon termination, the output block will consist of the bytes contained */
+/* in the input block passed to the earlier compression operation. */
+/* */
+/******************************************************************************/
+
+/******************************************************************************/
+/* */
+/* PORT.H */
+/* */
+/******************************************************************************/
+/* */
+/* This module contains macro definitions and types that are likely to */
+/* change between computers. */
+/* */
+/******************************************************************************/
+
+#ifndef DONE_PORT /* Only do this if not previously done. */
+
+ #ifdef THINK_C
+ #define UBYTE unsigned char /* Unsigned byte */
+ #define UWORD unsigned int /* Unsigned word (2 bytes) */
+ #define ULONG unsigned long /* Unsigned word (4 bytes) */
+ #define BOOL unsigned char /* Boolean */
+ #define FOPEN_BINARY_READ "rb" /* Mode string for binary reading. */
+ #define FOPEN_BINARY_WRITE "wb" /* Mode string for binary writing. */
+ #define FOPEN_TEXT_APPEND "a" /* Mode string for text appending. */
+ #define REAL double /* USed for floating point stuff. */
+ #endif
+ #if defined(LINUX) || defined(linux)
+ #define UBYTE __u8 /* Unsigned byte */
+ #define UWORD __u16 /* Unsigned word (2 bytes) */
+ #define ULONG __u32 /* Unsigned word (4 bytes) */
+ #define LONG __s32 /* Signed word (4 bytes) */
+ #define BOOL is not used here /* Boolean */
+ #define FOPEN_BINARY_READ not used /* Mode string for binary reading. */
+ #define FOPEN_BINARY_WRITE not used /* Mode string for binary writing. */
+ #define FOPEN_TEXT_APPEND not used /* Mode string for text appending. */
+ #define REAL not used /* USed for floating point stuff. */
+ #ifndef TRUE
+ #define TRUE 1
+ #endif
+ #endif
+
+ #define DONE_PORT /* Don't do all this again. */
+ #define MALLOC_FAIL NULL /* Failure status from malloc() */
+ #define LOCAL static /* For non-exported routines. */
+ #define EXPORT /* Signals exported function. */
+ #define then /* Useful for aligning ifs. */
+
+#endif
+
+/******************************************************************************/
+/* End of PORT.H */
+/******************************************************************************/
+
+#define COMPRESS_ACTION_IDENTITY 0
+#define COMPRESS_ACTION_COMPRESS 1
+#define COMPRESS_ACTION_DECOMPRESS 2
+
+#define COMPRESS_OVERRUN 1024
+#define COMPRESS_MAX_COM 0x70000000
+#define COMPRESS_MAX_ORG (COMPRESS_MAX_COM-COMPRESS_OVERRUN)
+
+#define COMPRESS_MAX_STRLEN 255
+
+/* The following structure provides information about the algorithm. */
+/* > The top bit of id must be zero. The remaining bits must be chosen by */
+/* the author of the algorithm by tossing a coin 31 times. */
+/* > The amount of memory requested by the algorithm is specified in bytes */
+/* and must be in the range [0,0x70000000]. */
+/* > All strings s must be such that strlen(s)<=COMPRESS_MAX_STRLEN. */
+struct compress_identity
+ {
+ ULONG id; /* Identifying number of algorithm. */
+ ULONG memory; /* Number of bytes of working memory required. */
+
+ char *name; /* Name of algorithm. */
+ char *version; /* Version number. */
+ char *date; /* Date of release of this version. */
+ char *copyright; /* Copyright message. */
+
+ char *author; /* Author of algorithm. */
+ char *affiliation; /* Affiliation of author. */
+ char *vendor; /* Where the algorithm can be obtained. */
+ };
+
+void lzrw3_compress( /* Single function interface to compression algorithm. */
+UWORD action, /* Action to be performed. */
+UBYTE *wrk_mem, /* Working memory temporarily given to routine to use. */
+UBYTE *src_adr, /* Address of input data. */
+LONG src_len, /* Length of input data. */
+UBYTE *dst_adr, /* Address of output data. */
+void *p_dst_len /* Pointer to a longword where routine will write: */
+ /* If action=..IDENTITY => Adr of id structure. */
+ /* If action=..COMPRESS => Length of output data. */
+ /* If action=..DECOMPRESS => Length of output data. */
+);
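+
+/* Added usage sketch -- not part of the original header: querying the       */
+/* identity record to learn how much working memory the algorithm needs,     */
+/* as described under "Identity Action" above.                               */
+#if 0
+static ULONG lzrw3_wrk_mem_size(void)
+{
+	struct compress_identity *id;
+
+	lzrw3_compress(COMPRESS_ACTION_IDENTITY, NULL, NULL, 0, NULL, &id);
+	return id->memory;  /* bytes of working memory for later calls */
+}
+#endif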
+
+/******************************************************************************/
+/* End of COMPRESS.H */
+/******************************************************************************/
+
+
+/******************************************************************************/
+/* fast_copy.h */
+/******************************************************************************/
+
+/* This function copies a block of memory very quickly. */
+/* The exact speed depends on the relative alignment of the blocks of memory. */
+/* PRE : 0<=src_len<=(2^32)-1 . */
+/* PRE : Source and destination blocks must not overlap. */
+/* POST : MEM[dst_adr,dst_adr+src_len-1]=MEM[src_adr,src_adr+src_len-1]. */
+/* POST : MEM[dst_adr,dst_adr+src_len-1] is the only memory changed. */
+
+#define fast_copy(src,dst,len) memcpy(dst,src,len)
+
+/******************************************************************************/
+/* End of fast_copy.h */
+/******************************************************************************/
+
+#endif
diff --git a/drivers/char/ftape/compressor/zftape-compress.c b/drivers/char/ftape/compressor/zftape-compress.c
new file mode 100644
index 000000000000..220a227e6061
--- /dev/null
+++ b/drivers/char/ftape/compressor/zftape-compress.c
@@ -0,0 +1,1203 @@
+/*
+ * Copyright (C) 1994-1997 Claus-Justus Heine
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ USA.
+
+ *
+ * This file implements a "generic" interface between the
+ * zftape-driver and a compression algorithm. The compression
+ * algorithm currently used is an LZ77 variant: the lzrw3
+ * implementation by Ross N. Williams (Renaissance Software). The
+ * compression code itself is in the files lzrw3.c and lzrw3.h. To
+ * adopt another compression algorithm, the functions zft_compress()
+ * and zft_uncompress() must be changed appropriately. See below.
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <linux/zftape.h>
+
+#include <asm/uaccess.h>
+
+#include "../zftape/zftape-init.h"
+#include "../zftape/zftape-eof.h"
+#include "../zftape/zftape-ctl.h"
+#include "../zftape/zftape-write.h"
+#include "../zftape/zftape-read.h"
+#include "../zftape/zftape-rw.h"
+#include "../compressor/zftape-compress.h"
+#include "../zftape/zftape-vtbl.h"
+#include "../compressor/lzrw3.h"
+
+/*
+ * global variables
+ */
+
+/* I handle the allocation of this buffer as a special case, because
+ * its size varies depending on the length of the inserted tape.
+ */
+
+/* local variables
+ */
+static void *zftc_wrk_mem = NULL;
+static __u8 *zftc_buf = NULL;
+static void *zftc_scratch_buf = NULL;
+
+/* compression statistics
+ */
+static unsigned int zftc_wr_uncompressed = 0;
+static unsigned int zftc_wr_compressed = 0;
+static unsigned int zftc_rd_uncompressed = 0;
+static unsigned int zftc_rd_compressed = 0;
+
+/* forward */
+static int zftc_write(int *write_cnt,
+ __u8 *dst_buf, const int seg_sz,
+ const __u8 __user *src_buf, const int req_len,
+ const zft_position *pos, const zft_volinfo *volume);
+static int zftc_read(int *read_cnt,
+ __u8 __user *dst_buf, const int to_do,
+ const __u8 *src_buf, const int seg_sz,
+ const zft_position *pos, const zft_volinfo *volume);
+static int zftc_seek(unsigned int new_block_pos,
+ zft_position *pos, const zft_volinfo *volume,
+ __u8 *buffer);
+static void zftc_lock (void);
+static void zftc_reset (void);
+static void zftc_cleanup(void);
+static void zftc_stats (void);
+
+/* compressed segment. This conforms to QIC-80-MC, Revision K.
+ *
+ * Rev. K applies to tapes with `fixed length format' which is
+ * indicated by format codes 2, 3 and 5. See below for format codes 4 and 6.
+ *
+ * 2 bytes: offset of compression segment structure
+ *          29k > offset >= 29k-18: data from previous segment ends in this
+ * segment and no compressed block starts
+ * in this segment
+ * offset == 0: data from previous segment occupies entire
+ * segment and continues in next segment
+ * n bytes: remainder from previous segment
+ *
+ * Rev. K:
+ * 4 bytes: file set byte offset
+ * Post Rev. K and QIC-3010/3020:
+ * 8 bytes: file set byte offset
+ * 2 bytes: byte count N (amount of data following)
+ *          bit 15 is set if the data is uncompressed, bit 15 is
+ *          clear if the data is compressed
+ * N bytes: data (as much as specified in the byte count)
+ * 2 bytes: byte count N_1 of next cluster
+ * N_1 bytes: data of next cluster
+ * 2 bytes: byte count N_2 of next cluster
+ * N_2 bytes: ...
+ *
+ * Note that the `N' byte count accounts only for the bytes that are in the
+ * current segment if the cluster spans to the next segment.
+ */
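+
+/* Added reference sketch (not part of the driver): the layout above as the
+ * code below reads it; GET2/GET8 are the byte-extraction helpers used in
+ * get_cseg() and get_next_cluster():
+ *
+ *   first_block = GET2(segment, 0);       offset of the cluster structure
+ *   foffs       = GET8(segment, offset);  file set byte offset (QIC-113)
+ *   count       = GET2(segment, offset);  bit 15 set => uncompressed data
+ */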
+
+typedef struct
+{
+ int cmpr_pos; /* actual position in compression buffer */
+ int cmpr_sz; /* what is left in the compression buffer
+ * when copying the compressed data to the
+ * deblock buffer
+ */
+ unsigned int first_block; /* location of header information in
+ * this segment
+ */
+ unsigned int count; /* amount of data of current block
+ * contained in current segment
+ */
+ unsigned int offset; /* offset in current segment */
+ unsigned int spans:1; /* might continue in next segment */
+ unsigned int uncmpr; /* 0x8000 if this block contains
+ * uncompressed data
+ */
+ __s64 foffs; /* file set byte offset, same as in
+ * compression map segment
+ */
+} cmpr_info;
+
+static cmpr_info cseg; /* static data. Must be kept uptodate and shared by
+ * read, write and seek functions
+ */
+
+#define DUMP_CMPR_INFO(level, msg, info) \
+ TRACE(level, msg "\n" \
+ KERN_INFO "cmpr_pos : %d\n" \
+ KERN_INFO "cmpr_sz : %d\n" \
+ KERN_INFO "first_block: %d\n" \
+ KERN_INFO "count : %d\n" \
+ KERN_INFO "offset : %d\n" \
+ KERN_INFO "spans : %d\n" \
+ KERN_INFO "uncmpr : 0x%04x\n" \
+ KERN_INFO "foffs : " LL_X, \
+ (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block, \
+ (info)->count, (info)->offset, (info)->spans == 1, \
+ (info)->uncmpr, LL((info)->foffs))
+
+/* dispatch compression segment info, return error code
+ *
+ * afterwards, cseg->offset points to start of data of the NEXT
+ * compressed block, and cseg->count contains the amount of data
+ * left in the actual compressed block. cseg->spans is set to 1 if
+ * the block is continued in the following segment. Otherwise it is
+ * set to 0.
+ */
+static int get_cseg (cmpr_info *cinfo, const __u8 *buff,
+ const unsigned int seg_sz,
+ const zft_volinfo *volume)
+{
+ TRACE_FUN(ft_t_flow);
+
+ cinfo->first_block = GET2(buff, 0);
+ if (cinfo->first_block == 0) { /* data spans to next segment */
+ cinfo->count = seg_sz - sizeof(__u16);
+ cinfo->offset = seg_sz;
+ cinfo->spans = 1;
+	} else { /* cluster definitely ends in this segment */
+ if (cinfo->first_block > seg_sz) {
+ /* data corrupted */
+ TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n"
+ KERN_INFO "segment size: %d\n"
+ KERN_INFO "first block : %d",
+ seg_sz, cinfo->first_block);
+ }
+ cinfo->count = cinfo->first_block - sizeof(__u16);
+ cinfo->offset = cinfo->first_block;
+ cinfo->spans = 0;
+ }
+ /* now get the offset the first block should have in the
+ * uncompressed data stream.
+ *
+ * For this magic `18' refer to CRF-3 standard or QIC-80MC,
+ * Rev. K.
+ */
+ if ((seg_sz - cinfo->offset) > 18) {
+ if (volume->qic113) { /* > revision K */
+ TRACE(ft_t_data_flow, "New QIC-113 compliance");
+ cinfo->foffs = GET8(buff, cinfo->offset);
+ cinfo->offset += sizeof(__s64);
+ } else {
+ TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version");
+ cinfo->foffs = (__s64)GET4(buff, cinfo->offset);
+ cinfo->offset += sizeof(__u32);
+ }
+ }
+ if (cinfo->foffs > volume->size) {
+ TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
+ KERN_INFO "offset in current volume: %d\n"
+ KERN_INFO "size of current volume : %d",
+ (int)(cinfo->foffs>>10), (int)(volume->size>>10));
+ }
+ if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) {
+ TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
+ KERN_INFO "block size : %d\n"
+ KERN_INFO "data record: %d",
+ volume->blk_sz, cinfo->cmpr_pos + cinfo->count);
+ }
+ DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo);
+ TRACE_EXIT 0;
+}
+
+/* This one is called when a new cluster starts in the same segment.
+ *
+ * Note: if this is the first cluster in the current segment, we must
+ * not check whether there are more than 18 bytes available because
+ * this has already been done in get_cseg() and there may be less
+ * than 18 bytes available due to header information.
+ *
+ */
+static void get_next_cluster(cmpr_info *cluster, const __u8 *buff,
+ const int seg_sz, const int finish)
+{
+ TRACE_FUN(ft_t_flow);
+
+ if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) {
+ cluster->count = GET2(buff, cluster->offset);
+ cluster->uncmpr = cluster->count & 0x8000;
+ cluster->count -= cluster->uncmpr;
+ cluster->offset += sizeof(__u16);
+ cluster->foffs = 0;
+ if ((cluster->offset + cluster->count) < seg_sz) {
+ cluster->spans = 0;
+ } else if (cluster->offset + cluster->count == seg_sz) {
+ cluster->spans = !finish;
+ } else {
+ /* either an error or a volume written by an
+ * old version. If this is a data error, then we'll
+ * catch it later.
+ */
+ TRACE(ft_t_data_flow, "Either error or old volume");
+ cluster->spans = 1;
+ cluster->count = seg_sz - cluster->offset;
+ }
+ } else {
+ cluster->count = 0;
+ cluster->spans = 0;
+ cluster->foffs = 0;
+ }
+ DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster);
+ TRACE_EXIT;
+}
+
+static void zftc_lock(void)
+{
+}
+
+/* this function is needed for zftape_reset_position in zftape-io.c
+ */
+static void zftc_reset(void)
+{
+ TRACE_FUN(ft_t_flow);
+
+ memset((void *)&cseg, '\0', sizeof(cseg));
+ zftc_stats();
+ TRACE_EXIT;
+}
+
+static int cmpr_mem_initialized = 0;
+static unsigned int alloc_blksz = 0;
+
+static int zft_allocate_cmpr_mem(unsigned int blksz)
+{
+ TRACE_FUN(ft_t_flow);
+
+ if (cmpr_mem_initialized && blksz == alloc_blksz) {
+ TRACE_EXIT 0;
+ }
+ TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE),
+ zftc_cleanup());
+ TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN),
+ zftc_cleanup());
+ alloc_blksz = blksz;
+ TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN),
+ zftc_cleanup());
+ cmpr_mem_initialized = 1;
+ TRACE_EXIT 0;
+}
+
+static void zftc_cleanup(void)
+{
+ TRACE_FUN(ft_t_flow);
+
+ zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE);
+ zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN);
+ zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN);
+ cmpr_mem_initialized = alloc_blksz = 0;
+ TRACE_EXIT;
+}
+
+/*****************************************************************************
+ * *
+ * The following two functions "ftape_compress()" and *
+ * "ftape_uncompress()" are the interface to the actual compression *
+ * algorithm (i.e. they are calling the "compress()" function from *
+ * the lzrw3 package for now). These routines could quite easily be *
+ * changed to adopt another compression algorithm instead of lzrw3, *
+ * which currently is used. *
+ * *
+ *****************************************************************************/
+
+/* called by zft_compress_write() to perform the compression. Must
+ * return the size of the compressed data.
+ *
+ * NOTE: The size of the compressed data should not exceed the size of
+ * the uncompressed data. Most compression algorithms have means
+ * to store data unchanged if the "compressed" data amount would
+ * exceed the original one. Mostly this is done by storing some
+ * flag-bytes in front of the compressed data to indicate if it
+ * is compressed or not. Thus the worst compression result
+ * length is the original length plus those flag-bytes.
+ *
+ * We don't want that, as the QIC-80 standard provides a means
+ * of marking uncompressed blocks by simply setting bit 15 of
+ * the compressed block's length. Thus a compressed block can
+ * have at most a length of 2^15-1 bytes. The QIC-80 standard
+ * restricts the block-length even further, allowing only 29k -
+ * 6 bytes.
+ *
+ * Currently, the maximum blocksize used by zftape is 28k.
+ *
+ * In short: don't exceed the length of the input-package, set
+ * bit 15 of the compressed size to 1 if you have copied data
+ * instead of compressing it.
+ */
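+/* A negative compressed_sz returned by lzrw3_compress() means that the
+ * data was copied rather than compressed; zftc_write() then sets the
+ * 0x8000 flag in the cluster header and uses the absolute value.
+ */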
+static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer)
+{
+ __s32 compressed_sz;
+ TRACE_FUN(ft_t_flow);
+
+
+ lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem,
+ in_buffer, in_sz, out_buffer, &compressed_sz);
+ if (TRACE_LEVEL >= ft_t_info) {
+ /* the compiler will optimize this away when
+ * compiled with NO_TRACE_AT_ALL option
+ */
+ TRACE(ft_t_data_flow, "\n"
+ KERN_INFO "before compression: %d bytes\n"
+		      KERN_INFO "after compression : %d bytes",
+ in_sz,
+ (int)(compressed_sz < 0
+ ? -compressed_sz : compressed_sz));
+ /* for statistical purposes
+ */
+ zftc_wr_compressed += (compressed_sz < 0
+ ? -compressed_sz : compressed_sz);
+ zftc_wr_uncompressed += in_sz;
+ }
+ TRACE_EXIT (int)compressed_sz;
+}
+
+/* called by zft_compress_read() to decompress the data. Must
+ * return the size of the decompressed data for sanity checks
+ * (compared with zft_blk_sz)
+ *
+ * NOTE: Read the note for zft_compress() above! If bit 15 of the
+ * parameter in_sz is set, then the data in in_buffer isn't
+ * compressed, which must be handled by the un-compression
+ * algorithm. (I changed lzrw3 to handle this.)
+ *
+ * The parameter max_out_sz is needed to prevent buffer overruns when
+ * uncompressing corrupt data.
+ */
+static unsigned int zft_uncompress(__u8 *in_buffer,
+ int in_sz,
+ __u8 *out_buffer,
+ unsigned int max_out_sz)
+{
+ TRACE_FUN(ft_t_flow);
+
+ lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem,
+ in_buffer, (__s32)in_sz,
+ out_buffer, (__u32 *)&max_out_sz);
+
+ if (TRACE_LEVEL >= ft_t_info) {
+ TRACE(ft_t_data_flow, "\n"
+ KERN_INFO "before decompression: %d bytes\n"
+ KERN_INFO "after decompression : %d bytes",
+ in_sz < 0 ? -in_sz : in_sz,(int)max_out_sz);
+ /* for statistical purposes
+ */
+ zftc_rd_compressed += in_sz < 0 ? -in_sz : in_sz;
+ zftc_rd_uncompressed += max_out_sz;
+ }
+ TRACE_EXIT (unsigned int)max_out_sz;
+}
+
+/* print some statistics about the efficiency of the compression to
+ * the kernel log
+ */
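+/* Counters above 16 KiB are scaled down by 1024 before the
+ * multiplication by 100 so that the percentage fits into 32 bits.
+ */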
+static void zftc_stats(void)
+{
+ TRACE_FUN(ft_t_flow);
+
+ if (TRACE_LEVEL < ft_t_info) {
+ TRACE_EXIT;
+ }
+ if (zftc_wr_uncompressed != 0) {
+ if (zftc_wr_compressed > (1<<14)) {
+ TRACE(ft_t_info, "compression statistics (writing):\n"
+ KERN_INFO " compr./uncmpr. : %3d %%",
+ (((zftc_wr_compressed>>10) * 100)
+ / (zftc_wr_uncompressed>>10)));
+ } else {
+ TRACE(ft_t_info, "compression statistics (writing):\n"
+ KERN_INFO " compr./uncmpr. : %3d %%",
+ ((zftc_wr_compressed * 100)
+ / zftc_wr_uncompressed));
+ }
+ }
+ if (zftc_rd_uncompressed != 0) {
+ if (zftc_rd_compressed > (1<<14)) {
+ TRACE(ft_t_info, "compression statistics (reading):\n"
+ KERN_INFO " compr./uncmpr. : %3d %%",
+ (((zftc_rd_compressed>>10) * 100)
+ / (zftc_rd_uncompressed>>10)));
+ } else {
+ TRACE(ft_t_info, "compression statistics (reading):\n"
+ KERN_INFO " compr./uncmpr. : %3d %%",
+ ((zftc_rd_compressed * 100)
+ / zftc_rd_uncompressed));
+ }
+ }
+ /* only print it once: */
+ zftc_wr_uncompressed =
+ zftc_wr_compressed =
+ zftc_rd_uncompressed =
+ zftc_rd_compressed = 0;
+ TRACE_EXIT;
+}
+
+/* start new compressed block
+ */
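+/* Layout written here (and read back by get_cseg()): a 16 bit offset
+ * to the first new cluster, the remainder of a cluster spanning over
+ * from the previous segment (if any), and, when a new cluster starts,
+ * the file offset of its first block (64 bit for QIC-113 volumes,
+ * 32 bit otherwise).
+ */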
+static int start_new_cseg(cmpr_info *cluster,
+ char *dst_buf,
+ const zft_position *pos,
+ const unsigned int blk_sz,
+ const char *src_buf,
+ const int this_segs_sz,
+ const int qic113)
+{
+ int size_left;
+ int cp_cnt;
+ int buf_pos;
+ TRACE_FUN(ft_t_flow);
+
+ size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz;
+ TRACE(ft_t_data_flow,"\n"
+ KERN_INFO "segment size : %d\n"
+ KERN_INFO "compressed_sz: %d\n"
+ KERN_INFO "size_left : %d",
+ this_segs_sz, cluster->cmpr_sz, size_left);
+	if (size_left > 18) { /* start a new cluster */
+ cp_cnt = cluster->cmpr_sz;
+ cluster->cmpr_sz = 0;
+ buf_pos = cp_cnt + sizeof(__u16);
+ PUT2(dst_buf, 0, buf_pos);
+
+ if (qic113) {
+ __s64 foffs = pos->volume_pos;
+ if (cp_cnt) foffs += (__s64)blk_sz;
+
+ TRACE(ft_t_data_flow, "new style QIC-113 header");
+ PUT8(dst_buf, buf_pos, foffs);
+ buf_pos += sizeof(__s64);
+ } else {
+ __u32 foffs = (__u32)pos->volume_pos;
+ if (cp_cnt) foffs += (__u32)blk_sz;
+
+ TRACE(ft_t_data_flow, "old style QIC-80MC header");
+ PUT4(dst_buf, buf_pos, foffs);
+ buf_pos += sizeof(__u32);
+ }
+ } else if (size_left >= 0) {
+ cp_cnt = cluster->cmpr_sz;
+ cluster->cmpr_sz = 0;
+ buf_pos = cp_cnt + sizeof(__u16);
+ PUT2(dst_buf, 0, buf_pos);
+ /* zero unused part of segment. */
+ memset(dst_buf + buf_pos, '\0', size_left);
+ buf_pos = this_segs_sz;
+ } else { /* need entire segment and more space */
+ PUT2(dst_buf, 0, 0);
+ cp_cnt = this_segs_sz - sizeof(__u16);
+ cluster->cmpr_sz -= cp_cnt;
+ buf_pos = this_segs_sz;
+ }
+ memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt);
+ cluster->cmpr_pos += cp_cnt;
+ TRACE_EXIT buf_pos;
+}
+
+/* return-value: the number of bytes removed from the user-buffer
+ * `src_buf' or error code
+ *
+ * int *write_cnt : how much actually has been moved to the
+ * dst_buf. Need not be initialized when
+ * function returns with an error code
+ * (negative return value)
+ * __u8 *dst_buf : kernel space buffer the data has to be
+ * copied to. The contents of this buffer
+ * go to a specific segment.
+ * const int seg_sz : the size of the segment dst_buf will be
+ * copied to.
+ * const zft_position *pos : struct containing the coordinates in
+ * the current volume (byte position,
+ * segment id of current segment etc)
+ * const zft_volinfo *volume: information about the current volume,
+ * size etc.
+ * const __u8 *src_buf : user space buffer that contains the
+ * data the user wants to be written to
+ * tape.
+ * const int req_len : the amount of data the user wants to be
+ * written to tape.
+ */
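+/* Per segment, zftc_write() produces: the header written by
+ * start_new_cseg() (including a possible carry-over from the previous
+ * segment), then a sequence of [16 bit length | cluster data] records;
+ * a remainder of less than 18 bytes is zero-filled.
+ */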
+static int zftc_write(int *write_cnt,
+ __u8 *dst_buf, const int seg_sz,
+ const __u8 __user *src_buf, const int req_len,
+ const zft_position *pos, const zft_volinfo *volume)
+{
+ int req_len_left = req_len;
+ int result;
+ int len_left;
+ int buf_pos_write = pos->seg_byte_pos;
+ TRACE_FUN(ft_t_flow);
+
+ /* Note: we do not unlock the module because
+ * there are some values cached in that `cseg' variable. We
+ * don't want to lose this information when being
+ * unloaded by kerneld, even when the tape is full or when we
+ * cannot allocate enough memory.
+ */
+ if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) {
+ TRACE_EXIT -ENOSPC;
+ }
+ if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) {
+ /* should we unlock the module? But it shouldn't
+ * be locked anyway ...
+ */
+ TRACE_EXIT -ENOMEM;
+ }
+ if (buf_pos_write == 0) { /* fill a new segment */
+ *write_cnt = buf_pos_write = start_new_cseg(&cseg,
+ dst_buf,
+ pos,
+ volume->blk_sz,
+ zftc_buf,
+ seg_sz,
+ volume->qic113);
+ if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) {
+ req_len_left -= result = volume->blk_sz;
+ cseg.cmpr_pos = 0;
+ } else {
+ result = 0;
+ }
+ } else {
+ *write_cnt = result = 0;
+ }
+
+ len_left = seg_sz - buf_pos_write;
+ while ((req_len_left > 0) && (len_left > 18)) {
+ /* now we have some size left for a new compressed
+ * block. We know, that the compression buffer is
+ * empty (else there wouldn't be any space left).
+ */
+ if (copy_from_user(zftc_scratch_buf, src_buf + result,
+ volume->blk_sz) != 0) {
+ TRACE_EXIT -EFAULT;
+ }
+ req_len_left -= volume->blk_sz;
+ cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz,
+ zftc_buf);
+ if (cseg.cmpr_sz < 0) {
+ cseg.uncmpr = 0x8000;
+ cseg.cmpr_sz = -cseg.cmpr_sz;
+ } else {
+ cseg.uncmpr = 0;
+ }
+ /* increment "result" iff we copied the entire
+ * compressed block to the zft_deblock_buf
+ */
+ len_left -= sizeof(__u16);
+ if (len_left >= cseg.cmpr_sz) {
+ len_left -= cseg.count = cseg.cmpr_sz;
+ cseg.cmpr_pos = cseg.cmpr_sz = 0;
+ result += volume->blk_sz;
+ } else {
+ cseg.cmpr_sz -=
+ cseg.cmpr_pos =
+ cseg.count = len_left;
+ len_left = 0;
+ }
+ PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count);
+ buf_pos_write += sizeof(__u16);
+ memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count);
+ buf_pos_write += cseg.count;
+ *write_cnt += cseg.count + sizeof(__u16);
+ FT_SIGNAL_EXIT(_DONT_BLOCK);
+ }
+ /* erase the remainder of the segment if less than 18 bytes
+ * left (18 bytes is due to the QIC-80 standard)
+ */
+ if (len_left <= 18) {
+ memset(dst_buf + buf_pos_write, '\0', len_left);
+ (*write_cnt) += len_left;
+ }
+ TRACE(ft_t_data_flow, "returning %d", result);
+ TRACE_EXIT result;
+}
+
+/* out:
+ *
+ * int *read_cnt: the number of bytes we removed from the zft_deblock_buf
+ * (result)
+ * int *to_do : the remaining size of the read-request.
+ *
+ * in:
+ *
+ * char *buff : buff is the address of the upper part of the user
+ * buffer that hasn't been filled with data yet.
+ *
+ * int buf_pos_read : copy of buf_pos_rd from _ftape_read()
+ * int buf_len_read : copy of buf_len_rd from _ftape_read()
+ * char *zft_deblock_buf: zft_deblock_buf
+ * unsigned short blk_sz: the block size valid for this volume, may differ
+ * from zft_blk_sz.
+ * int finish: if != 0 means that this is the last segment belonging
+ * to this volume
+ * returns the amount of data actually copied to the user-buffer
+ *
+ * to_do MUST NOT SHRINK except to indicate an EOF. In this case *to_do has to
+ * be set to 0
+ */
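+/* Decompress from the deblock buffer src_buf into the user buffer
+ * dst_buf, one cluster at a time, until either to_do bytes have been
+ * delivered or the current cluster spans into the next segment.
+ */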
+static int zftc_read (int *read_cnt,
+ __u8 __user *dst_buf, const int to_do,
+ const __u8 *src_buf, const int seg_sz,
+ const zft_position *pos, const zft_volinfo *volume)
+{
+ int uncompressed_sz;
+ int result = 0;
+ int remaining = to_do;
+ TRACE_FUN(ft_t_flow);
+
+ TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),);
+ if (pos->seg_byte_pos == 0) {
+ /* new segment just read
+ */
+ TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume),
+ *read_cnt = 0);
+ memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16),
+ cseg.count);
+ cseg.cmpr_pos += cseg.count;
+ *read_cnt = cseg.offset;
+ DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg);
+ } else {
+ *read_cnt = 0;
+ }
+ /* loop and uncompress until user buffer full or
+ * deblock-buffer empty
+ */
+ TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d",
+ cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
+ while ((cseg.spans == 0) && (remaining > 0)) {
+ if (cseg.cmpr_pos != 0) { /* cmpr buf is not empty */
+ uncompressed_sz =
+ zft_uncompress(zftc_buf,
+ cseg.uncmpr == 0x8000 ?
+ -cseg.cmpr_pos : cseg.cmpr_pos,
+ zftc_scratch_buf,
+ volume->blk_sz);
+ if (uncompressed_sz != volume->blk_sz) {
+ *read_cnt = 0;
+ TRACE_ABORT(-EIO, ft_t_warn,
+ "Uncompressed blk (%d) != blk size (%d)",
+ uncompressed_sz, volume->blk_sz);
+ }
+ if (copy_to_user(dst_buf + result,
+ zftc_scratch_buf,
+ uncompressed_sz) != 0 ) {
+ TRACE_EXIT -EFAULT;
+ }
+ remaining -= uncompressed_sz;
+ result += uncompressed_sz;
+ cseg.cmpr_pos = 0;
+ }
+ if (remaining > 0) {
+ get_next_cluster(&cseg, src_buf, seg_sz,
+ volume->end_seg == pos->seg_pos);
+ if (cseg.count != 0) {
+ memcpy(zftc_buf, src_buf + cseg.offset,
+ cseg.count);
+ cseg.cmpr_pos = cseg.count;
+ cseg.offset += cseg.count;
+ *read_cnt += cseg.count + sizeof(__u16);
+ } else {
+ remaining = 0;
+ }
+ }
+ TRACE(ft_t_data_flow, "\n"
+ KERN_INFO "compressed_sz: %d\n"
+ KERN_INFO "compos : %d\n"
+ KERN_INFO "*read_cnt : %d",
+ cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
+ }
+ if (seg_sz - cseg.offset <= 18) {
+ *read_cnt += seg_sz - cseg.offset;
+ TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt);
+ }
+ TRACE(ft_t_data_flow, "\n"
+ KERN_INFO "segment size : %d\n"
+ KERN_INFO "read count : %d\n"
+ KERN_INFO "buf_pos_read : %d\n"
+ KERN_INFO "remaining : %d",
+ seg_sz, *read_cnt, pos->seg_byte_pos,
+ seg_sz - *read_cnt - pos->seg_byte_pos);
+ TRACE(ft_t_data_flow, "returning: %d", result);
+ TRACE_EXIT result;
+}
+
+/* seeks to the new data position. Sometimes reads a segment.
+ *
+ * start_seg and end_seg give the boundaries of the current volume
+ * blk_sz is the blk_sz of the current volume as stored in the
+ * volume label
+ *
+ * We don't allow blocksizes less than 1024 bytes, therefore we don't need
+ * a 64 bit argument for new_block_pos.
+ */
+
+static int seek_in_segment(const unsigned int to_do, cmpr_info *c_info,
+ const char *src_buf, const int seg_sz,
+ const int seg_pos, const zft_volinfo *volume);
+static int slow_seek_forward_until_error(const unsigned int distance,
+ cmpr_info *c_info, zft_position *pos,
+ const zft_volinfo *volume, __u8 *buf);
+static int search_valid_segment(unsigned int segment,
+ const unsigned int end_seg,
+ const unsigned int max_foffs,
+ zft_position *pos, cmpr_info *c_info,
+ const zft_volinfo *volume, __u8 *buf);
+static int slow_seek_forward(unsigned int dest, cmpr_info *c_info,
+ zft_position *pos, const zft_volinfo *volume,
+ __u8 *buf);
+static int compute_seg_pos(unsigned int dest, zft_position *pos,
+ const zft_volinfo *volume);
+
+#define ZFT_SLOW_SEEK_THRESHOLD 10 /* segments */
+#define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */
+#define ZFT_FAST_SEEK_BACKUP 10 /* segments */
+
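+/* Seek strategy: compute_seg_pos() estimates the target segment from
+ * the compression ratio observed so far; we jump there, look for a
+ * readable segment and re-estimate.  When we start looping between two
+ * segments, get close to the target, or exceed
+ * ZFT_FAST_SEEK_MAX_TRIALS, we fall back to slow_seek_forward(), which
+ * walks segment by segment.
+ */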
+static int zftc_seek(unsigned int new_block_pos,
+ zft_position *pos, const zft_volinfo *volume, __u8 *buf)
+{
+ unsigned int dest;
+ int limit;
+ int distance;
+ int result = 0;
+ int seg_dist;
+ int new_seg;
+ int old_seg = 0;
+ int fast_seek_trials = 0;
+ TRACE_FUN(ft_t_flow);
+
+ if (new_block_pos == 0) {
+ pos->seg_pos = volume->start_seg;
+ pos->seg_byte_pos = 0;
+ pos->volume_pos = 0;
+ zftc_reset();
+ TRACE_EXIT 0;
+ }
+ dest = new_block_pos * (volume->blk_sz >> 10);
+ distance = dest - (pos->volume_pos >> 10);
+ while (distance != 0) {
+ seg_dist = compute_seg_pos(dest, pos, volume);
+ TRACE(ft_t_noise, "\n"
+ KERN_INFO "seg_dist: %d\n"
+ KERN_INFO "distance: %d\n"
+ KERN_INFO "dest : %d\n"
+ KERN_INFO "vpos : %d\n"
+ KERN_INFO "seg_pos : %d\n"
+ KERN_INFO "trials : %d",
+ seg_dist, distance, dest,
+ (unsigned int)(pos->volume_pos>>10), pos->seg_pos,
+ fast_seek_trials);
+ if (distance > 0) {
+ if (seg_dist < 0) {
+ TRACE(ft_t_bug, "BUG: distance %d > 0, "
+ "segment difference %d < 0",
+ distance, seg_dist);
+ result = -EIO;
+ break;
+ }
+ new_seg = pos->seg_pos + seg_dist;
+ if (new_seg > volume->end_seg) {
+ new_seg = volume->end_seg;
+ }
+ if (old_seg == new_seg || /* loop */
+ seg_dist <= ZFT_SLOW_SEEK_THRESHOLD ||
+ fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) {
+ TRACE(ft_t_noise, "starting slow seek:\n"
+ KERN_INFO "fast seek failed too often: %s\n"
+ KERN_INFO "near target position : %s\n"
+ KERN_INFO "looping between two segs : %s",
+ (fast_seek_trials >=
+ ZFT_FAST_SEEK_MAX_TRIALS)
+ ? "yes" : "no",
+ (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD)
+ ? "yes" : "no",
+ (old_seg == new_seg)
+ ? "yes" : "no");
+ result = slow_seek_forward(dest, &cseg,
+ pos, volume, buf);
+ break;
+ }
+ old_seg = new_seg;
+ limit = volume->end_seg;
+ fast_seek_trials ++;
+ for (;;) {
+ result = search_valid_segment(new_seg, limit,
+ volume->size,
+ pos, &cseg,
+ volume, buf);
+ if (result == 0 || result == -EINTR) {
+ break;
+ }
+ if (new_seg == volume->start_seg) {
+					result = -EIO; /* set error
+ * condition
+ */
+ break;
+ }
+ limit = new_seg;
+ new_seg -= ZFT_FAST_SEEK_BACKUP;
+ if (new_seg < volume->start_seg) {
+ new_seg = volume->start_seg;
+ }
+ }
+ if (result < 0) {
+ TRACE(ft_t_warn,
+ "Couldn't find a readable segment");
+ break;
+ }
+ } else /* if (distance < 0) */ {
+ if (seg_dist > 0) {
+ TRACE(ft_t_bug, "BUG: distance %d < 0, "
+				      "segment difference %d > 0",
+ distance, seg_dist);
+ result = -EIO;
+ break;
+ }
+ new_seg = pos->seg_pos + seg_dist;
+ if (fast_seek_trials > 0 && seg_dist == 0) {
+ /* this avoids sticking to the same
+ * segment all the time. On the other hand:
+ * if we got here for the first time, and the
+ * deblock_buffer still contains a valid
+ * segment, then there is no need to skip to
+ * the previous segment if the desired position
+ * is inside this segment.
+ */
+ new_seg --;
+ }
+ if (new_seg < volume->start_seg) {
+ new_seg = volume->start_seg;
+ }
+ limit = pos->seg_pos;
+ fast_seek_trials ++;
+ for (;;) {
+ result = search_valid_segment(new_seg, limit,
+ pos->volume_pos,
+ pos, &cseg,
+ volume, buf);
+ if (result == 0 || result == -EINTR) {
+ break;
+ }
+ if (new_seg == volume->start_seg) {
+					result = -EIO; /* set error
+ * condition
+ */
+ break;
+ }
+ limit = new_seg;
+ new_seg -= ZFT_FAST_SEEK_BACKUP;
+ if (new_seg < volume->start_seg) {
+ new_seg = volume->start_seg;
+ }
+ }
+ if (result < 0) {
+ TRACE(ft_t_warn,
+ "Couldn't find a readable segment");
+ break;
+ }
+ }
+ distance = dest - (pos->volume_pos >> 10);
+ }
+ TRACE_EXIT result;
+}
+
+
+/* advance inside the given segment by at most to_do kilobytes without
+ * actually decompressing the data; returns the number of kilobytes moved.
+ */
+
+static int seek_in_segment(const unsigned int to_do,
+ cmpr_info *c_info,
+ const char *src_buf,
+ const int seg_sz,
+ const int seg_pos,
+ const zft_volinfo *volume)
+{
+ int result = 0;
+ int blk_sz = volume->blk_sz >> 10;
+ int remaining = to_do;
+ TRACE_FUN(ft_t_flow);
+
+ if (c_info->offset == 0) {
+ /* new segment just read
+ */
+ TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),);
+ c_info->cmpr_pos += c_info->count;
+ DUMP_CMPR_INFO(ft_t_noise, "", c_info);
+ }
+ /* loop and uncompress until user buffer full or
+ * deblock-buffer empty
+ */
+ TRACE(ft_t_noise, "compressed_sz: %d, compos : %d",
+ c_info->cmpr_sz, c_info->cmpr_pos);
+ while (c_info->spans == 0 && remaining > 0) {
+ if (c_info->cmpr_pos != 0) { /* cmpr buf is not empty */
+ result += blk_sz;
+ remaining -= blk_sz;
+ c_info->cmpr_pos = 0;
+ }
+ if (remaining > 0) {
+ get_next_cluster(c_info, src_buf, seg_sz,
+ volume->end_seg == seg_pos);
+ if (c_info->count != 0) {
+ c_info->cmpr_pos = c_info->count;
+ c_info->offset += c_info->count;
+ } else {
+ break;
+ }
+ }
+ /* Allow escape from this loop on signal!
+ */
+ FT_SIGNAL_EXIT(_DONT_BLOCK);
+ DUMP_CMPR_INFO(ft_t_noise, "", c_info);
+ TRACE(ft_t_noise, "to_do: %d", remaining);
+ }
+ if (seg_sz - c_info->offset <= 18) {
+ c_info->offset = seg_sz;
+ }
+ TRACE(ft_t_noise, "\n"
+ KERN_INFO "segment size : %d\n"
+ KERN_INFO "buf_pos_read : %d\n"
+ KERN_INFO "remaining : %d",
+ seg_sz, c_info->offset,
+ seg_sz - c_info->offset);
+ TRACE_EXIT result;
+}
+
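+/* Fetch segments one after another and skip through them with
+ * seek_in_segment() until the remaining distance (in kilobytes) is
+ * covered, the volume ends, or a read error terminates the loop.
+ */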
+static int slow_seek_forward_until_error(const unsigned int distance,
+ cmpr_info *c_info,
+ zft_position *pos,
+ const zft_volinfo *volume,
+ __u8 *buf)
+{
+ unsigned int remaining = distance;
+ int seg_sz;
+ int seg_pos;
+ int result;
+ TRACE_FUN(ft_t_flow);
+
+ seg_pos = pos->seg_pos;
+ do {
+ TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf,
+ FT_RD_AHEAD),);
+ /* now we have the contents of the actual segment in
+ * the deblock buffer
+ */
+ TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf,
+ seg_sz, seg_pos,volume),);
+ remaining -= result;
+ pos->volume_pos += result<<10;
+ pos->seg_pos = seg_pos;
+ pos->seg_byte_pos = c_info->offset;
+ seg_pos ++;
+ if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) {
+ pos->seg_pos ++;
+ pos->seg_byte_pos = 0;
+ c_info->offset = 0;
+ }
+ /* Allow escape from this loop on signal!
+ */
+ FT_SIGNAL_EXIT(_DONT_BLOCK);
+ TRACE(ft_t_noise, "\n"
+ KERN_INFO "remaining: %d\n"
+ KERN_INFO "seg_pos: %d\n"
+ KERN_INFO "end_seg: %d\n"
+ KERN_INFO "result: %d",
+ remaining, seg_pos, volume->end_seg, result);
+ } while (remaining > 0 && seg_pos <= volume->end_seg);
+ TRACE_EXIT 0;
+}
+
+/* return segment id of next segment containing valid data, -EIO otherwise
+ */
+static int search_valid_segment(unsigned int segment,
+ const unsigned int end_seg,
+ const unsigned int max_foffs,
+ zft_position *pos,
+ cmpr_info *c_info,
+ const zft_volinfo *volume,
+ __u8 *buf)
+{
+ cmpr_info tmp_info;
+ int seg_sz;
+ TRACE_FUN(ft_t_flow);
+
+ memset(&tmp_info, 0, sizeof(cmpr_info));
+ while (segment <= end_seg) {
+ FT_SIGNAL_EXIT(_DONT_BLOCK);
+ TRACE(ft_t_noise,
+ "Searching readable segment between %d and %d",
+ segment, end_seg);
+ seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD);
+ if ((seg_sz > 0) &&
+ (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) &&
+ (tmp_info.foffs != 0 || segment == volume->start_seg)) {
+ if ((tmp_info.foffs>>10) > max_foffs) {
+ TRACE_ABORT(-EIO, ft_t_noise, "\n"
+ KERN_INFO "cseg.foff: %d\n"
+ KERN_INFO "dest : %d",
+ (int)(tmp_info.foffs >> 10),
+ max_foffs);
+ }
+ DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info);
+ *c_info = tmp_info;
+ pos->seg_pos = segment;
+ pos->volume_pos = c_info->foffs;
+ pos->seg_byte_pos = c_info->offset;
+ TRACE(ft_t_noise, "found segment at %d", segment);
+ TRACE_EXIT 0;
+ }
+ segment++;
+ }
+ TRACE_EXIT -EIO;
+}
+
+static int slow_seek_forward(unsigned int dest,
+ cmpr_info *c_info,
+ zft_position *pos,
+ const zft_volinfo *volume,
+ __u8 *buf)
+{
+ unsigned int distance;
+ int result = 0;
+ TRACE_FUN(ft_t_flow);
+
+ distance = dest - (pos->volume_pos >> 10);
+ while ((distance > 0) &&
+ (result = slow_seek_forward_until_error(distance,
+ c_info,
+ pos,
+ volume,
+ buf)) < 0) {
+ if (result == -EINTR) {
+ break;
+ }
+ TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos);
+ /* the failing segment is either pos->seg_pos or
+ * pos->seg_pos + 1. There is no need to further try
+ * that segment, because ftape_read_segment() has already
+ * tried very hard to read it. So we start with the
+ * following segment, which is pos->seg_pos + 1
+ */
+ if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest,
+ pos, c_info,
+ volume, buf) < 0) {
+ TRACE(ft_t_noise, "search_valid_segment() failed");
+ result = -EIO;
+ break;
+ }
+ distance = dest - (pos->volume_pos >> 10);
+ result = 0;
+ TRACE(ft_t_noise, "segment: %d", pos->seg_pos);
+ /* found valid segment, retry the seek */
+ }
+ TRACE_EXIT result;
+}
+
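+/* Estimate how many segments a seek over `distance' kilobytes of
+ * uncompressed data spans.  The ratio of real (on-tape) segments to
+ * the uncompressed data they hold is kept as a fixed point value with
+ * 7 fractional bits; the two shift variants merely guard against
+ * overflow.  For forward seeks the raw estimate is scaled by this
+ * factor.
+ */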
+static int compute_seg_pos(const unsigned int dest,
+ zft_position *pos,
+ const zft_volinfo *volume)
+{
+ int segment;
+ int distance = dest - (pos->volume_pos >> 10);
+ unsigned int raw_size;
+ unsigned int virt_size;
+ unsigned int factor;
+ TRACE_FUN(ft_t_flow);
+
+ if (distance >= 0) {
+ raw_size = volume->end_seg - pos->seg_pos + 1;
+ virt_size = ((unsigned int)(volume->size>>10)
+ - (unsigned int)(pos->volume_pos>>10)
+ + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
+ virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
+ if (virt_size == 0 || raw_size == 0) {
+ TRACE_EXIT 0;
+ }
+ if (raw_size >= (1<<25)) {
+ factor = raw_size/(virt_size>>7);
+ } else {
+ factor = (raw_size<<7)/virt_size;
+ }
+ segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
+ segment = (segment * factor)>>7;
+ } else {
+ raw_size = pos->seg_pos - volume->start_seg + 1;
+ virt_size = ((unsigned int)(pos->volume_pos>>10)
+ + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
+ virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
+ if (virt_size == 0 || raw_size == 0) {
+ TRACE_EXIT 0;
+ }
+ if (raw_size >= (1<<25)) {
+ factor = raw_size/(virt_size>>7);
+ } else {
+ factor = (raw_size<<7)/virt_size;
+ }
+ segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
+ }
+ TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7);
+ TRACE_EXIT segment;
+}
+
+static struct zft_cmpr_ops cmpr_ops = {
+ zftc_write,
+ zftc_read,
+ zftc_seek,
+ zftc_lock,
+ zftc_reset,
+ zftc_cleanup
+};
+
+int zft_compressor_init(void)
+{
+ TRACE_FUN(ft_t_flow);
+
+#ifdef MODULE
+ printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n");
+ if (TRACE_LEVEL >= ft_t_info) {
+ printk(
+KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
+KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n");
+ }
+#else /* !MODULE */
+ /* print a short no-nonsense boot message */
+ printk("zftape compressor v1.00a 970514\n");
+ printk("For use with " FTAPE_VERSION "\n");
+#endif /* MODULE */
+ TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init);
+ TRACE(ft_t_info, "installing compressor for zftape ...");
+ TRACE_CATCH(zft_cmpr_register(&cmpr_ops),);
+ TRACE_EXIT 0;
+}
+
+#ifdef MODULE
+
+MODULE_AUTHOR(
+	"(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)");
+MODULE_DESCRIPTION(
+"Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
+MODULE_LICENSE("GPL");
+
+/* Called by modules package when installing the driver
+ */
+int init_module(void)
+{
+ return zft_compressor_init();
+}
+
+#endif /* MODULE */
diff --git a/drivers/char/ftape/compressor/zftape-compress.h b/drivers/char/ftape/compressor/zftape-compress.h
new file mode 100644
index 000000000000..f200741e33bf
--- /dev/null
+++ b/drivers/char/ftape/compressor/zftape-compress.h
@@ -0,0 +1,83 @@
+#ifndef _ZFTAPE_COMPRESS_H
+#define _ZFTAPE_COMPRESS_H
+/*
+ * Copyright (c) 1994-1997 Claus-Justus Heine
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ USA.
+
+ *
+ * $Source: /homes/cvs/ftape-stacked/ftape/compressor/zftape-compress.h,v $
+ * $Revision: 1.1 $
+ * $Date: 1997/10/05 19:12:32 $
+ *
+ * This file contains macros and definitions for zftape's
+ * builtin compression code.
+ *
+ */
+
+#include "../zftape/zftape-buffers.h"
+#include "../zftape/zftape-vtbl.h"
+#include "../compressor/lzrw3.h"
+
+/* CMPR_WRK_MEM_SIZE gives the size of the compression wrk_mem */
+/* I got these out of lzrw3.c */
+#define U(X) ((__u32) X)
+#define SIZE_P_BYTE (U(sizeof(__u8 *)))
+#define ALIGNMENT_FUDGE (U(16))
+
+#define CMPR_WRK_MEM_SIZE (U(4096)*(SIZE_P_BYTE) + ALIGNMENT_FUDGE)
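+/* i.e. room for the 4096-entry hash table of byte pointers that lzrw3
+ * keeps in its working memory, plus slack for aligning it (cf. lzrw3.c).
+ */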
+
+/* the maximum number of bytes by which the size of the "compressed"
+ * data can exceed that of the uncompressed data. As it is quite
+ * useless to compress data twice, it is sometimes more efficient to
+ * copy a block of data than to feed it to the "compression"
+ * algorithm. In this case there are some flag bytes or the like
+ * preceding the "compressed" data. THAT MUST NOT BE THE CASE for the
+ * algorithm we use for this driver. Instead, the high bit 15 of
+ * compressed_size:
+ *
+ * compressed_size = ftape_compress()
+ *
+ * must be set in such a case.
+ *
+ * Nevertheless, it might also be as for lzrw3 that there is an
+ * "intermediate" overrun that exceeds the amount of the compressed
+ * data that is actually produced. During the algorithm we need in the
+ * worst case MAX_CMP_GROUP bytes more than the input-size.
+ */
+#define MAX_CMP_GROUP (2+16*2) /* from lzrw3.c */
+
+#define CMPR_OVERRUN MAX_CMP_GROUP /* during compression */
+
+/****************************************************/
+
+#define CMPR_BUFFER_SIZE (MAX_BLOCK_SIZE + CMPR_OVERRUN)
+
+/* the compression map stores the byte offset of compressed blocks within
+ * the current volume for cartridges with format code 2, 3 and 5
+ * (and old versions of zftape) and the offset measured in kilobytes for
+ * format code 4 and 6. This gives us a possible max. size of a
+ * compressed volume of 1024*4GIG which should be enough.
+ */
+typedef __u32 CmprMap;
+
+/* globals
+ */
+
+/* exported functions
+ */
+
+#endif /* _ZFTAPE_COMPRESS_H */