src/lib/bbiRead.c 1.15

1.15 2009/11/12 23:15:51 kent
First cut of compressed bigWig/bigBed stuff. So far read side should be complete including Genome Browser, Table Browser, wigToBigWig and bigWig utility functions. Still to do bedGraphToBigWig and bedToBigBed.
Index: src/lib/bbiRead.c
===================================================================
RCS file: /projects/compbio/cvsroot/kent/src/lib/bbiRead.c,v
retrieving revision 1.14
retrieving revision 1.15
diff -b -B -U 4 -r1.14 -r1.15
--- src/lib/bbiRead.c	7 Nov 2009 19:25:54 -0000	1.14
+++ src/lib/bbiRead.c	12 Nov 2009 23:15:51 -0000	1.15
@@ -5,8 +5,9 @@
 #include "linefile.h"
 #include "hash.h"
 #include "obscure.h"
 #include "localmem.h"
+#include "zlibFace.h"
 #include "bPlusTree.h"
 #include "cirTree.h"
 #include "udc.h"
 #include "bbiFile.h"
@@ -73,8 +74,9 @@
 bbi->fieldCount = udcReadBits16(udc, isSwapped);
 bbi->definedFieldCount = udcReadBits16(udc, isSwapped);
 bbi->asOffset = udcReadBits64(udc, isSwapped);
 bbi->totalSummaryOffset = udcReadBits64(udc, isSwapped);
+bbi->uncompressBufSize = udcReadBits32(udc, isSwapped);
 
 /* Skip over reserved area. */
 udcSeek(udc, 64);
 
@@ -288,22 +290,55 @@
 struct bbiSummary *sumList = NULL, *sum;
 struct udcFile *udc = bbi->udc;
 udcSeek(udc, zoom->indexOffset);
 struct cirTreeFile *ctf = cirTreeFileAttach(bbi->fileName, bbi->udc);
-struct fileOffsetSize *fragList = cirTreeFindOverlappingBlocks(ctf, chromId, start, end);
-struct fileOffsetSize *block, *blockList = fileOffsetSizeMerge(fragList);
-for (block = blockList; block != NULL; block = block->next)
-    {
-    /* Read info we need into memory. */
-    udcSeek(udc, block->offset);
-    char *blockBuf = needLargeMem(block->size);
-    udcRead(udc, blockBuf, block->size);
-    char *blockPt = blockBuf;
+struct fileOffsetSize *blockList = cirTreeFindOverlappingBlocks(ctf, chromId, start, end);
+struct fileOffsetSize *block, *beforeGap, *afterGap;
 
+/* Set up for uncompression, if the file is compressed. */
+char *uncompressBuf = NULL;
+if (bbi->uncompressBufSize > 0)
+    uncompressBuf = needLargeMem(bbi->uncompressBufSize);
+
+
+/* This loop is a little complicated because we merge the read requests for efficiency, but we 
+ * have to then go back through the data one unmerged block at a time. */
+for (block = blockList; block != NULL; )
+    {
+    /* Find contiguous blocks and read them into mergedBuf. */
+    fileOffsetSizeFindGap(block, &beforeGap, &afterGap);
+    bits64 mergedOffset = block->offset;
+    bits64 mergedSize = beforeGap->offset + beforeGap->size - mergedOffset;
+    udcSeek(udc, mergedOffset);
+    char *mergedBuf = needLargeMem(mergedSize);
+    udcMustRead(udc, mergedBuf, mergedSize);
+    char *blockBuf = mergedBuf;
+
+    /* Loop through individual blocks within merged section. */
+    for (;block != afterGap; block = block->next)
+        {
+	/* Uncompress if necessary. */
+	char *blockPt, *blockEnd;
+	if (uncompressBuf)
+	    {
+	    blockPt = uncompressBuf;
+	    int uncSize = zUncompress(blockBuf, block->size, uncompressBuf, bbi->uncompressBufSize);
+	    blockEnd = blockPt + uncSize;
+	    }
+	else
+	    {
+	    blockPt = blockBuf;
+	    blockEnd = blockPt + block->size;
+	    }
+
+	/* Figure out bounds and number of items in block. */
+	int blockSize = blockEnd - blockPt;
     struct bbiSummaryOnDisk *dSum;
     int itemSize = sizeof(*dSum);
-    assert(block->size % itemSize == 0);
-    int itemCount = block->size / itemSize;
+	assert(blockSize % itemSize == 0);
+	int itemCount = blockSize / itemSize;
+
+	/* Read in items and convert to memory list format. */
     int i;
     for (i=0; i<itemCount; ++i)
 	{
 	dSum = (void *)blockPt;
@@ -318,12 +353,15 @@
 		slAddHead(&sumList, sum);
 		}
 	    }
 	}
-    freeMem(blockBuf);
+	assert(blockPt == blockEnd);
+	blockBuf += block->size;
+        }
+    freeMem(mergedBuf);
     }
+freeMem(uncompressBuf);
 slFreeList(&blockList);
-slFreeList(&fragList);
 cirTreeFileDetach(&ctf);
 slReverse(&sumList);
 return sumList;
 }