33d721957e8037927a42d23ea2fc00d41094b54e
galt
  Mon Jul 30 17:19:42 2012 -0700
keeping res* variables from overflowing integers when aveSpan is large. fixes #8630
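
A minimal standalone sketch (not part of this patch) of why the cap is needed:
res starts at aveSpan and is multiplied by resIncrement (4) for up to 10 levels,
so a large starting span pushes the later levels well past INT_MAX.  The aveSpan
value below is made up for illustration; the patch stops the loop once res
exceeds 1000000000.

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
    /* Stand-in for the reduction-level setup: in bedToBigBed res is kept in a
     * 32-bit int, so any level past INT_MAX is where the overflow would occur. */
    long long res = 5000000;      /* hypothetical large aveSpan */
    int resIncrement = 4, resTry;
    for (resTry = 0; resTry < 10; ++resTry)
        {
        printf("level %d: res = %lld%s\n", resTry, res,
               res > INT_MAX ? "  -- overflows a 32-bit int" : "");
        res *= resIncrement;
        }
    return 0;
    }
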
diff --git src/utils/bedToBigBed/bedToBigBed.c src/utils/bedToBigBed/bedToBigBed.c
index dc68d8f..5fa52dd 100644
--- src/utils/bedToBigBed/bedToBigBed.c
+++ src/utils/bedToBigBed/bedToBigBed.c
@@ -501,30 +501,38 @@
 bits64 chromTreeOffset = ftell(f);
 bbiWriteChromInfo(usageList, blockSize, f);
 
 /* Set up to keep track of possible initial reduction levels. */
 int resTryCount = 10, resTry;
 int resIncrement = 4;
 int resScales[resTryCount], resSizes[resTryCount];
 int minZoom = 10;
 int res = aveSpan;
 if (res < minZoom)
     res = minZoom;
 for (resTry = 0; resTry < resTryCount; ++resTry)
     {
     resSizes[resTry] = 0;
     resScales[resTry] = res;
+    // If aveSpan is large, the initial value of res is large too, and we cannot
+    //  do all 10 levels without overflowing the res* integers and other related variables.
+    if (res > 1000000000)
+	{
+	resTryCount = resTry + 1;
+	verbose(2, "resTryCount reduced from 10 to %d\n", resTryCount);
+	break;
+	}
     res *= resIncrement;
     }
 
 /* Write out primary full resolution data in sections, collect stats to use for reductions. */
 bits64 dataOffset = ftell(f);
 writeOne(f, bedCount);
 bits32 blockCount = bbiCountSectionsNeeded(usageList, itemsPerSlot);
 struct bbiBoundsArray *boundsArray;
 AllocArray(boundsArray, blockCount);
 lineFileRewind(lf);
 bits16 fieldCount=0;
 bits32 maxBlockSize = 0;
 writeBlocks(usageList, lf, as, itemsPerSlot, boundsArray, blockCount, doCompress,
 	f, resTryCount, resScales, resSizes, &fieldCount, &maxBlockSize);
 verboseTime(1, "pass2 - checking and writing primary data (%lld records, %d fields)",