69087d3a65af31c39337085920e99e5b2db13082
galt
  Fri Jun 17 15:05:28 2022 -0700
Ran the dbSNP pipeline designed by Angie for dbSNP v155. It produces huge bigBed output, and I found and fixed a problem encountered in bedToBigBed. I also tweaked dbSnpJsonToTab to deal with some dbSNP data having multiple study subversions, by ignoring the old datasets and using only the latest one. Added a track description page that has lots of content and counts to update. dbSNP 155 is ready for QA on hgwdev. refs #rm27751

diff --git src/lib/bbiWrite.c src/lib/bbiWrite.c
index fc31ab1..26fd66b 100644
--- src/lib/bbiWrite.c
+++ src/lib/bbiWrite.c
@@ -353,31 +353,31 @@
     boolean doCompress,	    /* Do we compress.  Answer really should be yes! */
     bits64 dataSize,	    /* Size of data on disk (after compression if any). */
     struct bbiChromUsage *usageList, /* Result from bbiChromUsageFromBedFile */
     int resTryCount, int resScales[], int resSizes[],   /* How much to zoom at each level */
     bits32 zoomAmounts[bbiMaxZoomLevels],      /* Fills in amount zoomed at each level. */
     bits64 zoomDataOffsets[bbiMaxZoomLevels],  /* Fills in where data starts for each zoom level. */
     bits64 zoomIndexOffsets[bbiMaxZoomLevels], /* Fills in where index starts for each level. */
     struct bbiSummaryElement *totalSum)
 /* Write out all the zoom levels and return the number of levels written.  Writes 
  * actual zoom amount and the offsets of the zoomed data and index in the last three
  * parameters.  Sorry for all the parameters - it was this or duplicate a big chunk of
  * code between bedToBigBed and bedGraphToBigWig. */
 {
 /* Write out first zoomed section while storing in memory next zoom level. */
 assert(resTryCount > 0);
-int maxReducedSize = dataSize/2;
+bits64 maxReducedSize = dataSize/2;
 int initialReduction = 0, initialReducedCount = 0;
 
 /* Figure out initialReduction for zoom - one that is maxReducedSize or less. */
 int resTry;
 for (resTry = 0; resTry < resTryCount; ++resTry)
     {
     bits64 reducedSize = resSizes[resTry] * sizeof(struct bbiSummaryOnDisk);
     if (doCompress)
 	reducedSize /= 2;	// Estimate!
     if (reducedSize <= maxReducedSize)
 	{
 	initialReduction = resScales[resTry];
 	initialReducedCount = resSizes[resTry];
 	break;
 	}