6a731cde7f73dcf905192439bdf064a73abf11d2
galt
  Sun Aug 24 13:24:29 2025 -0700
Fixes #35580 and #36250

diff --git src/hg/hgTracks/hgTracks.c src/hg/hgTracks/hgTracks.c
index 56c148ee253..9099fcce366 100644
--- src/hg/hgTracks/hgTracks.c
+++ src/hg/hgTracks/hgTracks.c
@@ -7758,69 +7758,79 @@
                 if (cartVis)
                     child->visibility = hTvFromString(cartVis);
                 }
             }
         }
     }
 lmCleanup(&lm);
 }
 
 
 struct paraFetchData
     {
     struct paraFetchData *next;
     struct track *track;
     boolean done;
+    boolean doLoadSummary;
     };
 
 static boolean isTrackForParallelLoad(struct track *track)
 /* Is this a track that should be loaded in parallel ? */
 {
 char *bdu = trackDbSetting(track->tdb, "bigDataUrl");
 
 return customFactoryParallelLoad(bdu, track->tdb->type) && (track->subtracks == NULL);
 }
 
-static void findLeavesForParallelLoad(struct track *trackList, struct paraFetchData **ppfdList)
+static void findLeavesForParallelLoad(struct track *trackList, struct paraFetchData **ppfdList, boolean doLoadSummary)
 /* Find leaves of track tree that are remote network resources for parallel-fetch loading */
 {
 struct track *track;
 if (!trackList)
     return;
 for (track = trackList; track != NULL; track = track->next)
     {
  
+    if (doLoadSummary && !track->loadSummary)
+	continue;
+
     if (track->visibility != tvHide)
 	{
 	if (isTrackForParallelLoad(track))
 	    {
 	    struct paraFetchData *pfd;
 	    AllocVar(pfd);
 	    pfd->track = track;  // need pointer to be stable
+	    pfd->doLoadSummary = doLoadSummary;
 	    slAddHead(ppfdList, pfd);
 	    track->parallelLoading = TRUE;
 	    }
 	struct track *subtrack;
         for (subtrack=track->subtracks; subtrack; subtrack=subtrack->next)
 	    {
+
+	    if (doLoadSummary && !subtrack->loadSummary)
+		continue;
+
 	    if (isTrackForParallelLoad(subtrack))
 		{
 		if (tdbVisLimitedByAncestors(cart,subtrack->tdb,TRUE,TRUE) != tvHide)
 		    {
 		    struct paraFetchData *pfd;
 		    AllocVar(pfd);
 		    pfd->track = subtrack;  // need pointer to be stable
+		    pfd->doLoadSummary = doLoadSummary;
 		    slAddHead(ppfdList, pfd);
 		    subtrack->parallelLoading = TRUE;
 		    }
 		}
 	    }
 	}
     }
 }
 
 static pthread_mutex_t pfdMutex = PTHREAD_MUTEX_INITIALIZER;
 static struct paraFetchData *pfdList = NULL, *pfdRunning = NULL, *pfdDone = NULL, *pfdNeverStarted = NULL;
 
 static void checkHideEmptySubtracks(struct track *tg)
 /* Suppress queries on subtracks w/o data in window (identified from multiIntersect file) */
 {
@@ -7939,38 +7949,45 @@
         }
     pthread_mutex_unlock( &pfdMutex );
     if (allDone)
 	return NULL;
 
     long thisTime = 0, lastTime = 0;
 
     if (measureTiming)
 	lastTime = clock1000();
 
     /* protect against errAbort */
     struct errCatch *errCatch = errCatchNew();
     if (errCatchStart(errCatch))
 	{
 	pfd->done = FALSE;
+
+	if (pfd->doLoadSummary)
+	    pfd->track->loadSummary(pfd->track);
+	else
+	    {
 	    checkMaxWindowToDraw(pfd->track);
 	    checkHideEmptySubtracks(pfd->track);
 	    pfd->track->loadItems(pfd->track); 
 	    if (tdbHasDecorators(pfd->track))
 		{
 		loadDecorators(pfd->track);
 		decoratorMethods(pfd->track);
 		}
+	    }
+
 	pfd->done = TRUE;
 	}
     errCatchEnd(errCatch);
     if (errCatch->gotWarning)
         {
         // do something intelligent to permit reporting of warnings
         // Can't pass it to warn yet - the fancy warnhandlers aren't ready
         }
     if (errCatch->gotError)
 	{
 	pfd->track->networkErrMsg = cloneString(errCatch->message->string);
 	pfd->done = TRUE;
 	}
     errCatchFree(&errCatch);
 
@@ -8871,49 +8888,55 @@
 // The reason that tends to work is that by luck most BED handlers have code to check if the item
 // overlaps the current window and to skip it if it does not.
 // I do not expect something so simple would work with wigs and other track types.
 // Even if we do want to optimize the BED-like tracks (which are already the fastest loading type),
 // to handle all of the virtmodes properly, this would have be be done differently.
 // Instead of just lumping them all into a single range, you would have to cluster together
 // ranges that are close together and on the same chromosome.
 // Clearly this was just to test an idea for optimizing.
 // NOT FINISHED.
 bool loadHack = FALSE; //TRUE;  // probably should only be tried on non-wiggle tracks
 //warn ("loadHack = %d", loadHack); // TODO
 int lastWinEnd = 0;
 for (window=windows; window; window=window->next)
     lastWinEnd = window->winEnd;
 
+int doLoadLoop;
+boolean doLoadSummary = FALSE;
+
+for (doLoadLoop=0; doLoadLoop < 2; ++doLoadLoop)
+    {
+
     for (window=windows; window; window=window->next)
 	{
 	trackList = window->trackList;  // set track list
 	setGlobalsFromWindow(window);
 
 	// TEMP HACK GALT REMOVE
 	if (loadHack)
 	    {
 	    if (currentWindow == windows) // first window
 		winEnd = lastWinEnd; // so now we load the entire span inside the first window.
 	    }
 
 	/* pre-load remote tracks in parallel */
 	int ptMax = atoi(cfgOptionDefault("parallelFetch.threads", "20"));  // default number of threads for parallel fetch.
 	int pfdListCount = 0;
 	pthread_t *threads = NULL;
 	if (ptMax > 0)     // parallelFetch.threads=0 to disable parallel fetch
 	    {
-	findLeavesForParallelLoad(trackList, &pfdList);
+	    findLeavesForParallelLoad(trackList, &pfdList, doLoadSummary);
 	    pfdListCount = slCount(pfdList);
 	    /* launch parallel threads */
 	    ptMax = min(ptMax, pfdListCount);
 	    if (ptMax > 0)
 		{
 		AllocArray(threads, ptMax);
 		/* Create threads */
 		int pt;
 		for (pt = 0; pt < ptMax; ++pt)
 		    {
 		    int rc = pthread_create(&threads[pt], NULL, remoteParallelLoad, NULL);
 		    if (rc)
 			{
 			errAbort("Unexpected error %d from pthread_create(): %s",rc,strerror(rc));
 			}
@@ -8930,37 +8953,47 @@
 	    if (track->visibility != tvHide)
 		{
 		if (!track->parallelLoading)
 		    {
 		    if (measureTiming)
 			lastTime = clock1000();
 
 		    checkMaxWindowToDraw(track);
 
 		    checkHideEmptySubtracks(track);
 
 		    checkIfWiggling(cart, track);
 
 		    if (!loadHack)
 			{
+
+			if (doLoadSummary)
+			    {
+			    if (track->loadSummary)
+				track->loadSummary(track);
+			    }
+			else
+			    {
 			    track->loadItems(track);
 			    if (tdbHasDecorators(track))
 				{
 				loadDecorators(track);
 				decoratorMethods(track);
 				}
 			    }
+
+			}
 		    else
 			{
 			// TEMP HACK GALT REMOVE
 			if (currentWindow == windows) // first window
 			    {
 			    track->loadItems(track);
 			    }
 			else
 			    {
 			    track->items = track->prevWindow->items;  // just point to the previous windows items (faster than loading)
 			    // apparently loadItems is setting some other fields that we want, but which ones?
 			    track->visibility = track->prevWindow->visibility;
 			    track->limitedVis = track->prevWindow->limitedVis;
 			    track->limitedVisSet = track->prevWindow->limitedVisSet;
 			    track->height = track->prevWindow->height;
@@ -8975,30 +9008,34 @@
 			thisTime = clock1000();
 			track->loadTime = thisTime - lastTime;
 			}
 		    }
 		}
 	    }
 
 	if (ptMax > 0)
 	    {
 	    /* wait for remote parallel load to finish */
 	    remoteParallelLoadWait(getParaLoadTimeout());  // wait up to default 90 seconds.
 	    if (measureTiming)
 		measureTime("Waiting for parallel (%d threads for %d tracks) remote data fetch", ptMax, pfdListCount);
 	    }
 	}
+
+    doLoadSummary = TRUE;
+    }
+
 trackLoadingInProgress = FALSE;
 
 setGlobalsFromWindow(windows); // first window // restore globals
 trackList = windows->trackList;  // restore track list
 
 // Some loadItems() calls will have already set limitedVis.
 // Look for lowest limitedVis across all windows
 // if found, set all windows to same lowest limitedVis
 for (track = trackList; track != NULL; track = track->next)
     {
     setSharedLimitedVisAcrossWindows(track);
     struct track *sub;
     for (sub=track->subtracks; sub; sub=sub->next)
 	{
 	setSharedLimitedVisAcrossWindows(sub);