71c2a0fed48f58560146832cad1fd6d13e8f15a8
gperez2
  Thu Dec 11 18:33:23 2025 -0800
Updating the script to stop parsing at genome/track lines to avoid overwriting hub labels, and adding a check for useOneFile to parse genome info from hub.txt instead of genomes.txt, refs #36827

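For context, here is a minimal, self-contained sketch of the behavior this commit adds. The sample hub.txt content and the parse_hub_fields helper below are illustrative assumptions, not code from hubPublicAutoUpdate (which also handles tab-separated fields). With useOneFile on, hub.txt also carries the genome and track stanzas, and those stanzas reuse shortLabel/longLabel, so the hub-level parser has to stop at the first "genome " or "track " line to keep the hub labels intact.

# Hypothetical illustration (assumed sample data, not part of the script).
SAMPLE_USEONEFILE_HUB = """\
hub exampleHub
shortLabel Example Hub
longLabel An example assembly hub served entirely from hub.txt
useOneFile on
email nobody@example.com

genome hg38

track exampleTrack
shortLabel example track
longLabel Track stanzas reuse shortLabel/longLabel, so parsing must stop before them
"""

def parse_hub_fields(hub_txt):
    """Collect hub-level key/value pairs, stopping at the genome/track stanzas."""
    fields = {}
    for line in hub_txt.splitlines():
        if line.startswith("genome ") or line.startswith("track "):
            break  # everything below belongs to genome/track stanzas
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value.strip()
    return fields

fields = parse_hub_fields(SAMPLE_USEONEFILE_HUB)
print(fields["shortLabel"])      # 'Example Hub' -- the hub label survives
print(fields.get("useOneFile"))  # 'on' -- genome info is read from hub.txt itself

Without the break, the track stanza's shortLabel/longLabel would overwrite the hub's, which is the label clobbering the commit message refers to; when useOneFile is off, the script instead follows genomesFile to genomes.txt for the genome stanzas.
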
diff --git src/utils/qa/hubPublicAutoUpdate src/utils/qa/hubPublicAutoUpdate
index be1d1378235..cfb6f7f5831 100755
--- src/utils/qa/hubPublicAutoUpdate
+++ src/utils/qa/hubPublicAutoUpdate
@@ -84,44 +84,52 @@
         label = label.replace("'","\\'")
     return(label)
 
 def curl(url):
     """Run curl command on URL - for http + ftp support"""
     rawCurlOutput = subprocess.run("curl --user-agent \"genome.ucsc.edu/net.c\" -skL --fail --connect-timeout 10 "+url,\
                          check=True, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
     curlStdout = rawCurlOutput.stdout
     return(curlStdout)
 
 def buildCurrentHubTxtDic(hub):
     """Query hub.txt file and build dic of values"""
     currentHub = {}
     response = curl(hub)
     for line in response.splitlines():
+        # Stop parsing at genome or track lines to avoid overwriting hub labels
+        if line.startswith("genome ") or line.startswith("track "):
+            break
         if "\t" in line.rstrip():
             line = line.split("\t")
             currentHub[line[0]] = line[1]
         else:
             line = line.split(" ")
             currentHub[line[0]] = " ".join(line[1:])
     if not (currentHub['descriptionUrl'].startswith("http") or currentHub['descriptionUrl'].startswith("ftp")):
         currentHub['descriptionUrl'] = '/'.join(hub.split('/')[0:len(hub.split('/'))-1])+"/"+currentHub['descriptionUrl']
     return(currentHub)
                     
 def queryHubTxt(currentHub,hub):
     """Query genomes.txt file and fill out dbList and dbCount values"""
     
     currentHub['dbList'] = []
+    # When useOneFile is on, genome info comes from hub.txt instead of genomes.txt
+    if currentHub.get('useOneFile') == 'on':
+        genomeInfo = curl(hub)
+    else:
+        # Otherwise, get genome info from the genomes.txt file pointed to by genomesFile
         genomeFileLocation = currentHub['genomesFile'].rstrip().lstrip()
         if genomeFileLocation.startswith("http"):
             genomeUrl = genomeFileLocation
         else:
             genomeUrl = "/".join(hub.split('/')[:-1])+"/"+genomeFileLocation
         genomeInfo = curl(genomeUrl)
     
     for line in genomeInfo.splitlines():
         if "\t" in line:
             line = line.split("\t")
             if line[0].rstrip().lstrip() == 'genome':
                 while "" in line:
                     line.remove("")
                 currentHub['dbList'].append(line[1].rstrip().lstrip())
         else: