376ad47e34d1ddf6a3aef79214b97c24f6910848
hiram
  Thu Jul 16 12:18:24 2020 -0700
loading up chrX 10way on each species refs #11636

diff --git src/hg/makeDb/doc/hg38/tba10way.txt src/hg/makeDb/doc/hg38/tba10way.txt
index 835ba60..02a7be6 100644
--- src/hg/makeDb/doc/hg38/tba10way.txt
+++ src/hg/makeDb/doc/hg38/tba10way.txt
@@ -1,2109 +1,2237 @@
 #############################################################################
 ## 10-Way Multiz (DONE - 2020-07-09 - Hiram)
     ssh hgwdev
     mkdir /hive/data/genomes/hg38/bed/tba10way
     cd /hive/data/genomes/hg38/bed/tba10way
 
     # from the 218-way in the source tree, select out the 10 used here:
     /cluster/bin/phast/tree_doctor \
         --prune-all-but hg38,panTro5,rheMac8,mm10,canFam3,neoSch1,pteAle1,loxAfr3,monDom5,ornAna2 \
         /cluster/home/hiram/kent/src/hg/utils/phyloTrees/218way.nh \
    | sed -e 's/panTro5/panTro6/; s/rheMac8/rheMac10/; s/canFam3/canFam4/;' \
       > t.nh
 
     # using TreeGraph2 tree editor on the Mac, rearrange to get hg38
     # at the top, and attempt to get the others in phylo order:
     /cluster/bin/phast/all_dists t.nh | grep hg38 \
         | sed -e "s/hg38.//" | sort -k2n | sed -e 's/^/#\t/;'
 #       panTro6 0.013390
 #       rheMac10        0.079575
 #       canFam4 0.332429
 #       pteAle1 0.337613
 #       loxAfr3 0.345811
 #       neoSch1 0.374971
 #       mm10    0.502391
 #       monDom5 0.715679
 #       ornAna2 0.953149
 
     #	what that looks like:
 ~/kent/src/hg/utils/phyloTrees/asciiTree.pl t.nh > hg38.10way.nh
 ~/kent/src/hg/utils/phyloTrees/asciiTree.pl hg38.10way.nh | sed -e 's/^/# /;'
 
 # (((((((hg38:0.00655,
 #       panTro6:0.00684):0.029424,
 #      rheMac10:0.043601):0.109934,
 #     mm10:0.356483):0.020593,
 #    ((canFam4:0.054458,
 #     neoSch1:0.097):0.076064,
 #    pteAle1:0.135706):0.035406):0.023664,
 #   loxAfr3:0.155646):0.234728,
 #  monDom5:0.290786):0.071664,
 # ornAna2:0.456592);
 
     # extract species list from that .nh file
     sed 's/[a-z][a-z]*_//g; s/:[0-9\.][0-9\.]*//g; s/;//; /^ *$/d' \
         hg38.10way.nh | xargs echo | sed 's/ //g; s/,/ /g' \
         | sed 's/[()]//g; s/,/ /g' | tr '[ ]' '[\n]' > species.list.txt
 
     # construct db to name translation list:
     cat species.list.txt | while read DB
 do
 hgsql -N -e "select name,organism from dbDb where name=\"${DB}\";" hgcentraltest
 done | sed -e "s/\t/->/; s/ /_/g;" | sed -e 's/$/;/' | sed -e 's/\./_/g' \
     | sed -e "s#'#_x_#g;" > db.to.name.txt
 
 # edited db.to.name.txt to change - to _ in some of the names.
 # e.g. Black_flying-fox -> Black_flying_fox,
 # the flying-fox didn't survive the tree_doctor
 
 /cluster/bin/phast/tree_doctor --rename "`cat db.to.name.txt`" hg38.10way.nh \
    | sed -e 's/0\+)/)/g; s/0\+,/,/g' \
      | $HOME/kent/src/hg/utils/phyloTrees/asciiTree.pl /dev/stdin \
        | sed -e "s#_x_#'#g;" > hg38.10way.commonNames.nh
 
     cat hg38.10way.commonNames.nh | sed -e 's/^/# /;'
 # (((((((Human:0.00655,
 #       Chimp:0.00684):0.029424,
 #      Rhesus:0.043601):0.109934,
 #     Mouse:0.356483):0.020593,
 #    ((Dog:0.054458,
 #     Hawaiian_monk_seal:0.097):0.076064,
 #    Black_flying_fox:0.135706):0.035406):0.023664,
 #   Elephant:0.155646):0.234728,
 #  Opossum:0.290786):0.071664,
 # Platypus:0.456592);
 
 #	Use this specification in the phyloGif tool:
 #	http://genome.ucsc.edu/cgi-bin/phyloGif
 #	to obtain a png image for src/hg/htdocs/images/phylo/hg38_10way.png
 
     ~/kent/src/hg/utils/phyloTrees/asciiTree.pl hg38.10way.nh > t.nh
     ~/kent/src/hg/utils/phyloTrees/scientificNames.sh t.nh \
        | $HOME/kent/src/hg/utils/phyloTrees/asciiTree.pl /dev/stdin \
           > hg38.10way.scientificNames.nh
     rm -f t.nh
     cat hg38.10way.scientificNames.nh | sed -e 's/^/# /;'
 # (((((((Homo_sapiens:0.00655,
 #       Pan_troglodytes:0.00684):0.029424,
 #      Macaca_mulatta:0.043601):0.109934,
 #     Mus_musculus:0.356483):0.020593,
 #    ((Canis_lupus_familiaris:0.054458,
 #     Neomonachus_schauinslandi:0.097):0.076064,
 #    Pteropus_alecto:0.135706):0.035406):0.023664,
 #   Loxodonta_africana:0.155646):0.234728,
 #  Monodelphis_domestica:0.290786):0.071664,
 # Ornithorhynchus_anatinus:0.456592);
 
     /cluster/bin/phast/all_dists hg38.10way.nh | grep hg38 \
         | sed -e "s/hg38.//" | sort -k2n > 10way.distances.txt
     #	Use this output to create the table below
     cat 10way.distances.txt | sed -e 's/^/# /;'
 # panTro6       0.013390
 # rheMac10      0.079575
 # canFam4       0.332429
 # pteAle1       0.337613
 # loxAfr3       0.345811
 # neoSch1       0.374971
 # mm10  0.502391
 # monDom5       0.715679
 # ornAna2       0.953149
 
     printf '#!/usr/bin/env perl
 
 use strict;
 use warnings;
 
 open (FH, "<10way.distances.txt") or
         die "can not read 10way.distances.txt";
 
 my $count = 0;
 while (my $line = <FH>) {
     chomp $line;
     my ($D, $dist) = split('"'"'\\s+'"'"', $line);
     my $chain = "chain" . ucfirst($D);
     my $B="/hive/data/genomes/hg38/bed/lastz.$D/fb.hg38." .
         $chain . "Link.txt";
     my $chainLinkMeasure =
         `awk '"'"'{print \\$5}'"'"' ${B} 2> /dev/null | sed -e "s/(//; s/)//"`;
     chomp $chainLinkMeasure;
     $chainLinkMeasure = 0.0 if (length($chainLinkMeasure) < 1);
     $chainLinkMeasure =~ s/\\%%//;
     my $swapFile="/hive/data/genomes/${D}/bed/lastz.hg38/fb.${D}.chainHg38Link.txt";
     my $swapMeasure = "N/A";
     if ( -s $swapFile ) {
 	$swapMeasure =
 	    `awk '"'"'{print \\$5}'"'"' ${swapFile} 2> /dev/null | sed -e "s/(//; s/)//"`;
 	chomp $swapMeasure;
 	$swapMeasure = 0.0 if (length($swapMeasure) < 1);
 	$swapMeasure =~ s/\\%%//;
     }
     my $orgName=
     `hgsql -N -e "select organism from dbDb where name='"'"'$D'"'"';" hgcentraltest`;
     chomp $orgName;
     if (length($orgName) < 1) {
         $orgName="N/A";
     }
     ++$count;
     printf "# %%02d  %%.4f (%%%% %%06.3f) (%%%% %%06.3f) - %%s %%s\\n", $count, $dist,
         $chainLinkMeasure, $swapMeasure, $orgName, $D;
 }
 close (FH);
 ' > sizeStats.pl
     chmod +x ./sizeStats.pl
     ./sizeStats.pl
 
 #	If you can fill in all the numbers in this table, you are ready for
 #	the multiple alignment procedure
 
 #       featureBits chainLink measures
 #               chainLink
 #  N distance  on hg38  on other     other species
 # 01  0.0134 (% 95.451) (% 93.239) - Chimp panTro6
 # 02  0.0796 (% 83.855) (% 84.642) - Rhesus rheMac10
 # 03  0.3324 (% 49.808) (% 60.163) - Dog canFam4
 # 04  0.3376 (% 48.281) (% 71.168) - Black flying-fox pteAle1
 # 05  0.3458 (% 45.214) (% 42.303) - Elephant loxAfr3
 # 06  0.3750 (% 52.020) (% 61.809) - Hawaiian monk seal neoSch1
 # 07  0.5024 (% 31.653) (% 35.372) - Mouse mm10
 # 08  0.7157 (% 14.370) (% 11.996) - Opossum monDom5
 # 09  0.9531 (% 08.419) (% 12.609) - Platypus ornAna2
 
 # What do all three types of chains look like:
 
 printf "#\t\tmafNet\tsynNet\trBest\n"
 
 for S in `grep -v hg38 species.list.txt`
 do
   rBest=`awk '{print $(NF-2)}' ../lastz.${S}/fb.*RBest* 2> /dev/null`
   if [ "x${rBest}y" = "xy" ]; then
      rBest="n/a"
   fi
   synNet=`awk '{print $(NF-2)}' ../lastz.${S}/fb.*chainSyn* 2> /dev/null`
   if [ "x${synNet}y" = "xy" ]; then
      synNet="n/a"
   fi
   mafNet=`grep intersection ../lastz.${S}/fb.* | egrep -v "RBest|chainSyn" | awk '{print $(NF-2)}'`
   printf "%s\t%s\t%s\t%s\n" "${S}" "${mafNet}" "${synNet}" "${rBest}"
 done
 
 #               mafNet  synNet  rBest
 panTro6 (95.451%)       (94.807%)       (89.983%)
 rheMac10        (83.855%)       (82.826%)       (78.262%)
 mm10    (31.653%)       (29.964%)       (29.238%)
 canFam4 (49.808%)       (47.849%)       (45.822%)
 neoSch1 (52.020%)       (49.683%)       (47.732%)
 pteAle1 (48.281%)       n/a     n/a
 loxAfr3 (45.214%)       n/a     n/a
 monDom5 (14.370%)       n/a     n/a
 ornAna2 (8.419%)        n/a     n/a
 
 # None of this concern for distances matters in building the first step, the
 # maf files.  The distances will be better calibrated later.
 
     # create species list and stripped down tree for autoMZ
     sed 's/[a-z][a-z]*_//g; s/:[0-9\.][0-9\.]*//g; s/;//; /^ *$/d' \
 	hg38.10way.nh | xargs echo | sed 's/ //g; s/,/ /g' > tree.nh
 
     sed 's/[()]//g; s/,/ /g' tree.nh > species.list
     cat species.list | fold -s -w 76 | sed -e 's/^/# /;'
 # hg38 panTro6 rheMac10 mm10 canFam4 neoSch1 pteAle1 loxAfr3 monDom5 ornAna2
 
     #	bash shell syntax here ...
     cd /hive/data/genomes/hg38/bed/tba10way
     export H=/hive/data/genomes/hg38/bed
     mkdir mafLinks
     # five of them have good syntenic net:
     for G in panTro6 rheMac10 mm10 canFam4 neoSch1
     do
       mkdir mafLinks/$G
       echo ln -s ${H}/lastz.$G/axtChain/hg38.${G}.synNet.maf.gz ./mafLinks/$G
       ln -s ${H}/lastz.$G/axtChain/hg38.${G}.synNet.maf.gz ./mafLinks/$G
     done
 
     # the other four only have mafNet:
     for G in pteAle1 loxAfr3 monDom5 ornAna2
     do
       mkdir mafLinks/$G
       echo ln -s ${H}/lastz.$G/mafNet/hg38.${G}.net.maf.gz ./mafLinks/$G
       ln -s ${H}/lastz.$G/mafNet/hg38.${G}.net.maf.gz ./mafLinks/$G
     done
 
     # verify the symLinks are good:
     ls -ogL mafLinks/*/* | sed -e 's/^/# /; s/-rw-rw-r-- 1//;'
 #  1105897467 Apr  3 01:43 mafLinks/canFam4/hg38.canFam4.synNet.maf.gz
 #  1032494722 Apr 29  2015 mafLinks/loxAfr3/hg38.loxAfr3.net.maf.gz
 #   710111073 Apr  9  2015 mafLinks/mm10/hg38.mm10.synNet.maf.gz
 #   323347908 May 29  2014 mafLinks/monDom5/hg38.monDom5.net.maf.gz
 #  1113427994 Mar 21  2018 mafLinks/neoSch1/hg38.neoSch1.synNet.maf.gz
 #   188096324 Feb 21  2017 mafLinks/ornAna2/hg38.ornAna2.net.maf.gz
 #  1669588497 Mar 25  2018 mafLinks/panTro6/hg38.panTro6.synNet.maf.gz
 #  1068903277 Apr 29  2015 mafLinks/pteAle1/hg38.pteAle1.net.maf.gz
 #  1386693308 Jul  3  2019 mafLinks/rheMac10/hg38.rheMac10.synNet.maf.gz
 
     #	need to split these things up into smaller pieces for
     #	efficient kluster run.
     mkdir /hive/data/genomes/hg38/bed/tba10way/mafSplit
     cd /hive/data/genomes/hg38/bed/tba10way/mafSplit
 
     #	mafSplitPos splits on gaps or repeat areas that will not have
     #	any chains, approx 5 Mbp intervals, gaps at least 10,000
     time mafSplitPos -minGap=10000 hg38 5 stdout | sort -u \
 	| sort -k1,1 -k2,2n > mafSplit.bed
     # real    3m3.111s
     #   see also multiz100way.txt for more discussion of this procedure
 
     #	run a kluster job to split them all
     # only 9 jobs, good for hgwdev cluster
     ssh hgwdev
     cd /hive/data/genomes/hg38/bed/tba10way/mafSplit
 
     printf '#!/bin/csh -ef
 set G = $1
 set M = $2
 mkdir -p $G
 pushd $G > /dev/null
 if ( -s hg38_${M}.00.maf ) then
     /bin/rm -f hg38_${M}.*.maf
 endif
 /cluster/bin/x86_64/mafSplit ../mafSplit.bed hg38_ ../../mafLinks/${G}/${M}.maf.gz
 /bin/gzip hg38_*.maf
 popd > /dev/null
 ' > runOne
 
     # << happy emacs
     chmod +x runOne
 
     printf '#LOOP
 runOne $(dir1) $(file1) {check out exists+ $(dir1)/hg38_chr1.00.maf.gz}
 #ENDLOOP
 ' > template
 
     find ../mafLinks -type l | awk -F'/' '{printf "%s/%s\n", $3,$4}' \
       | sed -e 's/.maf.gz//;' > maf.list
 
     gensub2 maf.list single template jobList
     para -ram=16g create jobList
     para try ... check ... push ... etc...
 # Completed: 9 of 9 jobs
 # CPU time in finished jobs:       6090s     101.51m     1.69h    0.07d  0.000 y
 # IO & Wait Time:                     4s       0.06m     0.00h    0.00d  0.000 y
 # Average job time:                 677s      11.29m     0.19h    0.01d
 # Longest finished job:            1188s      19.80m     0.33h    0.01d
 # Submission to last job:          1200s      20.00m     0.33h    0.01d
 
     # construct a list of all possible maf file names.
     # they do not all exist in each of the species directories
     find . -type f | grep "maf.gz" | wc -l
     # 5780
 
     find . -type f | grep ".maf.gz$" | xargs -L 1 basename | sort -u \
         > run.maf.list
     wc -l run.maf.list
     # 676 run.maf.list
 
     # number of chroms with data:
     awk -F'.' '{print $1}' run.maf.list  | sed -e 's/hg38_//;' \
       | sort | uniq -c | sort -n | wc -l
     #  356
 
     mkdir /hive/data/genomes/hg38/bed/tba10way/splitRun
     cd /hive/data/genomes/hg38/bed/tba10way/splitRun
     mkdir maf run
     cd run
     mkdir penn
     cp -p /cluster/bin/penn/multiz.2009-01-21_patched/multiz penn
     cp -p /cluster/bin/penn/multiz.2009-01-21_patched/maf_project penn
     cp -p /cluster/bin/penn/multiz.2009-01-21_patched/autoMZ penn
 
     #	set the db and pairs directories here
     cat > autoMultiz.csh << '_EOF_'
 #!/bin/csh -ef
 set db = hg38
 set c = $1
 set result = $2
 set run = `/bin/pwd`
 set tmp = /dev/shm/$db/multiz.$c
 set pairs = /hive/data/genomes/hg38/bed/tba10way/mafSplit
 /bin/rm -fr $tmp
 /bin/mkdir -p $tmp
 /bin/cp -p ../../tree.nh ../../species.list $tmp
 pushd $tmp > /dev/null
 foreach s (`/bin/sed -e "s/$db //" species.list`)
     set in = $pairs/$s/$c
     set out = $db.$s.sing.maf
     if (-e $in.gz) then
         /bin/zcat $in.gz > $out
         if (! -s $out) then
             echo "##maf version=1 scoring=autoMZ" > $out
         endif
     else if (-e $in) then
         /bin/ln -s $in $out
     else
         echo "##maf version=1 scoring=autoMZ" > $out
     endif
 end
 set path = ($run/penn $path); rehash
 $run/penn/autoMZ + T=$tmp E=$db "`cat tree.nh`" $db.*.sing.maf $c \
         > /dev/null
 popd > /dev/null
 /bin/rm -f $result
 /bin/cp -p $tmp/$c $result
 /bin/rm -fr $tmp
 /bin/rmdir --ignore-fail-on-non-empty /dev/shm/$db
 '_EOF_'
 # << happy emacs
     chmod +x autoMultiz.csh
 
     printf '#LOOP
 ./autoMultiz.csh $(root1) {check out line+ /hive/data/genomes/hg38/bed/tba10way/splitRun/maf/$(root1)}
 #ENDLOOP
 ' > template
 
     ln -s  ../../mafSplit/run.maf.list maf.list
 
     ssh ku
     cd /hive/data/genomes/hg38/bed/tba10way/splitRun/run
     gensub2 maf.list single template jobList
     para create jobList
     para try ... check ... push ... etc...
 # Completed: 676 of 676 jobs
 # CPU time in finished jobs:     368636s    6143.94m   102.40h    4.27d  0.012 y
 # IO & Wait Time:                  2457s      40.95m     0.68h    0.03d  0.000 y
 # Average job time:                 549s       9.15m     0.15h    0.01d
 # Longest finished job:            3628s      60.47m     1.01h    0.04d
 # Submission to last job:          8243s     137.38m     2.29h    0.10d
 
     # put the split maf results back together into a single per-chrom maf file
     #	eliminate duplicate comments
     ssh hgwdev
     cd /hive/data/genomes/hg38/bed/tba10way/splitRun
     mkdir ../maf
     #	no need to save the comments since they are lost with mafAddIRows
 
     printf '#!/bin/csh -fe
 set C = $1
 if ( -s ../maf/${C}.maf.gz ) then
     rm -f ../maf/${C}.maf.gz
 endif
 if ( -s maf/hg38_${C}.00.maf ) then
   head -q -n 1 maf/hg38_${C}.00.maf | sort -u > ../maf/${C}.maf
   grep -h -v "^#" `ls maf/hg38_${C}.*.maf | sort -t. -k2,2n` >> ../maf/${C}.maf
   tail -q -n 1 maf/hg38_${C}.00.maf | sort -u >> ../maf/${C}.maf
 else
   touch ../maf/${C}.maf
 endif
 ' > runOne
     chmod +x runOne
 
     printf '#LOOP
 runOne $(root1) {check out exists ../maf/$(root1).maf}
 #ENDLOOP
 ' > template
 
     cut -f1 ../../../chrom.sizes > chr.list
     ssh ku
     cd /hive/data/genomes/hg38/bed/tba10way/splitRun
     gensub2 chr.list single template jobList
     para -ram=16g create jobList
     para try ... check ... push ... etc ...
     para -maxJob=32 push
 # Completed: 640 of 640 jobs
 # CPU time in finished jobs:        195s       3.25m     0.05h    0.00d  0.000 y
 # IO & Wait Time:                  1760s      29.33m     0.49h    0.02d  0.000 y
 # Average job time:                   3s       0.05m     0.00h    0.00d
 # Longest finished job:              28s       0.47m     0.01h    0.00d
 # Submission to last job:           157s       2.62m     0.04h    0.00d
 
     cd /hive/data/genomes/hg38/bed/tba10way/maf
     # 284 of them have empty results, they have to be removed
     ls -ogrt | awk '$3 == 0' | awk '{print $NF}' | xargs rm -f
 
     # Load into database
     mkdir -p /gbdb/hg38/tba10way/maf
     cd /hive/data/genomes/hg38/bed/tba10way/maf
     ln -s `pwd`/*.maf /gbdb/hg38/tba10way/maf/
 
     # this generates an immense tba10way.tab file in the directory
     #	where it is running.  Best to run this over in scratch.
     #   This is going to take all day.
     cd /dev/shm
     time hgLoadMaf -pathPrefix=/gbdb/hg38/tba10way/maf hg38 tba10way
     # Loaded 16954958 mafs in 356 files from /gbdb/hg38/tba10way/maf
     # real    3m52.493s
 
     time (cat /gbdb/hg38/tba10way/maf/*.maf \
         | hgLoadMafSummary -verbose=2 -minSize=30000 \
 	-mergeGap=1500 -maxSize=200000 hg38 tba10waySummary stdin)
 # Created 1887874 summary blocks from 82903651 components and 16954958 mafs from stdin
 # real    6m0.072s
 
 -rw-rw-r-- 1 896076678 Jul 10 09:18 tba10way.tab
 -rw-rw-r-- 1  89027991 Jul 13 09:39 tba10waySummary.tab
 
     wc -l tba10*.tab
 #  16954958 tba10way.tab
 #   1887874 tba10waySummary.tab
 
     rm tba10way*.tab
 
 #######################################################################
 # GAP ANNOTATE MULTIZ10WAY MAF AND LOAD TABLES (DONE - 2020-07-13 - Hiram)
     # mafAddIRows has to be run on single chromosome maf files, it does not
     #	function correctly when more than one reference sequence
     #	are in a single file.
     mkdir -p /hive/data/genomes/hg38/bed/tba10way/anno
     cd /hive/data/genomes/hg38/bed/tba10way/anno
 
     # check for N.bed files everywhere:
     for DB in `cat ../species.list`
 do
     if [ ! -s /hive/data/genomes/${DB}/${DB}.N.bed ]; then
         echo "MISS: ${DB}"
         cd /hive/data/genomes/${DB}
         twoBitInfo -nBed ${DB}.2bit ${DB}.N.bed
     else
         echo "  OK: ${DB}"
     fi
     cd /hive/data/genomes/hg38/bed/tba10way/anno
 done
 
     cd /hive/data/genomes/hg38/bed/tba10way/anno
     for DB in `cat ../species.list`
 do
     echo "${DB} "
     ln -s  /hive/data/genomes/${DB}/${DB}.N.bed ${DB}.bed
     echo ${DB}.bed  >> nBeds
     ln -s  /hive/data/genomes/${DB}/chrom.sizes ${DB}.len
     echo ${DB}.len  >> sizes
 done
     # make sure they all are successful symLinks:
     ls -ogrtL *.bed | wc -l
     # 10
 
     screen -S hg38      # use a screen to control this longish job
     ssh ku
     cd /hive/data/genomes/hg38/bed/tba10way/anno
     mkdir result
 
     printf '#LOOP
 mafAddIRows -nBeds=nBeds $(path1) /hive/data/genomes/hg38/hg38.2bit {check out line+ result/$(file1)}
 #ENDLOOP
 ' > template
     # << happy emacs
 
     ls ../maf/*.maf > maf.list
     gensub2 maf.list single template jobList
     # no need to limit these jobs, there are only 358 of them
     para -ram=64g create jobList
     para try ... check ...
     para -maxJob=10 push
 # Completed: 356 of 356 jobs
 # CPU time in finished jobs:        669s      11.15m     0.19h    0.01d  0.000 y
 # IO & Wait Time:                  1015s      16.92m     0.28h    0.01d  0.000 y
 # Average job time:                   5s       0.08m     0.00h    0.00d
 # Longest finished job:              56s       0.93m     0.02h    0.00d
 # Submission to last job:           152s       2.53m     0.04h    0.00d
 
     du -hsc result
     #  30G    result
 
     # Load into database
     rm -f /gbdb/hg38/tba10way/maf/*
     cd /hive/data/genomes/hg38/bed/tba10way/anno/result
 
     ln -s `pwd`/*.maf /gbdb/hg38/tba10way/maf/
 
     # this generates an immense tba10way.tab file in the directory
     #	where it is running.  Best to run this over in scratch.
     cd /dev/shm
     time hgLoadMaf -pathPrefix=/gbdb/hg38/tba10way/maf hg38 multiz10way
     # Loaded 17000572 mafs in 356 files from /gbdb/hg38/tba10way/maf
     # real    5m10.599s
 
 
     # -rw-rw-r--   1  900843709 Jul 13 11:01 multiz10way.tab
 
     time (cat /gbdb/hg38/tba10way/maf/*.maf \
         | hgLoadMafSummary -verbose=2 -minSize=30000 \
 	-mergeGap=1500 -maxSize=200000 hg38 multiz10waySummary stdin)
 # Created 1887874 summary blocks from 82903651 components and 17000572 mafs from stdin
 # real    7m15.373s
 
 # -rw-rw-r--   1  900843709 Jul 13 11:01 multiz10way.tab
 # -rw-rw-r--   1   92803739 Jul 13 11:10 multiz10waySummary.tab
 
     wc -l multiz10way*.tab
     # 17000572 multiz10way.tab
     #   1887874 multiz10waySummary.tab
 
     rm multiz10way*.tab
 
 ##############################################################################
 # MULTIZ10WAY MAF FRAMES (DONE - 2020-07-13 - Hiram)
     ssh hgwdev
     mkdir /hive/data/genomes/hg38/bed/tba10way/frames
     cd /hive/data/genomes/hg38/bed/tba10way/frames
 #   survey all the genomes to find out what kinds of gene tracks they have
 
     printf '#!/bin/csh -fe
 foreach db (`cat ../species.list`)
     echo -n "# ${db}: "
     set tables = `hgsql $db -N -e "show tables" | egrep "Gene|ncbiRefSeq"`
     foreach table ($tables)
         if ($table == "ensGene" || $table == "refGene" || \
            $table == "ncbiRefSeq" || $table == "ncbiRefSeqCurated" || \
            $table == "ncbiRefSeqPredicted" || $table == "mgcGenes" || \
            $table == "knownGene" || $table == "xenoRefGene" ) then
            set count = `hgsql $db -N -e "select count(*) from $table"`
             echo -n "${table}: ${count}, "
         endif
     end
     echo
 end
 ' > showGenes.csh
 
     chmod +x ./showGenes.csh
     time ./showGenes.csh
 # hg38: ensGene: 208239, knownGene: 247541, mgcGenes: 36638, ncbiRefSeq: 166923, ncbiRefSeqCurated: 78591, ncbiRefSeqPredicted: 88332, refGene: 86303, xenoRefGene: 199483, 
 # panTro6: ncbiRefSeq: 102471, ncbiRefSeqCurated: 2711, ncbiRefSeqPredicted: 99760, refGene: 2873, xenoRefGene: 242030, 
 # rheMac10: ensGene: 64191, ncbiRefSeq: 86732, ncbiRefSeqCurated: 6375, ncbiRefSeqPredicted: 80357, refGene: 6481, xenoRefGene: 240078, 
 # mm10: ensGene: 103734, knownGene: 142446, mgcGenes: 27606, ncbiRefSeq: 106520, ncbiRefSeqCurated: 34892, ncbiRefSeqPredicted: 71628, refGene: 46131, xenoRefGene: 190941, 
 # canFam4: refGene: 2380, xenoRefGene: 235387, 
 # neoSch1: ncbiRefSeq: 29897, ncbiRefSeqCurated: 13, ncbiRefSeqPredicted: 29884, xenoRefGene: 440659, 
 # pteAle1: ncbiRefSeq: 46978, ncbiRefSeqCurated: 32, ncbiRefSeqPredicted: 46946, 
 # loxAfr3: ensGene: 28847, ncbiRefSeq: 46056, ncbiRefSeqCurated: 35, ncbiRefSeqPredicted: 46021, refGene: 23, xenoRefGene: 355734, 
 # monDom5: ensGene: 32358, refGene: 1238, xenoRefGene: 254747, 
 # ornAna2: ensGene: 31006, refGene: 705, xenoRefGene: 699826, 
 
 # real    0m1.884s
 
     # from that summary, use these gene sets:
     # ncbiRefSeqCurated - hg38 mm10
     # ncbiRefSeq - panTro6 rheMac10 neoSch1 pteAle1 loxAfr3
     # ensGene - monDom5 ornAna2
     # xenoRefGene - canFam4
 
     mkdir genes
 
     #   1. ncbiRefSeqCurated: hg38 mm10
     for DB in hg38 mm10
 do
     hgsql -N -e "select name,chrom,strand,txStart,txEnd,cdsStart,cdsEnd,exonCount,exonStarts,exonEnds from ncbiRefSeqCurated" ${DB} \
       | genePredSingleCover stdin stdout | gzip -2c \
         > genes/${DB}.gp.gz
     echo -n "# ${DB}: "
     genePredCheck -db=${DB} genes/${DB}.gp.gz 2>&1 | sed -e 's/^/    # /;'
 done
 # hg38:     # checked: 21740 failed: 0
 # mm10:     # checked: 20520 failed: 0
 
     #   2. ncbiRefSeq: panTro6 rheMac10 neoSch1 pteAle1 loxAfr3
     for DB in panTro6 rheMac10 neoSch1 pteAle1 loxAfr3
 do
 hgsql -N -e "select
 name,chrom,strand,txStart,txEnd,cdsStart,cdsEnd,exonCount,exonStarts,exonEnds
 from ncbiRefSeq" ${DB} \
       | genePredSingleCover stdin stdout | gzip -2c \
         > /dev/shm/${DB}.tmp.gz
     mv /dev/shm/${DB}.tmp.gz genes/$DB.gp.gz
     echo -n "# ${DB}: "
     genePredCheck -db=${DB} genes/${DB}.gp.gz
 done
 # panTro6: checked: 21380 failed: 0
 # rheMac10: checked: 21021 failed: 0
 # neoSch1: checked: 18783 failed: 0
 # pteAle1: checked: 18326 failed: 0
 # loxAfr3: checked: 21061 failed: 0
 
     #   3. ensGene: monDom5 ornAna2
     for DB in monDom5 ornAna2
 do
     hgsql -N -e "select
 name,chrom,strand,txStart,txEnd,cdsStart,cdsEnd,exonCount,exonStarts,exonEnds
 from ensGene" ${DB} \
       | genePredSingleCover stdin stdout | gzip -2c \
         > /dev/shm/${DB}.tmp.gz
     mv /dev/shm/${DB}.tmp.gz genes/$DB.gp.gz
     echo -n "# ${DB}: "
     genePredCheck -db=${DB} genes/${DB}.gp.gz
 done
 # monDom5: checked: 21033 failed: 0
 # ornAna2: checked: 21311 failed: 0
 
     #   4. xenoRefGene: canFam4
     for DB in canFam4
 do
     hgsql -N -e "select
 name,chrom,strand,txStart,txEnd,cdsStart,cdsEnd,exonCount,exonStarts,exonEnds
 from xenoRefGene" ${DB} \
       | genePredSingleCover stdin stdout | gzip -2c \
         > /dev/shm/${DB}.tmp.gz
     mv /dev/shm/${DB}.tmp.gz genes/$DB.gp.gz
     echo -n "# ${DB}: "
     genePredCheck -db=${DB} genes/${DB}.gp.gz
 done
 # canFam4: checked: 20110 failed: 0
 
     # verify counts for genes are reasonable:
     for T in genes/*.gz
 do
     echo -n "# $T: "
     zcat $T | cut -f1 | sort | uniq -c | wc -l
 done
 # genes/canFam4.gp.gz: 19188
 # genes/hg38.gp.gz: 19211
 # genes/loxAfr3.gp.gz: 21061
 # genes/mm10.gp.gz: 20520
 # genes/monDom5.gp.gz: 21033
 # genes/neoSch1.gp.gz: 18783
 # genes/ornAna2.gp.gz: 21311
 # genes/panTro6.gp.gz: 21376
 # genes/pteAle1.gp.gz: 18326
 # genes/rheMac10.gp.gz: 21020
 
     # kluster job to annotate each maf file
     screen -S hg38      # manage long running procedure with screen
     ssh ku
     cd /hive/data/genomes/hg38/bed/tba10way/frames
 
     printf '#!/bin/csh -fe
 
 set C = $1
 set G = $2
 
 cat ../maf/${C}.maf | genePredToMafFrames hg38 stdin stdout \
         ${G} genes/${G}.gp.gz | gzip > parts/${C}.${G}.mafFrames.gz
 ' > runOne
 
     chmod +x runOne
 
     ls ../maf | sed -e "s/.maf//" > chr.list
     ls genes | sed -e "s/.gp.gz//" > gene.list
 
     printf '#LOOP
 runOne $(root1) $(root2) {check out exists+ parts/$(root1).$(root2).mafFrames.gz}
 #ENDLOOP
 ' > template
 
     mkdir parts
     gensub2 chr.list gene.list template jobList
     para -ram=64g create jobList
     para try ... check ... push
 # Completed: 3560 of 3560 jobs
 # CPU time in finished jobs:       3386s      56.43m     0.94h    0.04d  0.000 y
 # IO & Wait Time:                  8419s     140.32m     2.34h    0.10d  0.000 y
 # Average job time:                   3s       0.06m     0.00h    0.00d
 # Longest finished job:              19s       0.32m     0.01h    0.00d
 # Submission to last job:           753s      12.55m     0.21h    0.01d
 
     # collect all results into one file:
     cd /hive/data/genomes/hg38/bed/tba10way/frames
     time find ./parts -type f | while read F
 do
     echo "${F}" 1>&2
     zcat ${F}
 done | sort -k1,1 -k2,2n > multiz10wayFrames.bed
     # real    0m44.679s
 
     # -rw-rw-r-- 1 227919888 Jul 13 10:52 multiz10wayFrames.bed
 
     gzip multiz10wayFrames.bed
 
     # verify there are frames on everything, should be 10 species:
     # (count from: ls genes | wc)
     zcat multiz10wayFrames.bed.gz | awk '{print $4}' | sort | uniq -c \
         | sed -e 's/^/# /;' > species.check.list
     wc -l species.check.list
     # 10
 
 #  243766 canFam4
 #  210024 hg38
 #  389932 loxAfr3
 #  251743 mm10
 #  570387 monDom5
 #  252141 neoSch1
 #  589907 ornAna2
 #  225700 panTro6
 #  380179 pteAle1
 #  236549 rheMac10
 
     #   load the resulting file
     ssh hgwdev
     cd /hive/data/genomes/hg38/bed/tba10way/frames
     time hgLoadMafFrames hg38 multiz10wayFrames multiz10wayFrames.bed.gz
     #   real    0m21.014s
 
     hgsql -e 'select count(*) from multiz10wayFrames;' hg38
     # +----------+
     # | count(*) |
     # +----------+
     # |  3350328 |
     # +----------+
 
     time featureBits -countGaps hg38 multiz10wayFrames
     # 64900367 bases of 3272116950 (1.983%) in intersection
     # real    0m18.824s
 
     #   enable the trackDb entries:
 # frames multiz10wayFrames
 # irows on
     #   zoom to base level in an exon to see codon displays
     #	appears to work OK
 
 #########################################################################
 ## Experiment running tba on just chrX alignments (DONE - 2020-07-14 - Hiram)
 
     mkdir /hive/data/genomes/hg38/bed/tba10way/chrX
     cd /hive/data/genomes/hg38/bed/tba10way/chrX
     mkdir fasta
     # six of these species have chrX which corresponds to hg38 chrX
     # so use that sequence directly, the fasta header line is formatted
     # to correspond with desired meta info to be used by tba/multiz tools:
     for S in hg38 panTro6 rheMac10 mm10 canFam4 monDom5
 do
   size=`grep -w chrX /hive/data/genomes/${S}/chrom.sizes | awk '{print $2}'`
   twoBitToFa /hive/data/genomes/$S/$S.2bit:chrX stdout \
     | sed -e "s#>.*#>$S:chrX:1:+:$size#;" > fasta/$S.fa
 done
 
     # the other four require multiple contigs in order to get some matching
     # sequence to hg38.chrX.  From a survey of the resulting chrX.maf as
     # computed above, the following contigs were selected as the set to
     # match with hg38.chrX sequence:
 
 export S="loxAfr3"
 for C in scaffold_32 scaffold_39 scaffold_24 scaffold_56 scaffold_82 scaffold_81 scaffold_78 scaffold_89 scaffold_100 scaffold_94 scaffold_111 scaffold_85 scaffold_120 scaffold_130
 do
   size=`grep -w "${C}" /hive/data/genomes/${S}/chrom.sizes | awk '{print $2}'`
   twoBitToFa /hive/data/genomes/$S/$S.2bit:${C} stdout \
     | sed -e "s#>.*#>$S:${C}:1:+:$size#;"
   printf "%s.%s\n" "${S}" "${C}" 1>&2
 done > fasta/${S}.fa
 
 export S="neoSch1"
 for C in chrX_NW_018726553v1_random chrX_NW_018726535v1_random chrX_NW_018726533v1_random chrX_NW_018726541v1_random chrX_NW_018726552v1_random chrX_NW_018726532v1_random chrX_NW_018726536v1_random chrX_NW_018726539v1_random chrX_NW_018726538v1_random chrX_NW_018726551v1_random chrX_NW_018726550v1_random chrX_NW_018726549v1_random chrX_NW_018726548v1_random chrX_NW_018726540v1_random chrX_NW_018726546v1_random chrX_NW_018726534v1_random chrX_NW_018726547v1_random chrX_NW_018726545v1_random chrX_NW_018726543v1_random chrX_NW_018726542v1_random chrX_NW_018726544v1_random NW_018729802v1 chrX_NW_018726537v1_random NW_018729761v1
 do
   size=`grep -w "${C}" /hive/data/genomes/${S}/chrom.sizes | awk '{print $2}'`
   twoBitToFa /hive/data/genomes/$S/$S.2bit:${C} stdout \
     | sed -e "s#>.*#>$S:${C}:1:+:$size#;"
   printf "%s.%s\n" "${S}" "${C}" 1>&2
 done > fasta/${S}.fa
 
 export S="ornAna2"
 for C in chrX1 chrX5 chrX3 chrX2 chrUn_DS181337v1 chrUn_DS181394v1 chrUn_DS181098v1 chrUn_DS181265v1 chrUn_DS180891v1 chrUn_DS181278v1 chrUn_DS180962v1 chrUn_DS180974v1 chrUn_DS181276v1 chrUn_DS181171v1 chrUn_DS181191v1
 do
   size=`grep -w "${C}" /hive/data/genomes/${S}/chrom.sizes | awk '{print $2}'`
   twoBitToFa /hive/data/genomes/$S/$S.2bit:${C} stdout \
     | sed -e "s#>.*#>$S:${C}:1:+:$size#;"
   printf "%s.%s\n" "${S}" "${C}" 1>&2
 done > fasta/${S}.fa
 
 export S="pteAle1"
 for C in KB030639 KB031147 KB030981 KB030535 KB030400 KB031071 KB030344 KB030533 KB030633 KB030676 KB030941 KB030758 KB030280 KB030859 KB031069 KB031066 KB030496 KB030969 KB030414 KB030261 KB031044 KB030442 KB030674 KB030794
 do
   size=`grep -w "${C}" /hive/data/genomes/${S}/chrom.sizes | awk '{print $2}'`
   twoBitToFa /hive/data/genomes/$S/$S.2bit:${C} stdout \
     | sed -e "s#>.*#>$S:${C}:1:+:$size#;"
   printf "%s.%s\n" "${S}" "${C}" 1>&2
 done > fasta/${S}.fa
 
    # Now, to run all by all lastz comparisons with these sequences, this
    # set of 45 commands are run:
 ./runOne hg38 panTro6 "T=2 O=600 E=150 M=254 K=4500 L=4500 Y=15000 Q=/scratch/data/blastz/human_chimp.v2.q"
 ./runOne hg38 rheMac10 "M=254 Q=/hive/data/staging/data/blastz/human_chimp.v2.q O=600 E=150 K=4500 Y=15000 T=2"
 ./runOne hg38 mm10 ""
 ./runOne hg38 canFam4 "M=254"
 ./runOne hg38 neoSch1 "O=400 E=30 M=254"
 ./runOne hg38 pteAle1 "O=400 E=30 M=254"
 ./runOne hg38 loxAfr3 "O=400 E=30 M=254"
 ./runOne hg38 monDom5 "M=50 Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne hg38 ornAna2 "O=400 E=30 Y=3400 L=6000 K=2200 M=50 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne panTro6 rheMac10 "M=254 Q=/hive/data/staging/data/blastz/human_chimp.v2.q O=600 E=150 K=4500 Y=15000 T=2"
 ./runOne panTro6 mm10 "M=254"
 ./runOne panTro6 canFam4 "M=254"
 ./runOne panTro6 neoSch1 "M=254"
 ./runOne panTro6 pteAle1 "M=254"
 ./runOne panTro6 loxAfr3 "M=254"
 ./runOne panTro6 monDom5 "E=30 Y=3400 L=6000 K=2200 M=50 Q=/hive/data/staging/data/blastz/HoxD55.q"
 ./runOne panTro6 ornAna2 "E=30 Y=3400 L=6000 K=2200 M=50 Q=/hive/data/staging/data/blastz/HoxD55.q"
 ./runOne rheMac10 mm10 "M=254"
 ./runOne rheMac10 canFam4 "M=254"
 ./runOne rheMac10 neoSch1 "M=254"
 ./runOne rheMac10 pteAle1 "M=254"
 ./runOne rheMac10 loxAfr3 "M=254"
 ./runOne rheMac10 monDom5 "E=30 Y=3400 L=6000 K=2200 M=50 Q=/hive/data/staging/data/blastz/HoxD55.q"
 ./runOne rheMac10 ornAna2 "E=30 Y=3400 L=6000 K=2200 M=50 Q=/hive/data/staging/data/blastz/HoxD55.q"
 ./runOne mm10 canFam4 "M=254"
 ./runOne mm10 neoSch1 "M=254"
 ./runOne mm10 pteAle1 "M=254"
 ./runOne mm10 loxAfr3 "M=254"
 ./runOne mm10 monDom5 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne mm10 ornAna2 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne canFam4 neoSch1 "M=254"
 ./runOne canFam4 pteAle1 "M=254"
 ./runOne canFam4 loxAfr3 "M=254"
 ./runOne canFam4 monDom5 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne canFam4 ornAna2 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne neoSch1 pteAle1 "M=254"
 ./runOne neoSch1 loxAfr3 "M=254"
 ./runOne neoSch1 monDom5 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne neoSch1 ornAna2 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne pteAle1 loxAfr3 "M=254"
 ./runOne pteAle1 monDom5 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne pteAle1 ornAna2 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne loxAfr3 monDom5 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne loxAfr3 ornAna2 "Y=3400 L=6000 K=2200 Q=/scratch/data/blastz/HoxD55.q"
 ./runOne monDom5 ornAna2 "H=2000 Y=3400 L=8000 K=2200 Q=/hive/data/staging/data/blastz/HoxD55.q"
 
     # where the runOne script is:
 
 #!/bin/bash
 
 # runOne - run a lastz alignment for one target/query species pair, then
 #          convert to MAF, sort, and reduce to single coverage for tba.
 # usage:   runOne target query "lastz arguments"
 # output:  target/target.query.orig.maf and target/target.query.sing.maf
 
 PATH=/cluster/bin/penn/multiz.2009-01-21_patched:/cluster/bin/penn/lastz-distrib-1.04.03/bin:$PATH
 
 if [ $# -lt 3 ]; then
   printf "usage: runOne target query lastzArgs...\n" 1>&2
   exit 255
 fi
 
 export target=$1
 export query=$2
 shift 2
 export lastzArgs=$*
 
 mkdir -p "${target}"
 cd "${target}"
 rm -f "${target}.${query}.sing.maf"
 rm -f "${query}"
 
 # tba expects the sequence files to be named with the plain db name,
 # hence the symlinks to the fasta files
 if [ ! -s "${target}" ]; then
   ln -s ../fasta/${target}.fa ${target}
 fi
 ln -s ../fasta/${query}.fa ${query}
 
 # NOTE: was '$lastArgs' (undefined variable), which silently dropped all
 # the lastz tuning arguments; use the lastzArgs variable set above.
 # lastzArgs is deliberately unquoted so the arguments word-split.
 lastzWrapper "${target}" "${query}" ${lastzArgs} \
     | lav2maf "/dev/stdin" "${target}" "${query}" \
       | maf_sort "/dev/stdin" "${target}" \
         > "${target}.${query}.orig.maf"
 single_cov2 ${target}.${query}.orig.maf > ${target}.${query}.sing.maf
 
     # with all the *.sing.maf results, make a working directory
     # on /dev/shm to run the tba procedure
 
     mkdir /dev/shm/chrX.10way
     cp -p */*.sing.maf /dev/shm/chrX.10way
     # and the procedure needs the sequences in files with their sequence names:
     for S in canFam4 hg38 loxAfr3 mm10 monDom5 neoSch1 ornAna2 panTro6 pteAle1 rheMac10
 do
     cp -p fasta/${S}.fa /dev/shm/chrX.10way/${S}
 done
 
     # now, can run the tba command:
     cd /dev/shm/chrX.10way
 
 PATH=/cluster/bin/penn/multiz.2009-01-21_patched:/cluster/bin/penn/lastz-distrib-1.04.03/bin:$PATH
 
 time tba "(((((((hg38 panTro6) rheMac10) mm10) ((canFam4 neoSch1) pteAle1)) loxAfr3) monDom5) ornAna2)" *.sing.maf chrX.tba10way.maf
 
     # real    154m33.929s
 
     # the resulting maf file:
 -rw-rw-r-- 1 1634826703 Jul 14 11:13 chrX.tba10way.maf
 
     # extract the hg38 reference from that:
 
 PATH=/cluster/bin/penn/multiz.2009-01-21_patched:/cluster/bin/penn/lastz-distrib-1.04.03/bin:$PATH
 
 maf_project chrX.tba10way.maf hg38 > hg38.chrX.tba10way.maf
 
     # add iRows to this maf file:
     mkdir /hive/data/genomes/hg38/bed/tba10way/chrX/anno
     cd /hive/data/genomes/hg38/bed/tba10way/chrX/anno
     for DB in hg38 panTro6 rheMac10 mm10 canFam4 neoSch1 pteAle1 loxAfr3 monDom5 ornAna2
 do
     echo "${DB} "
     ln -s  /hive/data/genomes/${DB}/${DB}.N.bed ${DB}.bed
     echo ${DB}.bed  >> nBeds
     ln -s  /hive/data/genomes/${DB}/chrom.sizes ${DB}.len
     echo ${DB}.len  >> sizes
 done
     # make sure they all are successful symLinks:
     ls -ogrtL *.bed | wc -l
 
 time mafAddIRows -nBeds=nBeds ../hg38.chrX.tba10way.maf /hive/data/genomes/hg38/hg38.2bit hg38.chrX.irows.maf
     # real    0m10.997s
 
     # verify how many iRows for each species:
     grep "^i " hg38.chrX.irows.maf | awk '{print $2}' \
       | awk -F'.' '{print $1}' | sort | uniq -c
 #  147436 canFam4
 #  132536 loxAfr3
 #   86472 mm10
 #   16095 monDom5
 #  145321 neoSch1
 #    9881 ornAna2
 #  212809 panTro6
 #   92233 pteAle1
 #  204033 rheMac10
 
     # loading this maf file:
 
     ln -s `pwd`/hg38.chrX.irows.maf /gbdb/hg38/tba10way/chrX.tba10way.maf
 
     time hgLoadMaf -loadFile=/gbdb/hg38/tba10way/chrX.tba10way.maf hg38 tba10way
     # Loaded 219436 mafs in 1 files from /gbdb/hg38/tba10way/
     # real    0m5.446s
 
     time (cat /gbdb/hg38/tba10way/chrX.tba10way.maf \
         | hgLoadMafSummary -verbose=2 -minSize=30000 \
 	-mergeGap=1500 -maxSize=200000 hg38 tba10waySummary stdin)
 #Created 65148 summary blocks from 1046816 components and 219436 mafs from stdin
 # real    0m11.363s
 
 #########################################################################
+# extract other references from the primary tba file:
+ 
+    mkdir /hive/data/genomes/hg38/bed/tba10way/chrX/eachReference
+    cd /hive/data/genomes/hg38/bed/tba10way/chrX/eachReference
+
+PATH=/cluster/bin/penn/multiz.2009-01-21_patched:/cluster/bin/penn/lastz-distrib-1.04.03/bin:$PATH
+
+time for S in panTro6 rheMac10 mm10 canFam4 monDom5
+do
+   printf "maf_project ../chrX.tba10way.maf ${S} > ${S}.chrX.tba10way.maf\n"
+   maf_project ../chrX.tba10way.maf ${S} > ${S}.chrX.tba10way.maf
+done
+# real    67m58.091s
+# -rw-rw-r-- 1 936990477 Jul 16 09:26 panTro6.chrX.tba10way.maf
+# -rw-rw-r-- 1 921988358 Jul 16 09:38 rheMac10.chrX.tba10way.maf
+# -rw-rw-r-- 1 569699889 Jul 16 09:57 mm10.chrX.tba10way.maf
+# -rw-rw-r-- 1 783347380 Jul 16 10:13 canFam4.chrX.tba10way.maf
+# -rw-rw-r-- 1 137853424 Jul 16 10:22 monDom5.chrX.tba10way.maf
+
+    # add iRows to each maf file:
+for S in panTro6 rheMac10 mm10 canFam4 monDom5
+do
+ mkdir /hive/data/genomes/hg38/bed/tba10way/chrX/eachReference/anno.${S}
+ cd /hive/data/genomes/hg38/bed/tba10way/chrX/eachReference/anno.${S}
+    for DB in hg38 panTro6 rheMac10 mm10 canFam4 neoSch1 pteAle1 loxAfr3 monDom5 ornAna2
+  do
+    echo "${DB} "
+    ln -s  /hive/data/genomes/${DB}/${DB}.N.bed ${DB}.bed
+    echo ${DB}.bed  >> nBeds
+    ln -s  /hive/data/genomes/${DB}/chrom.sizes ${DB}.len
+    echo ${DB}.len  >> sizes
+  done
+  time mafAddIRows -nBeds=nBeds ../$S.chrX.tba10way.maf /hive/data/genomes/${S}/${S}.2bit ${S}.chrX.irows.maf
+done
+
+# -rw-rw-r-- 1 1023324141 Jul 16 12:07 anno.panTro6/panTro6.chrX.irows.maf
+# -rw-rw-r-- 1 1008627672 Jul 16 12:08 anno.rheMac10/rheMac10.chrX.irows.maf
+# -rw-rw-r-- 1  619033378 Jul 16 12:08 anno.mm10/mm10.chrX.irows.maf
+# -rw-rw-r-- 1  864191117 Jul 16 12:08 anno.canFam4/canFam4.chrX.irows.maf
+# -rw-rw-r-- 1  148826717 Jul 16 12:08 anno.monDom5/monDom5.chrX.irows.maf
+
+    # verify how many iRows for each species:
+for S in panTro6 rheMac10 mm10 canFam4 monDom5
+do
+    printf "#### %s\n" "${S}"
+    grep "^i " anno.${S}/${S}.chrX.irows.maf | awk '{print $2}' \
+      | awk -F'.' '{print $1}' | sort | uniq -c
+done
+#### panTro6
+ 144828 canFam4
+ 212814 hg38
+ 130394 loxAfr3
+  85172 mm10
+  15781 monDom5
+ 142903 neoSch1
+   9711 ornAna2
+  90581 pteAle1
+ 200286 rheMac10
+#### rheMac10
+ 145304 canFam4
+ 204121 hg38
+ 130794 loxAfr3
+  85775 mm10
+  15981 monDom5
+ 143364 neoSch1
+   9804 ornAna2
+ 200375 panTro6
+  91252 pteAle1
+#### mm10
+  77338 canFam4
+  87110 hg38
+  73960 loxAfr3
+  13369 monDom5
+  76255 neoSch1
+   7848 ornAna2
+  85790 panTro6
+  51639 pteAle1
+  86370 rheMac10
+#### canFam4
+ 148757 hg38
+ 119363 loxAfr3
+  77434 mm10
+  14307 monDom5
+ 174404 neoSch1
+   8862 ornAna2
+ 146153 panTro6
+  97206 pteAle1
+ 146559 rheMac10
+#### monDom5
+  14647 canFam4
+  16511 hg38
+  15950 loxAfr3
+  13618 mm10
+  14652 neoSch1
+   4316 ornAna2
+  16183 panTro6
+   9169 pteAle1
+  16381 rheMac10
+
+    # load each maf file:
+for S in panTro6 rheMac10 mm10 canFam4 monDom5
+do
+  mkdir -p /gbdb/${S}/tba10way
+  rm -f /gbdb/${S}/tba10way/chrX.tba10way.maf
+  ln -s `pwd`/anno.${S}/${S}.chrX.irows.maf /gbdb/${S}/tba10way/chrX.tba10way.maf
+  printf "#### %s\n" "${S}"
+  hgLoadMaf -loadFile=/gbdb/${S}/tba10way/chrX.tba10way.maf ${S} tba10way
+  cat /gbdb/${S}/tba10way/chrX.tba10way.maf \
+        | hgLoadMafSummary -verbose=2 -minSize=30000 \
+	-mergeGap=1500 -maxSize=200000 ${S} tba10waySummary stdin
+done
+# #### panTro6
+# Loaded 215299 mafs in 1 files from /gbdb/panTro6/tba10way/
+#Created 64264 summary blocks from 1032470 components and 215299 mafs from stdin
+# #### rheMac10
+# Loaded 215752 mafs in 1 files from /gbdb/rheMac10/tba10way/
+#Created 65625 summary blocks from 1026770 components and 215752 mafs from stdin
+# #### mm10
+# Loaded 119058 mafs in 1 files from /gbdb/mm10/tba10way/
+# Created 79395 summary blocks from 559679 components and 119058 mafs from stdin
+# #### canFam4
+# Loaded 202205 mafs in 1 files from /gbdb/canFam4/tba10way/
+# Created 60026 summary blocks from 933045 components and 202205 mafs from stdin
+# #### monDom5
+# Loaded 26098 mafs in 1 files from /gbdb/monDom5/tba10way/
+# Created 40309 summary blocks from 121427 components and 26098 mafs from stdin
+
+#########################################################################
 # Phylogenetic tree from 10-way (DONE - 2013-09-13 - Hiram)
     mkdir /hive/data/genomes/hg38/bed/tba10way/4d
     cd /hive/data/genomes/hg38/bed/tba10way/4d
 
     # the annotated maf's are in:
     ../anno/result/*.maf
 
     # using knownGene for hg38, only transcribed genes and nothing
     #	from the randoms and other misc.
     hgsql -Ne "select name,chrom,strand,txStart,txEnd,cdsStart,cdsEnd,exonCount,exonStarts,exonEnds from knownGene where cdsEnd > cdsStart;" hg38 \
       | egrep -E -v "chrM|chrUn|random|_alt" > knownGene.gp
     wc -l *.gp
     #     95199 knownGene.gp
 
     # verify it is only on the chroms:
     cut -f2 knownGene.gp | sort | uniq -c | sort -rn | sed -e 's/^/    # /;'
     #    7956 chr1
     #    7306 chr19
     #    6554 chr17
     #    6371 chr11
     #    6301 chr2
     #    5794 chr12
     #    5688 chr3
     #    4971 chr16
     #    4324 chr7
     #    4277 chr6
     #    4108 chr5
     #    3751 chr14
     #    3622 chr4
     #    3580 chr8
     #    3364 chr15
     #    3076 chrX
     #    2968 chr10
     #    2961 chr9
     #    2107 chr22
     #    2091 chr20
     #    1703 chr18
     #    1175 chr13
     #     935 chr21
     #     216 chrY
 
     genePredSingleCover knownGene.gp stdout | sort > knownGeneNR.gp
     wc -l knownGeneNR.gp
     #	19306 knownGeneNR.gp
 
     ssh ku
     mkdir /hive/data/genomes/hg38/bed/tba10way/4d/run
     cd /hive/data/genomes/hg38/bed/tba10way/4d/run
     mkdir ../mfa
 
     # newer versions of msa_view have a slightly different operation
     # the sed of the gp file inserts the reference species in the chr name
     cat << '_EOF_' > 4d.csh
 #!/bin/csh -fe
 set PHASTBIN = /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin
 set r = "/hive/data/genomes/hg38/bed/tba10way"
 set c = $1
 set infile = $r/anno/result/$2
 set outfile = $3
 cd /dev/shm
 # 'clean' maf, removes all chrom names, leaves only the db name
 perl -wpe 's/^s ([^.]+)\.\S+/s $1/' $infile > $c.maf
 awk -v C=$c '$2 == C {print}' $r/4d/knownGeneNR.gp | sed -e "s/\t$c\t/\thg38.$c\t/" > $c.gp
 set NL=`wc -l $c.gp| gawk '{print $1}'`
 if ("$NL" != "0") then
     $PHASTBIN/msa_view --4d --features $c.gp -i MAF $c.maf -o SS > $c.ss
     $PHASTBIN/msa_view -i SS --tuple-size 1 $c.ss > $r/4d/run/$outfile
 else
     echo "" > $r/4d/run/$outfile
 endif
 rm -f $c.gp $c.maf $c.ss
 '_EOF_'
     # << happy emacs
     chmod +x 4d.csh
 
     ls -1S /hive/data/genomes/hg38/bed/tba10way/anno/result/*.maf \
 	| sed -e "s#.*tba10way/anno/result/##" \
         | egrep -E -v "chrM|chrUn|random|_alt" > maf.list
 
     printf '#LOOP
 4d.csh $(root1) $(path1) {check out line+ ../mfa/$(root1).mfa}
 #ENDLOOP
 ' > template
 
     gensub2 maf.list single template jobList
     para -ram=64g create jobList
     para try ... check ... push ... etc...
     para time
 # Completed: 24 of 24 jobs
 # CPU time in finished jobs:       7202s     120.03m     2.00h    0.08d  0.000 y
 # IO & Wait Time:                   480s       8.00m     0.13h    0.01d  0.000 y
 # Average job time:                 320s       5.33m     0.09h    0.00d
 # Longest finished job:             706s      11.77m     0.20h    0.01d
 # Submission to last job:           718s      11.97m     0.20h    0.01d
 
     # combine mfa files
     ssh hgwdev
     cd /hive/data/genomes/hg38/bed/tba10way/4d
     # verify no tiny files:
     ls -og mfa | sort -k3nr | tail -2
     #  -rw-rw-r-- 1  235884 Nov  3 11:25 chrY.mfa
 
     #want comma-less species.list
     time /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin/msa_view \
 	--aggregate "`cat ../species.list`" mfa/*.mfa | sed s/"> "/">"/ \
 	    > 4d.all.mfa
     # real    0m3.182s
 
     # check they are all in there:
     grep "^>" 4d.all.mfa | wc -l
     #   30
 
     sed 's/[a-z][a-z]*_//g; s/:[0-9\.][0-9\.]*//g; s/;//; /^ *$/d' \
         hg38.10way.nh
 
     sed 's/[a-z][a-z]*_//g; s/:[0-9\.][0-9\.]*//g; s/;//; /^ *$/d' \
 	../hg38.10way.nh > tree-commas.nh
 
     # use phyloFit to create tree model (output is phyloFit.mod)
     time /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin/phyloFit \
 	    --EM --precision MED --msa-format FASTA --subst-mod REV \
 		--tree tree-commas.nh 4d.all.mfa
     #   real    8m6.444s
 
     mv phyloFit.mod all.mod
 
     grep TREE all.mod
 # ((((((((((((hg38:0.0101811,panTro5:0.00256557):0.00168527,
 # panPan2:0.00255779):0.00567544,gorGor5:0.00857055):0.0093291,
 # ponAbe2:0.0183757):0.00328934,nomLeu3:0.022488):0.0111201,
 # (((((rheMac8:0.00266214,(macFas5:0.00218171,
 # macNem1:0.00424092):0.00171674):0.00606702,cerAty1:0.00671556):0.00164923,
 # papAnu3:0.00691761):0.00171877,(chlSab2:0.0163497,
 # manLeu1:0.00699129):0.00165863):0.00933639,((nasLar1:0.00768293,
 # colAng1:0.0163932):0.00167418,(rhiRox1:0.00213201,
 # rhiBie1:0.00222829):0.00577271):0.0104228):0.0214064):0.0206136,
 # (((calJac3:0.0358464,saiBol1:0.0324064):0.00173657,
 # cebCap1:0.0283117):0.00202114,aotNan1:0.0232387):0.0378592):0.0606754,
 # tarSyr2:0.142222):0.011174,(((micMur3:0.0563648,
 # proCoq1:0.0388184):0.00530425,(eulMac1:0.00218443,
 # eulFla1:0.00228562):0.0410542):0.0370791,
 # otoGar3:0.132725):0.0335535):0.0178619,mm10:0.344583):0.0241482,
 # canFam3:0.163902):0.0880829,dasNov3:0.0880829);
 
     # compare these calculated lengths to what we started with
 
     /cluster/bin/phast/all_dists ../hg38.10way.nh  | grep hg38 \
 	| sed -e "s/hg38.//;" | sort > original.dists
 
     grep TREE all.mod | sed -e 's/TREE: //;' \
        | /cluster/bin/phast/all_dists /dev/stdin | grep hg38 \
           | sed -e "s/hg38.//;"  | sort > hg38.dists
 
     # printing out the 'original', the 'new' the 'difference' and
     #    percent difference/delta
     join original.dists hg38.dists | awk '{
   printf "#\t%s\t%8.6f\t%8.6f\t%8.6f\t%8.6f\n", $1, $2, $3, $2-$3, 100*($2-$3)/$3 }'       | sort -k4n
 #       panTro5 0.013390        0.012747        0.000643        5.044324
 #       panPan2 0.015610        0.014424        0.001186        8.222407
 #       gorGor5 0.019734        0.026112        -0.006378       -24.425551
 #       ponAbe2 0.039403        0.045247        -0.005844       -12.915773
 #       nomLeu3 0.046204        0.052648        -0.006444       -12.239781
 #       papAnu3 0.079626        0.080660        -0.001034       -1.281924
 #       manLeu1 0.090974        0.080673        0.010301        12.768832
 #       rhiRox1 0.075474        0.081014        -0.005540       -6.838324
 #       rhiBie1 0.075474        0.081111        -0.005637       -6.949736
 #       cerAty1 0.082584        0.082107        0.000477        0.580949
 #       nasLar1 0.075474        0.082467        -0.006993       -8.479756
 #       rheMac8 0.079575        0.084120        -0.004545       -5.402996
 #       macFas5 0.079575        0.085357        -0.005782       -6.773903
 #       macNem1 0.081584        0.087416        -0.005832       -6.671548
 #       chlSab2 0.087974        0.090031        -0.002057       -2.284769
 #       colAng1 0.075574        0.091177        -0.015603       -17.112868
 #       aotNan1 0.102804        0.122992        -0.020188       -16.414076
 #       cebCap1 0.108804        0.130086        -0.021282       -16.359946
 #       saiBol1 0.087804        0.135917        -0.048113       -35.398810
 #       calJac3 0.107454        0.139357        -0.031903       -22.893001
 #       eulMac1 0.190934        0.247615        -0.056681       -22.890778
 #       eulFla1 0.190934        0.247716        -0.056782       -22.922217
 #       proCoq1 0.230934        0.248499        -0.017565       -7.068439
 #       tarSyr2 0.221294        0.264791        -0.043497       -16.426918
 #       micMur3 0.236534        0.266045        -0.029511       -11.092484
 #       otoGar3 0.270334        0.300022        -0.029688       -9.895274
 #       canFam3 0.332429        0.339655        -0.007226       -2.127453
 #       dasNov3 0.366691        0.351919        0.014772        4.197557
 #       mm10    0.502391        0.496188        0.006203        1.250131
 
 #########################################################################
 # phastCons 10-way (DONE - 2015-05-07 - Hiram)
     # split 10way mafs into 10M chunks and generate sufficient statistics
     # files for # phastCons
     ssh ku
     mkdir -p /hive/data/genomes/hg38/bed/tba10way/cons/ss
     mkdir -p /hive/data/genomes/hg38/bed/tba10way/cons/msa.split
     cd /hive/data/genomes/hg38/bed/tba10way/cons/msa.split
 
     cat << '_EOF_' > doSplit.csh
 #!/bin/csh -ef
 set c = $1
 set MAF = /hive/data/genomes/hg38/bed/tba10way/anno/result/$c.maf
 set WINDOWS = /hive/data/genomes/hg38/bed/tba10way/cons/ss/$c
 set WC = `cat $MAF | wc -l`
 set NL = `grep "^#" $MAF | wc -l`
 if ( -s $2 ) then
     exit 0
 endif
 if ( -s $2.running ) then
     exit 0
 endif
 
 date >> $2.running
 
 rm -fr $WINDOWS
 mkdir $WINDOWS
 pushd $WINDOWS > /dev/null
 if ( $WC != $NL ) then
 /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin/msa_split \
     $MAF -i MAF -o SS -r $WINDOWS/$c -w 3000000,0 -I 300 -B 5000
 endif
 popd > /dev/null
 date >> $2
 rm -f $2.running
 '_EOF_'
     # << happy emacs
     chmod +x doSplit.csh
 
     printf '#LOOP
 doSplit.csh $(root1) {check out line+ $(root1).done}
 #ENDLOOP
 ' > template
 
     cat << '_EOF_' > doSplit.csh
 #!/bin/csh -ef
 set c = $1
 set MAF = /hive/data/genomes/hg38/bed/tba10way/anno/result/$c.maf
 set WINDOWS = /hive/data/genomes/hg38/bed/tba10way/cons/ss/$c
 set WC = `cat $MAF | wc -l`
 set NL = `grep "^#" $MAF | wc -l`
 if ( -s $2 ) then
     exit 0
 endif
 if ( -s $2.running ) then
     exit 0
 endif
 
 date >> $2.running
 
 rm -fr $WINDOWS
 mkdir $WINDOWS
 pushd $WINDOWS > /dev/null
 if ( $WC != $NL ) then
 /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin/msa_split \
     $MAF -i MAF -o SS -r $WINDOWS/$c -w 3000000,0 -I 300 -B 5000
 endif
 popd > /dev/null
 date >> $2
 rm -f $2.running
 '_EOF_'
     # << happy emacs
     chmod +x doSplit.csh
 
     cat << '_EOF_' > template
 #LOOP
 doSplit.csh $(root1) {check out line+ $(root1).done}
 #ENDLOOP
 '_EOF_'
     # << happy emacs
 
 #	do the easy ones first to see some immediate results
     ls -1S -r ../../anno/result | sed -e "s/.maf//;" > maf.list
     # all can finish OK at a 64Gb memory limit
     gensub2 maf.list single template jobList
     para -ram=64g create jobList
     para try ... check ... etc
     para push
 # Completed: 358 of 358 jobs
 # CPU time in finished jobs:      13099s     218.32m     3.64h    0.15d  0.000 y
 # IO & Wait Time:                  1841s      30.68m     0.51h    0.02d  0.000 y
 # Average job time:                  42s       0.70m     0.01h    0.00d
 # Longest finished job:            1393s      23.22m     0.39h    0.02d
 # Submission to last job:          1468s      24.47m     0.41h    0.02d
 
     # Run phastCons
     #	This job is I/O intensive in its output files, beware where this
     #	takes place or do not run too many at once.
     ssh ku
     mkdir -p /hive/data/genomes/hg38/bed/tba10way/cons/run.cons
     cd /hive/data/genomes/hg38/bed/tba10way/cons/run.cons
 
     #	This is setup for multiple runs based on subsets, but only running
     #   the 'all' subset here.
     #   It triggers off of the current working directory
     #	$cwd:t which is the "grp" in this script.  Running:
     #	all and vertebrates
 
     cat << '_EOF_' > doPhast.csh
 #!/bin/csh -fe
 set PHASTBIN = /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin
 set c = $1
 set f = $2
 set len = $3
 set cov = $4
 set rho = $5
 set grp = $cwd:t
 set cons = /hive/data/genomes/hg38/bed/tba10way/cons
 set tmp = $cons/tmp/$f
 mkdir -p $tmp
 set ssSrc = $cons/ss
 set useGrp = "$grp.mod"
 if (-s $cons/$grp/$grp.non-inf) then
   ln -s $cons/$grp/$grp.mod $tmp
   ln -s $cons/$grp/$grp.non-inf $tmp
   ln -s $ssSrc/$c/$f.ss $tmp
 else
   ln -s $ssSrc/$c/$f.ss $tmp
   ln -s $cons/$grp/$grp.mod $tmp
 endif
 pushd $tmp > /dev/null
 if (-s $grp.non-inf) then
   $PHASTBIN/phastCons $f.ss $useGrp \
     --rho $rho --expected-length $len --target-coverage $cov --quiet \
     --not-informative `cat $grp.non-inf` \
     --seqname $c --idpref $c --most-conserved $f.bed --score > $f.pp
 else
   $PHASTBIN/phastCons $f.ss $useGrp \
     --rho $rho --expected-length $len --target-coverage $cov --quiet \
     --seqname $c --idpref $c --most-conserved $f.bed --score > $f.pp
 endif
 popd > /dev/null
 mkdir -p pp/$c bed/$c
 sleep 4
 touch pp/$c bed/$c
 rm -f pp/$c/$f.pp
 rm -f bed/$c/$f.bed
 mv $tmp/$f.pp pp/$c
 mv $tmp/$f.bed bed/$c
 rm -fr $tmp
 '_EOF_'
     # << happy emacs
     chmod +x doPhast.csh
 
     #	this template will serve for all runs
     #	root1 == chrom name, file1 == ss file name without .ss suffix
     printf '#LOOP
 ../run.cons/doPhast.csh $(root1) $(file1) 45 0.3 0.3 {check out line+ pp/$(root1)/$(file1).pp}
 #ENDLOOP
 ' > template
 
     ls -1S ../ss/chr*/chr* | sed -e "s/.ss$//" > ss.list
     wc -l ss.list
     #	1337 ss.list
 
     # Create parasol batch and run it
     # run for all species
     cd /hive/data/genomes/hg38/bed/tba10way/cons
     mkdir -p all
     cd all
     #	Using the .mod tree
     cp -p ../../4d/all.mod ./all.mod
 
     gensub2 ../run.cons/ss.list single ../run.cons/template jobList
     # beware overwhelming the cluster with these fast running high I/O jobs
     para -ram=32g create jobList
     para try ... check ...
     para -maxJob=16 push
 # Completed: 1337 of 1337 jobs
 # CPU time in finished jobs:      17323s     288.72m     4.81h    0.20d  0.001 y
 # IO & Wait Time:                  9727s     162.11m     2.70h    0.11d  0.000 y
 # Average job time:                  20s       0.34m     0.01h    0.00d
 # Longest finished job:              31s       0.52m     0.01h    0.00d
 # Submission to last job:           230s       3.83m     0.06h    0.00d
 
     # create Most Conserved track
     cd /hive/data/genomes/hg38/bed/tba10way/cons/all
     time cut -f1 ../../../../chrom.sizes | while read C
 do
     echo $C 1>&2
     ls -d bed/${C} 2> /dev/null | while read D
     do
         cat ${D}/${C}*.bed
     done | sort -k1,1 -k2,2n \
     | awk '{printf "%s\t%d\t%d\tlod=%d\t%s\n", "'${C}'", $2, $3, $5, $5;}'
 done > tmpMostConserved.bed
     # real    0m50.678s
 
     # -rw-rw-r--   1 101245734 Nov  3 14:20 tmpMostConserved.bed
 
     time /cluster/bin/scripts/lodToBedScore tmpMostConserved.bed \
         > mostConserved.bed
     # real    0m24.196s
 
     # -rw-rw-r--   1 103966297 Nov  3 14:21 mostConserved.bed
 
     # load into database
     ssh hgwdev
     cd /hive/data/genomes/hg38/bed/tba10way/cons/all
     time hgLoadBed hg38 phastConsElements10way mostConserved.bed
     #  Read 2949865 elements of size 5 from mostConserved.bed
     #  real    0m26.263s
 
     #	--rho 0.3 --expected-length 45 --target-coverage 0.3
     time featureBits hg38 -enrichment knownGene:cds phastConsElements10way
 # knownGene:cds 1.271%, phastConsElements10way 5.795%, both 0.874%, cover 68.73%, enrich 11.86x
 # real    0m21.637s
 
     # Try for 5% overall cov, and 70% CDS cov
     time featureBits hg38 -enrichment refGene:cds phastConsElements10way
 # refGene:cds 1.225%, phastConsElements10way 5.795%, both 0.863%, cover 70.50%, enrich 12.16x
 
 # real    0m22.260s
 
     # Create merged posterier probability file and wiggle track data files
     cd /hive/data/genomes/hg38/bed/tba10way/cons/all
     mkdir downloads
 
     time for D in `ls -d pp/chr* | sed -e 's#pp/##'`
 do
     echo "working: $D" 1>&2
     find ./pp/${D} -type f | sed -e "s#^./##; s#\.# d #g; s#-# m #;" \
 	| sort -k1,1 -k3,3n | sed -e "s# d #.#g; s# m #-#g;" | xargs cat \
         | gzip -c > downloads/${D}.phastCons10way.wigFix.gz
 done
     # real    32m29.089s
 
 
     #	encode those files into wiggle data
     time (zcat downloads/*.wigFix.gz \
 	| wigEncode stdin phastCons10way.wig phastCons10way.wib)
     #   Converted stdin, upper limit 1.00, lower limit 0.00
     #   real    15m40.010s
 
     du -hsc *.wi?
     # 2.8G    phastCons10way.wib
     # 283M    phastCons10way.wig
 
     #	encode into a bigWig file:
     #	(warning wigToBigWig process may be too large for memory limits
     #	in bash, to avoid the 32 Gb memory limit, set 180 Gb here:
 export sizeG=188743680
 ulimit -d $sizeG
 ulimit -v $sizeG
     time (zcat downloads/*.wigFix.gz \
       | wigToBigWig -verbose=2 stdin \
 	../../../../chrom.sizes phastCons10way.bw) > bigWig.log 2>&1
     egrep "VmPeak|real" bigWig.log
     # pid=37111: VmPeak:    33886864 kB
     # real    42m13.614s
 
     # -rw-rw-r--   1 7077152013 Nov  6 08:52 phastCons10way.bw
 
 
     bigWigInfo phastCons10way.bw
 version: 4
 isCompressed: yes
 isSwapped: 0
 primaryDataSize: 5,097,637,987
 primaryIndexSize: 93,372,648
 zoomLevels: 10
 chromCount: 355
 basesCovered: 2,955,660,600
 mean: 0.128025
 min: 0.000000
 max: 1.000000
 std: 0.247422
 
     #	if you wanted to use the bigWig file, loading bigWig table:
     #   but we don't use the bigWig file
     mkdir /gbdb/hg38/bbi
     ln -s `pwd`/phastCons10way.bw /gbdb/hg38/bbi
     hgsql hg38 -e 'drop table if exists phastCons10way; \
             create table phastCons10way (fileName varchar(255) not null); \
             insert into phastCons10way values
 	("/gbdb/hg38/bbi/phastCons10way.bw");'
 
     # Load gbdb and database with wiggle.
     ssh hgwdev
     cd /hive/data/genomes/hg38/bed/tba10way/cons/all
     ln -s `pwd`/phastCons10way.wib /gbdb/hg38/tba10way/phastCons10way.wib
     time hgLoadWiggle -pathPrefix=/gbdb/hg38/tba10way hg38 \
 	phastCons10way phastCons10way.wig
     #   real    0m32.272s
 
     time wigTableStats.sh hg38 phastCons10way
 # db.table            min max   mean       count     sumData
 # hg38.phastCons10way     0 1 0.128025 2955660600 3.78397e+08
 #       stdDev viewLimits
 #     0.247422 viewLimits=0:1
 # real    0m13.507s
 
     #  Create histogram to get an overview of all the data
     ssh hgwdev
     cd /hive/data/genomes/hg38/bed/tba10way/cons/all
     time hgWiggle -doHistogram -db=hg38 \
 	-hBinSize=0.001 -hBinCount=300 -hMinVal=0.0 -verbose=2 \
 	    phastCons10way > histogram.data 2>&1
     #	real    2m38.952s
 
     #	create plot of histogram:
 
     printf 'set terminal png small x000000 xffffff xc000ff x66ff66 xffff00 x00ffff font \
 "/usr/share/fonts/default/Type1/n022004l.pfb"
 set size 1.4, 0.8
 set key left box
 set grid noxtics
 set grid ytics
 set title " Human Hg38 Histogram phastCons10way track"
 set xlabel " phastCons10way score"
 set ylabel " Relative Frequency"
 set y2label " Cumulative Relative Frequency (CRF)"
 set y2range [0:1]
 set y2tics
 set yrange [0:0.02]
 
 plot "histogram.data" using 2:5 title " RelFreq" with impulses, \
         "histogram.data" using 2:7 axes x1y2 title " CRF" with lines
 ' | gnuplot > histo.png
 
     # take a look to see if it is sane:
 
     display histo.png &
 
 #########################################################################
 # phyloP for 10-way (DONE - 2017-11-06 - Hiram)
 #
     # split SS files into 1M chunks, this business needs smaller files
     #   to complete
 
     ssh ku
     mkdir /hive/data/genomes/hg38/bed/tba10way/consPhyloP
     cd /hive/data/genomes/hg38/bed/tba10way/consPhyloP
     mkdir ss run.split
     cd run.split
 
     printf '#!/bin/csh -ef
 set c = $1
 set MAF = /hive/data/genomes/hg38/bed/tba10way/anno/result/$c.maf
 set WINDOWS = /hive/data/genomes/hg38/bed/tba10way/consPhyloP/ss/$c
 set WC = `cat $MAF | wc -l`
 set NL = `grep "^#" $MAF | wc -l`
 if ( -s $2 ) then
     exit 0
 endif
 if ( -s $2.running ) then
     exit 0
 endif
 
 date >> $2.running
 
 rm -fr $WINDOWS
 mkdir -p $WINDOWS
 pushd $WINDOWS > /dev/null
 if ( $WC != $NL ) then
 /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin/msa_split \
     $MAF -i MAF -o SS -r $WINDOWS/$c -w 1000000,0 -I 1000 -B 5000
 endif
 popd > /dev/null
 date >> $2
 rm -f $2.running
 ' > doSplit.csh
 
     chmod +x doSplit.csh
 
     #	do the easy ones first to see some immediate results
     ls -1S -r ../../anno/result | sed -e "s/.maf//;" > maf.list
 
     # this needs a {check out line+ $(root1.done)} test for verification:
     printf '#LOOP
 ./doSplit.csh $(root1) $(root1).done
 #ENDLOOP
 ' > template
 
     gensub2 maf.list single template jobList
     # all can complete successfully at the 64Gb memory limit
     para -ram=64g create jobList
     para try ... check ... push ... etc...
 
 # Completed: 358 of 358 jobs
 # CPU time in finished jobs:      13512s     225.20m     3.75h    0.16d  0.000 y
 # IO & Wait Time:                  1646s      27.43m     0.46h    0.02d  0.000 y
 # Average job time:                  42s       0.71m     0.01h    0.00d
 # Longest finished job:            1494s      24.90m     0.41h    0.02d
 # Submission to last job:          1717s      28.62m     0.48h    0.02d
 
     # run phyloP with score=LRT
     ssh ku
     mkdir /cluster/data/hg38/bed/tba10way/consPhyloP
     cd /cluster/data/hg38/bed/tba10way/consPhyloP
 
     mkdir run.phyloP
     cd run.phyloP
     # Adjust model file base composition background and rate matrix to be
     # representative of the chromosomes in play
     grep BACK ../../4d/all.mod
     #   BACKGROUND: 0.207173 0.328301 0.237184 0.227343
 
     grep BACKGROUND ../../4d/all.mod | awk '{printf "%0.3f\n", $3 + $4}'
     #	0.565
     /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin/modFreqs \
 	../../4d/all.mod 0.565 > all.mod
     # verify, the BACKGROUND should now be paired up:
     grep BACK all.mod
     #   BACKGROUND: 0.217500 0.282500 0.282500 0.217500
 
     printf '#!/bin/csh -fe
 set PHASTBIN = /cluster/bin/phast.build/cornellCVS/phast.2010-12-30/bin
 set f = $1
 set ssFile = $1:t
 set out = $2
 set cName = $f:h
 set n = $f:r:e
 set grp = $cwd:t
 set cons = /hive/data/genomes/hg38/bed/tba10way/consPhyloP
 set tmp = $cons/tmp/$grp/$f
 /bin/rm -fr $tmp
 /bin/mkdir -p $tmp
 set ssSrc = "$cons/ss/$cName/$ssFile"
 set useGrp = "$grp.mod"
 /bin/ln -s $cons/run.phyloP/$grp.mod $tmp
 pushd $tmp > /dev/null
 echo source: $ssSrc.ss
 $PHASTBIN/phyloP --method LRT --mode CONACC --wig-scores --chrom $cName \
     -i SS $useGrp $ssSrc.ss > $ssFile.wigFix
 popd > /dev/null
 /bin/mkdir -p $out:h
 sleep 4
 /bin/touch $out:h
 /bin/mv $tmp/$ssFile.wigFix $out
 /bin/rm -fr $tmp
 /bin/rmdir --ignore-fail-on-non-empty $cons/tmp/$grp
 /bin/rmdir --ignore-fail-on-non-empty $cons/tmp
 ' > doPhyloP.csh
 
     chmod +x doPhyloP.csh
 
     # Create list of chunks
     find ../ss -type f | sed -e "s/.ss$//; s#../ss/##;" > ss.list
     # make sure the list looks good
     wc -l ss.list
     #	3308 ss.list
 
     # Create template file
     #	file1 == $chr/$chunk/file name without .ss suffix
     printf '#LOOP
 ../run.phyloP/doPhyloP.csh $(path1) {check out line+ wigFix/$(dir1)/$(file1).wigFix}
 #ENDLOOP
 ' > template
 
     ######################   Running all species  #######################
     # setup run for all species
     mkdir /hive/data/genomes/hg38/bed/tba10way/consPhyloP/all
     cd /hive/data/genomes/hg38/bed/tba10way/consPhyloP/all
     rm -fr wigFix
     mkdir wigFix
 
     gensub2 ../run.phyloP/ss.list single ../run.phyloP/template jobList
     # beware overloading the cluster with these quick and high I/O jobs
     para -ram=32g create jobList
     para try ... check ...
     para -maxJob=16 push
     para time > run.time
 
 # Completed: 3308 of 3308 jobs
 # CPU time in finished jobs:     647954s   10799.23m   179.99h    7.50d  0.021 y
 # IO & Wait Time:                 22374s     372.90m     6.22h    0.26d  0.001 y
 # Average job time:                 203s       3.38m     0.06h    0.00d
 # Longest finished job:             349s       5.82m     0.10h    0.00d
 # Submission to last job:          3226s      53.77m     0.90h    0.04d
 
     mkdir downloads
     time for D in `ls -d wigFix/chr* | sed -e 's#wigFix/##'`
 do
     echo "working: $D" 1>&2
     find ./wigFix/${D} -type f | sed -e "s#^./##; s#\.# d #g; s#-# m #;" \
 	| sort -k1,1 -k3,3n | sed -e "s# d #.#g; s# m #-#g;" | xargs cat \
         | gzip -c > downloads/${D}.phyloP10way.wigFix.gz
 done
     #   real    48m50.219s
 
     du -hsc downloads
     #   4.6G    downloads
 
     # check integrity of data with wigToBigWig
     time (zcat downloads/*.wigFix.gz \
 	| wigToBigWig -verbose=2 stdin /hive/data/genomes/hg38/chrom.sizes \
 	phyloP10way.bw) > bigWig.log 2>&1
 
 
     egrep "real|VmPeak" bigWig.log
     # pid=66292: VmPeak:    33751268 kB
     #  real    43m40.194s
 
 
     bigWigInfo phyloP10way.bw  | sed -e 's/^/# /;'
 # version: 4
 # isCompressed: yes
 # isSwapped: 0
 # primaryDataSize: 6,304,076,591
 # primaryIndexSize: 93,404,704
 # zoomLevels: 10
 # chromCount: 355
 # basesCovered: 2,955,660,581
 # mean: 0.097833
 # min: -20.000000
 # max: 1.312000
 # std: 0.727453
 
     #	encode those files into wiggle data
     time (zcat downloads/*.wigFix.gz \
 	| wigEncode stdin phyloP10way.wig phyloP10way.wib)
 
 # Converted stdin, upper limit 1.31, lower limit -20.00
 # real    17m36.880s
 # -rw-rw-r--   1 2955660581 Nov  6 14:10 phyloP10way.wib
 # -rw-rw-r--   1  304274846 Nov  6 14:10 phyloP10way.wig
 
     du -hsc *.wi?
     # 2.8G    phyloP10way.wib
     # 291M    phyloP10way.wig
 
     # Load gbdb and database with wiggle.
     ln -s `pwd`/phyloP10way.wib /gbdb/hg38/tba10way/phyloP10way.wib
     time hgLoadWiggle -pathPrefix=/gbdb/hg38/tba10way hg38 \
 	phyloP10way phyloP10way.wig
     # real    0m30.538s
 
     # use to set trackDb.ra entries for wiggle min and max
     # and verify table is loaded correctly
 
     wigTableStats.sh hg38 phyloP10way
 # db.table          min   max     mean       count     sumData
 # hg38.phyloP10way  -20 1.312 0.0978331 2955660581 2.89162e+08
 #       stdDev viewLimits
 #     0.727453 viewLimits=-3.53943:1.312
 
     #	that range is: 20+1.312= 21.312 for hBinSize=0.021312
 
     #  Create histogram to get an overview of all the data
     time hgWiggle -doHistogram \
 	-hBinSize=0.021312 -hBinCount=1000 -hMinVal=-20 -verbose=2 \
 	    -db=hg38 phyloP10way > histogram.data 2>&1
     #   real    2m43.313s
 
     # xaxis range:
     grep -v chrom histogram.data | grep "^[0-9]" | ave -col=2 stdin \
 	| sed -e 's/^/# /;'
 # Q1 -10.953050
 # median -6.861155
 # Q3 -2.769245
 # average -6.875971
 # min -20.000000
 # max 1.312000
 # count 768
 # total -5280.745380
 # standard deviation 4.757034
 
     # find out the range for the 2:5 graph
     grep -v chrom histogram.data | grep "^[0-9]" | ave -col=5 stdin \
       | sed -e 's/^/# /;'
 # Q1 0.000000
 # median 0.000001
 # Q3 0.000140
 # average 0.001302
 # min 0.000000
 # max 0.023556
 # count 768
 # total 0.999975
 # standard deviation 0.003490
 
     #	create plot of histogram:
     printf 'set terminal png small x000000 xffffff xc000ff x66ff66 xffff00 x00ffff font \
 "/usr/share/fonts/default/Type1/n022004l.pfb"
 set size 1.4, 0.8
 set key left box
 set grid noxtics
 set grid ytics
 set title " Human hg38 Histogram phyloP10way track"
 set xlabel " phyloP10way score"
 set ylabel " Relative Frequency"
 set y2label " Cumulative Relative Frequency (CRF)"
 set y2range [0:1]
 set y2tics
 set xrange [-5:1.5]
 set yrange [0:0.04]
 
 plot "histogram.data" using 2:5 title " RelFreq" with impulses, \
         "histogram.data" using 2:7 axes x1y2 title " CRF" with lines
 ' | gnuplot > histo.png
 
     # verify it looks sane
     display histo.png &
 
 #############################################################################
# construct download files for 10-way (TBD - 2015-04-15 - Hiram)
     mkdir /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/tba10way
     mkdir /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/phastCons10way
     mkdir /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/phyloP10way
     mkdir /hive/data/genomes/hg38/bed/tba10way/downloads
     cd /hive/data/genomes/hg38/bed/tba10way/downloads
     mkdir tba10way phastCons10way phyloP10way
 
     #########################################################################
     ## create upstream refGene maf files
     cd /hive/data/genomes/hg38/bed/tba10way/downloads/tba10way
     # bash script
 
 #!/bin/sh
 export geneTbl="refGene"
 for S in 300 2000 5000
 do
     echo "making upstream${S}.maf"
     featureBits hg38 ${geneTbl}:upstream:${S} -fa=/dev/null -bed=stdout \
         | perl -wpe 's/_up[^\t]+/\t0/' | sort -k1,1 -k2,2n \
         | /cluster/bin/$MACHTYPE/mafFrags hg38 tba10way \
                 stdin stdout \
                 -orgs=/hive/data/genomes/hg38/bed/tba10way/species.list \
         | gzip -c > upstream${S}.${geneTbl}.maf.gz
     echo "done upstream${S}.${geneTbl}.maf.gz"
 done
 
     #   real    88m40.730s
 
-rw-rw-r-- 1   52659159 Nov  6 11:46 upstream300.knownGene.maf.gz
-rw-rw-r-- 1  451126665 Nov  6 12:15 upstream2000.knownGene.maf.gz
-rw-rw-r-- 1 1080533794 Nov  6 12:55 upstream5000.knownGene.maf.gz
    # NOTE(review): the listing above shows knownGene file names, but the
    # script sets geneTbl="refGene"; a refGene run will produce
    # upstream*.refGene.maf.gz — confirm which table was actually used
 
     ######################################################################
     ## compress the maf files
     cd /hive/data/genomes/hg38/bed/tba10way/downloads/tba10way
     mkdir maf
     rsync -a -P ../../anno/result/ ./maf/
     du -hsc maf/
     # 156G    maf
     cd maf
     time gzip *.maf &
     # real    135m1.784s
 
     du -hscL maf ../../anno/result/
     #  18G     maf
 
     cd maf
     md5sum *.maf.gz *.nh > md5sum.txt
 
     mkdir /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/tba10way/maf
     cd maf
     ln -s `pwd`/* /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/tba10way/maf
     cd --
     ln -s `pwd`/*.maf.gz `pwd`/*.nh `pwd`/*.txt \
          /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/tba10way/
 
     ###########################################################################
 
     cd /hive/data/genomes/hg38/bed/tba10way/downloads/tba10way
     grep TREE ../../4d/all.mod | awk '{print $NF}' \
       | ~/kent/src/hg/utils/phyloTrees/asciiTree.pl /dev/stdin \
          > hg38.10way.nh
     ~/kent/src/hg/utils/phyloTrees/commonNames.sh hg38.10way.nh \
       | ~/kent/src/hg/utils/phyloTrees/asciiTree.pl /dev/stdin \
          > hg38.10way.commonNames.nh
     ~/kent/src/hg/utils/phyloTrees/scientificNames.sh hg38.10way.nh \
 	| $HOME/kent/src/hg/utils/phyloTrees/asciiTree.pl /dev/stdin \
 	    > hg38.10way.scientificNames.nh
     time md5sum *.nh *.maf.gz > md5sum.txt
     #   real    0m3.147s
 
     ln -s `pwd`/*.maf.gz `pwd`/*.nh \
         /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/tba10way
 
     du -hsc ./maf ../../anno/result
     #  18G     ./maf
     # 156G    ../../anno/result
 
     # obtain the README.txt from hg38/multiz20way and update for this
     #   situation
     ln -s `pwd`/*.txt \
          /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/tba10way/
 
     #####################################################################
     cd /hive/data/genomes/hg38/bed/tba10way/downloads/phastCons10way
 
     mkdir hg38.10way.phastCons
     cd hg38.10way.phastCons
     ln -s ../../../cons/all/downloads/*.wigFix.gz .
     md5sum *.gz > md5sum.txt
 
     cd /hive/data/genomes/hg38/bed/tba10way/downloads/phastCons10way
     ln -s ../../cons/all/phastCons10way.bw ./hg38.phastCons10way.bw
     ln -s ../../cons/all/all.mod ./hg38.phastCons10way.mod
     time md5sum *.mod *.bw > md5sum.txt
     #   real    0m20.354s
 
     # obtain the README.txt from hg38/phastCons20way and update for this
     mkdir /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/phastCons10way/hg38.10way.phastCons
     cd hg38.10way.phastCons
     ln -s `pwd`/* /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/phastCons10way/hg38.10way.phastCons
 
     cd ..
     #   situation
     ln -s `pwd`/*.mod `pwd`/*.bw `pwd`/*.txt \
       /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/phastCons10way
 
     #####################################################################
     cd /hive/data/genomes/hg38/bed/tba10way/downloads/phyloP10way
 
     mkdir hg38.10way.phyloP
     cd hg38.10way.phyloP
 
     ln -s ../../../consPhyloP/all/downloads/*.wigFix.gz .
     md5sum *.wigFix.gz > md5sum.txt
 
     cd ..
 
     ln -s ../../consPhyloP/run.phyloP/all.mod hg38.phyloP10way.mod
     ln -s ../../consPhyloP/all/phyloP10way.bw hg38.phyloP10way.bw
 
     md5sum *.mod *.bw > md5sum.txt
 
     # obtain the README.txt from hg38/phyloP20way and update for this
     mkdir /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/phyloP10way/hg38.10way.phyloP
     cd hg38.10way.phyloP
     ln -s `pwd`/* \
 /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/phyloP10way/hg38.10way.phyloP
 
     cd ..
 
     #   situation
     ln -s `pwd`/*.mod `pwd`/*.bw `pwd`/*.txt \
       /usr/local/apache/htdocs-hgdownload/goldenPath/hg38/phyloP10way
 
 #############################################################################
 # hgPal downloads (DONE - 2017-11-06 - Hiram)
#   FASTA from 10-way for knownGene, refGene and knownCanonical
 
     ssh hgwdev
     screen -S hg38HgPal
     mkdir /hive/data/genomes/hg38/bed/tba10way/pal
     cd /hive/data/genomes/hg38/bed/tba10way/pal
     cat ../species.list | tr '[ ]' '[\n]' > order.list
 
     ### knownCanonical with full CDS
     cd /hive/data/genomes/hg38/bed/tba10way/pal
     export mz=tba10way
     export gp=knownCanonical
     export db=hg38
     mkdir exonAA exonNuc knownCanonical
 
     time cut -f1 ../../../chrom.sizes | while read C
     do
         echo $C 1>&2
 	hgsql hg38 -N -e "select chrom, chromStart, chromEnd, transcript from knownCanonical where chrom='$C'" > knownCanonical/$C.known.bed
     done
 
     ls knownCanonical/*.known.bed | while read F
     do
       if [ -s $F ]; then
          echo $F | sed -e 's#knownCanonical/##; s/.known.bed//'
       fi
     done | while read C
     do
 	echo "date"
 	echo "mafGene -geneBeds=knownCanonical/$C.known.bed -noTrans $db $mz knownGene order.list stdout | \
 	    gzip -c > protNuc/$C.protNuc.fa.gz"
 	echo "mafGene -geneBeds=knownCanonical/$C.known.bed $db $mz knownGene order.list stdout | \
 	    gzip -c > protAA/$C.protAA.fa.gz"
     done > $gp.$mz.prot.jobs
 
     time sh -x $gp.$mz.jobs > $gp.$mz.job.log 2>&1 
     # 267m58.813s
 
     rm *.known.bed
     export mz=tba10way
     export gp=knownCanonical
     export db=hg38
     zcat protAA/c*.gz | gzip -c > $gp.$mz.protAA.fa.gz &
     zcat protNuc/c*.gz | gzip -c > $gp.$mz.protNuc.fa.gz &
     # about 6 minutes
 
     ### knownCanonical broken up by exon
     cd /hive/data/genomes/hg38/bed/multiz100way/pal
     export mz=multiz100way
     export gp=knownCanonical
     export db=hg38
     mkdir exonAA exonNuc knownCanonical
 
     time cut -f1 ../../../chrom.sizes | while read C
     do
         echo $C 1>&2
 	hgsql hg38 -N -e "select chrom, chromStart, chromEnd, transcript from knownCanonical where chrom='$C'" > knownCanonical/$C.known.bed
     done
     #   real    0m15.897s
 
     ls knownCanonical/*.known.bed | while read F
     do
       if [ -s $F ]; then
          echo $F | sed -e 's#knownCanonical/##; s/.known.bed//'
       fi
     done | while read C
     do
 	echo "date"
 	echo "mafGene -geneBeds=knownCanonical/$C.known.bed -exons -noTrans $db $mz knownGene order.list stdout | \
 	    gzip -c > exonNuc/$C.exonNuc.fa.gz"
 	echo "mafGene -geneBeds=knownCanonical/$C.known.bed -exons $db $mz knownGene order.list stdout | \
 	    gzip -c > exonAA/$C.exonAA.fa.gz"
     done > $gp.$mz.jobs
 
     time sh -x $gp.$mz.jobs > $gp.$mz.job.log 2>&1 
     # 267m58.813s
 
     rm *.known.bed
     export mz=tba10way
     export gp=knownCanonical
     export db=hg38
     zcat exonAA/c*.gz | gzip -c > $gp.$mz.exonAA.fa.gz &
     zcat exonNuc/c*.gz | gzip -c > $gp.$mz.exonNuc.fa.gz &
     # about 6 minutes
 
     rm -rf exonAA exonNuc
 
     export mz=multiz100way
     export gp=knownCanonical
     export db=hg38
     export pd=/usr/local/apache/htdocs-hgdownload/goldenPath/$db/$mz/alignments
     mkdir -p $pd
     ln -s `pwd`/$gp.$mz.exonAA.fa.gz $pd/$gp.exonAA.fa.gz
     ln -s `pwd`/$gp.$mz.exonNuc.fa.gz $pd/$gp.exonNuc.fa.gz
     ln -s `pwd`/$gp.$mz.protAA.fa.gz $pd/$gp.protAA.fa.gz
     ln -s `pwd`/$gp.$mz.protNuc.fa.gz $pd/$gp.protNuc.fa.gz
     cd  $pd
     md5sum *.fa.gz > md5sum.txt
 
     rm -rf exonAA exonNuc
 
     export mz=tba10way
     export gp=knownCanonical
     export db=hg38
     export pd=/usr/local/apache/htdocs-hgdownload/goldenPath/$db/$mz/alignments
     mkdir -p $pd
     ln -s `pwd`/$gp.$mz.exonAA.fa.gz $pd/$gp.exonAA.fa.gz
     ln -s `pwd`/$gp.$mz.exonNuc.fa.gz $pd/$gp.exonNuc.fa.gz
 
     # knownGene
     export mz=tba10way
     export gp=knownGene
     export db=hg38
     export I=0
     export D=0
     mkdir exonAA exonNuc
     for C in `sort -nk2 ../../../chrom.sizes | cut -f1`
     do
         I=`echo $I | awk '{print $1+1}'`
         D=`echo $D | awk '{print $1+1}'`
         dNum=`echo $D | awk '{printf "%03d", int($1/300)}'`
         mkdir -p exonNuc/${dNum} > /dev/null
         mkdir -p exonAA/${dNum} > /dev/null
 	echo "mafGene -chrom=$C -exons -noTrans $db $mz $gp order.list stdout | gzip -c > exonNuc/${dNum}/$C.exonNuc.fa.gz &"
 	echo "mafGene -chrom=$C -exons $db $mz $gp order.list stdout | gzip -c > exonAA/${dNum}/$C.exonAA.fa.gz &"
         if [ $I -gt 16 ]; then
             echo "date"
             echo "wait"
             I=0
         fi
     done > $gp.jobs
     echo "date" >> $gp.jobs
     echo "wait" >> $gp.jobs
 
     time (sh -x ./$gp.jobs) > $gp.jobs.log 2>&1
     # real    79m18.323s
 
     export mz=tba10way
     export gp=knownGene
     time find ./exonAA -type f | grep exonAA.fa.gz | xargs zcat \
      | gzip -c > $gp.$mz.exonAA.fa.gz
     # real    1m28.841s
 
     time find ./exonNuc -type f | grep exonNuc.fa.gz | xargs zcat \
      | gzip -c > $gp.$mz.exonNuc.fa.gz
     #   real    3m56.370s
 
     # -rw-rw-r-- 1 397928833 Nov  6 18:44 knownGene.tba10way.exonAA.fa.gz
     # -rw-rw-r-- 1 580377720 Nov  6 18:49 knownGene.tba10way.exonNuc.fa.gz
 
     export mz=tba10way
     export gp=knownGene
     export db=hg38
     export pd=/usr/local/apache/htdocs-hgdownload/goldenPath/$db/$mz/alignments
     mkdir -p $pd
     ln -s `pwd`/$gp.$mz.exonAA.fa.gz $pd/$gp.exonAA.fa.gz
     ln -s `pwd`/$gp.$mz.exonNuc.fa.gz $pd/$gp.exonNuc.fa.gz
     ln -s `pwd`/md5sum.txt $pd/
 
     cd  $pd
     md5sum *.fa.gz > md5sum.txt
 
     rm -rf exonAA exonNuc
 
 #############################################################################
# wiki page for 10-way (DONE - 2017-11-06 - Hiram)
     mkdir /hive/users/hiram/bigWays/hg38.10way
     cd /hive/users/hiram/bigWays
     echo "hg38" > hg38.10way/ordered.list
     awk '{print $1}' /hive/data/genomes/hg38/bed/tba10way/10way.distances.txt \
        >> hg38.10way/ordered.list
 
     # sizeStats.sh catches up the cached measurements required for data
     # in the tables.  They are usually already mostly done, only new
     # assemblies will have updates.
     ./sizeStats.sh hg38.10way/ordered.list
    # dbDb.sh constructs hg38.10way/Hg38_10-way_conservation_alignment.html
    # may need to add new assembly references to srcReference.list and
    # urlReference.list
    ./dbDb.sh hg38 10way
    # sizeStats.pl constructs hg38.10way/Hg38_10-way_Genome_size_statistics.html
    # this requires entries in coverage.list for new sequences
    ./sizeStats.pl hg38 10way

    # defCheck.pl constructs Hg38_10-way_conservation_lastz_parameters.html
    ./defCheck.pl hg38 10way

    # this constructs the html pages in hg38.10way/:
    # (file names follow the Db_N-way pattern; the XenTro9_30-way names in
    #  the original were leftovers from the xenTro9 doc this was copied from)
# -rw-rw-r-- 1 6247 May  2 17:07 Hg38_10-way_conservation_alignment.html
# -rw-rw-r-- 1 8430 May  2 17:09 Hg38_10-way_Genome_size_statistics.html
# -rw-rw-r-- 1 5033 May  2 17:10 Hg38_10-way_conservation_lastz_parameters.html

    # add those pages to the genomewiki.  Their page names are the
    # names of the .html files without the .html:
#  Hg38_10-way_conservation_alignment
#  Hg38_10-way_Genome_size_statistics
#  Hg38_10-way_conservation_lastz_parameters
 
     # when you view the first one you enter, it will have links to the
     # missing two.
 
 ############################################################################
 # pushQ readmine (DONE - 2017-11-07 - Hiram)
 
   cd /usr/local/apache/htdocs-hgdownload/goldenPath/hg38
   find -L `pwd`/tba10way `pwd`/phastCons10way `pwd`/phyloP10way \
 	/gbdb/hg38/tba10way -type f \
     > /hive/data/genomes/hg38/bed/tba10way/downloads/redmine.20216.fileList
   wc -l /hive/data/genomes/hg38/bed/tba10way/downloads/redmine.20216.fileList
 # 1450 /hive/data/genomes/hg38/bed/tba10way/downloads/redmine.20216.fileList
 
   cd /hive/data/genomes/hg38/bed/tba10way/downloads
   hgsql -e 'show tables;' hg38 | grep 10way \
 	| sed -e 's/^/hg38./;' > redmine.20216.table.list
 
 ############################################################################