ed0c81dda84b8a3c690fba2c78e2662c5cd6940f
angie
  Mon Jul 12 16:32:14 2021 -0700
Exclude NCBI metadata for sequences that are not (yet) in fasta.

diff --git src/hg/utils/otto/sarscov2phylo/getNcbi.sh src/hg/utils/otto/sarscov2phylo/getNcbi.sh
index 5897b6c..4117416 100755
--- src/hg/utils/otto/sarscov2phylo/getNcbi.sh
+++ src/hg/utils/otto/sarscov2phylo/getNcbi.sh
@@ -83,31 +83,31 @@
 $scriptDir/gbMetadataAddBioSample.pl gb.bioSample.tab ncbi_dataset.tsv \
     > ncbi_dataset.plusBioSample.tsv
 
 # Make a file for joining collection date with ID:
 tawk '$3 != "" {print $1, $3;}' ncbi_dataset.plusBioSample.tsv \
 | sort > gbToDate
 
 # Replace FASTA headers with reconstructed names from enhanced metadata.
 time cleanGenbank < ncbi_dataset/data/genomic.fna \
 | $scriptDir/fixNcbiFastaNames.pl ncbi_dataset.plusBioSample.tsv \
 | xz -T 50 \
     > genbank.fa.xz
 
 # Run pangolin and nextclade on sequences that are new since yesterday
 export TMPDIR=/dev/shm
-fastaNames genbank.fa.xz | awk '{print $1;}' | sed -re 's/\|.*//' | sort > gb.names
+fastaNames genbank.fa.xz | awk '{print $1;}' | sed -re 's/\|.*//' | grep -vx pdb | sort > gb.names
 splitDir=splitForNextclade
 rm -rf $splitDir
 mkdir $splitDir
 if [ -e ../ncbi.latest/nextclade.tsv ]; then
     cp ../ncbi.latest/nextclade.tsv .
     cut -f 1 ../ncbi.latest/nextclade.tsv | sort > nextclade.prev.names
     comm -23 gb.names nextclade.prev.names > nextclade.names
     faSomeRecords <(xzcat genbank.fa.xz) nextclade.names nextclade.fa
     faSplit about nextclade.fa 30000000 $splitDir/chunk
 else
     cp /dev/null nextclade.tsv
     faSplit about <(xzcat genbank.fa.xz) 30000000 $splitDir/chunk
 fi
 if (( $(ls -1 splitForNextclade | wc -l) > 0 )); then
     nDataDir=~angie/github/nextclade/data/sars-cov-2
@@ -150,21 +150,28 @@
 else
     splitDir=splitForPangolin
     rm -rf $splitDir
     mkdir $splitDir
     faSplit about <(xzcat genbank.fa.xz) 30000000 $splitDir/chunk
     find $splitDir -name chunk\*.fa \
     | parallel -j 10 "runPangolin {}"
     head -1 $(ls -1 $splitDir/chunk*.csv | head -1) > lineage_report.csv
     for f in $splitDir/chunk*.csv; do
         tail -n+2 $f >> lineage_report.csv
     done
     rm -rf $splitDir
 fi
 wc -l lineage_report.csv
 
+# It turns out that sometimes new sequences sneak into ncbi_dataset.tsv before they're included in
+# genomic.fna.  Filter those out so we don't have missing pangolin and nextclade results for just-added
+# COG-UK sequences.  (https://github.com/theosanderson/taxodium/issues/10#issuecomment-876800176)
+grep -Fwf gb.names ncbi_dataset.plusBioSample.tsv > tmp
+wc -l tmp ncbi_dataset.plusBioSample.tsv
+mv tmp ncbi_dataset.plusBioSample.tsv
+
 rm -f $ottoDir/ncbi.latest
 ln -s ncbi.$today $ottoDir/ncbi.latest
 
 # Clean up
 rm -r ncbi_dataset
 nice xz all.bioSample.* &
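
Illustration (not part of the patch): a minimal sketch of the whole-word, fixed-string filtering that the new grep -Fwf step performs, using made-up accessions and toy files in place of the real gb.names and ncbi_dataset.plusBioSample.tsv:

    # toy name list: accessions that actually have sequence in the FASTA
    printf 'OK000001\nOK000002\n' > gb.names
    # toy metadata: OK000003 is present in the metadata but has no sequence yet
    printf 'OK000001\tfoo\nOK000003\tbar\nOK000002\tbaz\n' > ncbi_dataset.plusBioSample.tsv
    # keep only metadata rows that contain a whole-word match to some name in gb.names
    grep -Fwf gb.names ncbi_dataset.plusBioSample.tsv
    # -> prints the OK000001 and OK000002 rows; the OK000003 row is dropped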