d49c83bd184000554d36f1ffd9c07a89fb856352 angie Thu Sep 2 17:53:52 2021 -0700

Use the new bioSampleJsonToTab.py script but keep the EUtils stuff for now
as a backup until biosample.jsonl looks stable and complete.
Make a separate log file for gbMetadataAddBioSample.pl output for easier
extraction of IDs to ping submitters / NCBI about.
Watch out for duplicated sequences in fasta.

diff --git src/hg/utils/otto/sarscov2phylo/getNcbi.sh src/hg/utils/otto/sarscov2phylo/getNcbi.sh
index 8e0e040..4a15f79 100755
--- src/hg/utils/otto/sarscov2phylo/getNcbi.sh
+++ src/hg/utils/otto/sarscov2phylo/getNcbi.sh
@@ -42,30 +42,32 @@
 if [[ ! -f ncbi_dataset.zip ]]; then
     echo "datasets command failed $maxAttempts times; quitting."
     exit 1
 fi
 
 rm -rf ncbi_dataset
 unzip -o ncbi_dataset.zip
 # Creates ./ncbi_dataset/
 # This makes something just like ncbi.datasets.tsv from the /table/ API query:
 jq -c -r '[.accession, .biosample, .isolate.collectionDate, .location.geographicLocation, .host.sciName, .isolate.name, .completeness, (.length|tostring)] | join("\t")' \
     ncbi_dataset/data/data_report.jsonl \
 | sed -e 's/COMPLETE/complete/; s/PARTIAL/partial/;' \
 | sort \
     > ncbi_dataset.tsv
 
+# TODO: get rid of all this eutils cruft when biosample.jsonl is stable:
+
 # Use EUtils (esearch) to get all SARS-CoV-2 BioSample GI# IDs:
 $scriptDir/searchAllSarsCov2BioSample.sh
 sort all.biosample.gids.txt > all.biosample.gids.sorted.txt
 
 # Copy yesterday's all.bioSample.txt so we don't have to refetch all the old stuff.
 if [ -e ../ncbi.latest/all.bioSample.txt.xz ]; then
     xzcat ../ncbi.latest/all.bioSample.txt.xz > all.bioSample.txt
     grep ^Accession all.bioSample.txt | sed -re 's/^.*ID: //' | sort -u > ids.loaded
     comm -23 all.biosample.gids.sorted.txt ids.loaded > ids.notLoaded
 elif [ -e ../ncbi.latest/all.bioSample.txt ]; then
     cp ../ncbi.latest/all.bioSample.txt all.bioSample.txt
     grep ^Accession all.bioSample.txt | sed -re 's/^.*ID: //' | sort -u > ids.loaded
     comm -23 all.biosample.gids.sorted.txt ids.loaded > ids.notLoaded
 else
     cp -p all.biosample.gids.txt ids.notLoaded
@@ -82,56 +84,65 @@
     $scriptDir/bioSampleIdToText.sh < ids.notLoaded >> all.bioSample.txt
     grep ^Accession all.bioSample.txt | sed -re 's/^.*ID: //' | sort > ids.loaded
     comm -23 all.biosample.gids.sorted.txt ids.loaded > ids.notLoaded
     if [ -s ids.notLoaded ]; then
         echo "Still have only $accCount accession lines after retrying; quitting."
         exit 1
     fi
 fi
 
 # Extract properties of interest into tab-sep text:
 $scriptDir/bioSampleTextToTab.pl < all.bioSample.txt > all.bioSample.tab
 
 # Extract BioSample tab-sep lines just for BioSample accessions included in the ncbi_dataset data:
 tawk '$2 != "" {print $2;}' ncbi_dataset.tsv \
 | grep -Fwf - all.bioSample.tab \
-    > gb.bioSample.tab
+    > gb.bioSample.eutils.tab
+
+time $scriptDir/bioSampleJsonToTab.py ncbi_dataset/data/biosample.jsonl > gb.bioSample.tab
+
+# TODO: get rid of this when bioSample json is stable
+comm -23 <(cut -f 2 gb.bioSample.eutils.tab | sort) <(cut -f 2 gb.bioSample.tab | sort) \
+    > bioSample.missingFromJson.txt
+wc -l bioSample.missingFromJson.txt
+grep -Fwf bioSample.missingFromJson.txt gb.bioSample.eutils.tab \
+    >> gb.bioSample.tab
 
 # Use BioSample metadata to fill in missing pieces of GenBank metadata and report conflicting
 # sample collection dates:
 $scriptDir/gbMetadataAddBioSample.pl gb.bioSample.tab ncbi_dataset.tsv \
-    > ncbi_dataset.plusBioSample.tsv
+    > ncbi_dataset.plusBioSample.tsv 2>gbMetadataAddBioSample.log
 
 # Make a file for joining collection date with ID:
 tawk '$3 != "" {print $1, $3;}' ncbi_dataset.plusBioSample.tsv \
 | sort > gbToDate
 
 # Replace FASTA headers with reconstructed names from enhanced metadata.
 time cleanGenbank < ncbi_dataset/data/genomic.fna \
 | $scriptDir/fixNcbiFastaNames.pl ncbi_dataset.plusBioSample.tsv \
 | xz -T 50 \
     > genbank.fa.xz
 
 # Run pangolin and nextclade on sequences that are new since yesterday
 export TMPDIR=/dev/shm
-fastaNames genbank.fa.xz | awk '{print $1;}' | sed -re 's/\|.*//' | grep -vx pdb | sort > gb.names
+fastaNames genbank.fa.xz | awk '{print $1;}' | sed -re 's/\|.*//' | grep -vx pdb | sort -u > gb.names
 splitDir=splitForNextclade
 rm -rf $splitDir
 mkdir $splitDir
 if [ -e ../ncbi.latest/nextclade.tsv ]; then
     cp ../ncbi.latest/nextclade.tsv .
-    cut -f 1 ../ncbi.latest/nextclade.tsv | sort > nextclade.prev.names
+    cut -f 1 nextclade.tsv | sort -u > nextclade.prev.names
     comm -23 gb.names nextclade.prev.names > nextclade.names
     faSomeRecords <(xzcat genbank.fa.xz) nextclade.names nextclade.fa
     faSplit about nextclade.fa 30000000 $splitDir/chunk
 else
     cp /dev/null nextclade.tsv
     faSplit about <(xzcat genbank.fa.xz) 30000000 $splitDir/chunk
 fi
 if (( $(ls -1 splitForNextclade | wc -l) > 0 )); then
     nDataDir=~angie/github/nextclade/data/sars-cov-2
     outDir=$(mktemp -d)
     outTsv=$(mktemp)
     for chunkFa in $splitDir/chunk*.fa; do
         nextclade -j 50 -i $chunkFa \
             --input-root-seq $nDataDir/reference.fasta \
             --input-tree $nDataDir/tree.json \
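
The body of bioSampleJsonToTab.py is not part of the diff shown here. As a rough feel for the kind of transform it performs -- flattening one BioSample JSON object per biosample.jsonl line into tab-separated columns analogous to the EUtils-derived gb.bioSample.eutils.tab -- here is a minimal sketch. The input field names ('accession', 'attributes' as a name/value list) and the output column set are assumptions for illustration, not the real script's mapping.

#!/usr/bin/env python3
# Hypothetical sketch, NOT the real bioSampleJsonToTab.py: flatten one
# BioSample JSON object per input line into tab-separated columns.
# Input field names and output columns below are assumptions.

import json
import sys

def attrs_to_dict(sample):
    # Assumed layout: "attributes" is a list of {"name": ..., "value": ...}
    return {a.get('name', ''): a.get('value', '')
            for a in sample.get('attributes', [])}

def main():
    with open(sys.argv[1]) as jsonl:
        for line in jsonl:
            line = line.strip()
            if not line:
                continue
            sample = json.loads(line)
            attrs = attrs_to_dict(sample)
            # Hypothetical columns: accession, isolate, date, location, host
            print('\t'.join([sample.get('accession', ''),
                             attrs.get('isolate', ''),
                             attrs.get('collection_date', ''),
                             attrs.get('geo_loc_name', ''),
                             attrs.get('host', '')]))

if __name__ == '__main__':
    main()

Whatever its exact column mapping, the script's output lands in the same pipeline slot as the EUtils-derived table: the comm/grep stanza in the diff backfills gb.bioSample.tab with accessions present in gb.bioSample.eutils.tab but missing from biosample.jsonl, so the JSON path can be trialed without losing metadata while NCBI's feed stabilizes.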