src/hg/utils/refreshNamedSessionCustomTracks/refreshSledgeHammer.pl 1.3

1.3 2009/05/18 21:47:18 galt
handling the touching of udc cache files
Index: src/hg/utils/refreshNamedSessionCustomTracks/refreshSledgeHammer.pl
===================================================================
RCS file: /projects/compbio/cvsroot/kent/src/hg/utils/refreshNamedSessionCustomTracks/refreshSledgeHammer.pl,v
retrieving revision 1.2
retrieving revision 1.3
diff -b -B -U 1000000 -r1.2 -r1.3
--- src/hg/utils/refreshNamedSessionCustomTracks/refreshSledgeHammer.pl	20 Jun 2008 20:49:34 -0000	1.2
+++ src/hg/utils/refreshNamedSessionCustomTracks/refreshSledgeHammer.pl	18 May 2009 21:47:18 -0000	1.3
@@ -1,34 +1,34 @@
 #!/usr/bin/env perl
 
 # DO NOT EDIT the /cluster/bin/scripts copy of this file --
 # edit ~/kent/src/hg/utils/refreshNamedSessionCustomTracks/refreshSledgeHammer.pl instead.
 
 # $Id$
 
 # Use the awesome power of Perl to force the access time of a file to be
 # updated when read (the NFS cache can prevent that and must be bypassed)
 # using O_DIRECT.  To be fair, that can be done in C -- on many platforms.
 # So that we don't have yet another portability concern in our C codebase,
 # I'll just use perl here.
 
 # This script parses the -verbose=4 output of refreshNamedSessionCustomTracks
 # looking for files which do exist, and then reads them with O_DIRECT which
 # seems to force an access time update even on stubborn cached files.
 
 use warnings;
 use Fcntl;
 use strict;
 
 while (<>) {
   my $fileName;
-  if (/^(Found live custom track: |setting \w+File: |\/)(\S+)/) {
+  if (/^(Found live custom track: |setting \w+File: |setting dataUrl: |\/)(\S+)/) {
     $fileName = $2;
     $fileName = $1 . $fileName if ($1 eq "/");
     $fileName =~ s@^\.\./@/usr/local/apache/@;
     sysopen(FH, $fileName, O_RDONLY | O_DIRECT)
       || die "Can't open $fileName: $!\n";
     <FH>;
     close(FH);
   }
 }