6f949e90b1ba3de976455fbcf9da21897761d134
markd
Fri Oct 29 16:11:58 2021 -0700
add timeout option to pipeline to allow killing long-running pipelines, especially ones run from CGIs
diff --git src/hg/lib/customFactory.c src/hg/lib/customFactory.c
index 1504712..6e51372 100644
--- src/hg/lib/customFactory.c
+++ src/hg/lib/customFactory.c
@@ -360,31 +360,31 @@
cmd1[index++] = dyStringCannibalize(&tmpDy);
}
cmd1[index++] = CUSTOM_TRASH;
cmd1[index++] = track->dbTableName;
cmd1[index++] = "stdin";
assert(index <= ArraySize(cmd1));
/* the "/dev/null" file isn't actually used for anything, but it is used
* in the pipeLineOpen to properly get a pipe started that isn't simply
* to STDOUT which is what a NULL would do here instead of this name.
* This function exits if it can't get the pipe created
* The dbStderrFile will get stderr messages from hgLoadBed into the
* our private error log so we can send it back to the user
*/
return pipelineOpen1(cmd1, pipelineWrite | pipelineNoAbort,
- "/dev/null", track->dbStderrFile);
+ "/dev/null", track->dbStderrFile, 0);
}
void pipelineFailExit(struct customTrack *track)
/* show up to three lines of error message to stderr and errAbort */
{
struct dyString *errDy = newDyString(0);
struct lineFile *lf;
char *line;
int i;
dyStringPrintf(errDy, "track load error (track name='%s'):
\n", track->tdb->track);
lf = lineFileOpen(track->dbStderrFile, TRUE);
i = 0;
while( (i < 10) && lineFileNext(lf, &line, NULL))
{
dyStringPrintf(errDy, "%s
\n", line);
@@ -775,31 +775,31 @@
dyStringPrintf(tmpDy, "-maxChromNameLength=%d", track->maxChromName);
cmd1[index++] = dyStringCannibalize(&tmpDy);
cmd1[index++] = CUSTOM_TRASH;
cmd1[index++] = track->dbTableName;
cmd1[index++] = "stdin";
assert(index <= ArraySize(cmd1));
/* the "/dev/null" file isn't actually used for anything, but it is used
* in the pipeLineOpen to properly get a pipe started that isn't simply
* to STDOUT which is what a NULL would do here instead of this name.
* This function exits if it can't get the pipe created
* The dbStderrFile will get stderr messages from hgLoadBed into the
* our private error log so we can send it back to the user
*/
return pipelineOpen1(cmd1, pipelineWrite | pipelineNoAbort,
- "/dev/null", track->dbStderrFile);
+ "/dev/null", track->dbStderrFile, 0);
}
/* Need customTrackEncodePeak */
static struct encodePeak *customTrackEncodePeak(char *db, char **row, enum encodePeakType pt,
struct hash *chromHash, struct lineFile *lf)
/* Convert a row of strings to a bed. */
{
struct encodePeak *peak = encodePeakLineFileLoad(row, pt, lf);
hashStoreName(chromHash, peak->chrom);
customFactoryCheckChromNameDb(db, peak->chrom, lf);
return peak;
}
/* remember to set all the custom track settings necessary */
static struct customTrack *encodePeakFinish(struct customTrack *track, struct encodePeak *peakList, enum encodePeakType pt)
@@ -938,31 +938,31 @@
dyStringPrintf(tmpDy, "-maxChromNameLength=%d", track->maxChromName);
cmd1[index++] = dyStringCannibalize(&tmpDy);
cmd1[index++] = CUSTOM_TRASH;
cmd1[index++] = track->dbTableName;
cmd1[index++] = "stdin";
assert(index <= ArraySize(cmd1));
/* the "/dev/null" file isn't actually used for anything, but it is used
* in the pipeLineOpen to properly get a pipe started that isn't simply
* to STDOUT which is what a NULL would do here instead of this name.
* This function exits if it can't get the pipe created
* The dbStderrFile will get stderr messages from hgLoadBed into the
* our private error log so we can send it back to the user
*/
return pipelineOpen1(cmd1, pipelineWrite | pipelineNoAbort,
- "/dev/null", track->dbStderrFile);
+ "/dev/null", track->dbStderrFile, 0);
}
/* customTrackBedDetail load item */
static struct bedDetail *customTrackBedDetail(char *db, char **row,
struct hash *chromHash, struct lineFile *lf, int size)
/* Convert a row of strings to a bed 4 + for bedDetail. */
{
struct bedDetail *item = bedDetailLineFileLoad(row, size, lf);
hashStoreName(chromHash, item->chrom);
customFactoryCheckChromNameDb(db, item->chrom, lf);
int chromSize = hChromSize(db, item->chrom);
if (item->chromEnd > chromSize)
lineFileAbort(lf, "chromEnd larger than chrom %s size (%d > %d)",
item->chrom, item->chromEnd, chromSize);
return item;
@@ -1143,31 +1143,31 @@
dyStringPrintf(tmpDy, "-maxChromNameLength=%d", track->maxChromName);
cmd1[index++] = dyStringCannibalize(&tmpDy);
cmd1[index++] = CUSTOM_TRASH;
cmd1[index++] = track->dbTableName;
cmd1[index++] = "stdin";
assert(index <= ArraySize(cmd1));
/* the "/dev/null" file isn't actually used for anything, but it is used
* in the pipeLineOpen to properly get a pipe started that isn't simply
* to STDOUT which is what a NULL would do here instead of this name.
* This function exits if it can't get the pipe created
* The dbStderrFile will get stderr messages from hgLoadBed into the
* our private error log so we can send it back to the user
*/
return pipelineOpen1(cmd1, pipelineWrite | pipelineNoAbort,
- "/dev/null", track->dbStderrFile);
+ "/dev/null", track->dbStderrFile, 0);
}
/* customTrackPgSnp load item */
static struct pgSnp *customTrackPgSnp(char *db, char **row,
struct hash *chromHash, struct lineFile *lf)
/* Convert a row of strings to pgSnp. */
{
struct pgSnp *item = pgSnpLineFileLoad(row, lf);
hashStoreName(chromHash, item->chrom);
customFactoryCheckChromNameDb(db, item->chrom, lf);
int chromSize = hChromSize(db, item->chrom);
if (item->chromEnd > chromSize)
lineFileAbort(lf, "chromEnd larger than chrom %s size (%d > %d)",
item->chrom, item->chromEnd, chromSize);
return item;
@@ -1374,31 +1374,31 @@
dyStringPrintf(ds, "-maxChromNameLength=%d", track->maxChromName);
cmd1[index++] = dyStringCannibalize(&ds);
cmd1[index++] = CUSTOM_TRASH;
cmd1[index++] = track->dbTableName;
cmd1[index++] = "stdin";
assert(index <= ArraySize(cmd1));
/* the "/dev/null" file isn't actually used for anything, but it is used
* in the pipeLineOpen to properly get a pipe started that isn't simply
* to STDOUT which is what a NULL would do here instead of this name.
* This function exits if it can't get the pipe created
* The dbStderrFile will get stderr messages from hgLoadBed into the
* our private error log so we can send it back to the user
*/
-return pipelineOpen1(cmd1, pipelineWrite | pipelineNoAbort, "/dev/null", track->dbStderrFile);
+return pipelineOpen1(cmd1, pipelineWrite | pipelineNoAbort, "/dev/null", track->dbStderrFile, 0);
}
static struct customTrack *barChartFinish(struct customTrack *track, struct barChartBed *itemList)
/* Finish up barChart tracks. TODO: reuse from pgSnp*/
{
struct barChartBed *item;
char buf[50];
track->tdb->type = cloneString("barChart");
track->dbTrackType = cloneString("barChart");
safef(buf, sizeof(buf), "%d", track->fieldCount);
ctAddToSettings(track, "fieldCount", cloneString(buf));
safef(buf, sizeof(buf), "%d", slCount(itemList));
ctAddToSettings(track, "itemCount", cloneString(buf));
safef(buf, sizeof(buf), "%s:%u-%u", itemList->chrom,
itemList->chromStart, itemList->chromEnd);
@@ -1563,31 +1563,31 @@
dyStringPrintf(ds, "-maxChromNameLength=%d", track->maxChromName);
cmd1[index++] = dyStringCannibalize(&ds);
cmd1[index++] = CUSTOM_TRASH;
cmd1[index++] = track->dbTableName;
cmd1[index++] = "stdin";
assert(index <= ArraySize(cmd1));
/* the "/dev/null" file isn't actually used for anything, but it is used
* in the pipeLineOpen to properly get a pipe started that isn't simply
* to STDOUT which is what a NULL would do here instead of this name.
* This function exits if it can't get the pipe created
* The dbStderrFile will get stderr messages from hgLoadBed into the
* our private error log so we can send it back to the user
*/
-return pipelineOpen1(cmd1, pipelineWrite | pipelineNoAbort, "/dev/null", track->dbStderrFile);
+return pipelineOpen1(cmd1, pipelineWrite | pipelineNoAbort, "/dev/null", track->dbStderrFile, 0);
}
static struct customTrack *interactFinish(struct customTrack *track, struct interact *itemList)
/* Finish up interact tracks */
{
struct interact *item;
char buf[50];
track->tdb->type = cloneString("interact");
track->dbTrackType = cloneString("interact");
safef(buf, sizeof(buf), "%d", track->fieldCount);
ctAddToSettings(track, "fieldCount", cloneString(buf));
safef(buf, sizeof(buf), "%d", slCount(itemList));
ctAddToSettings(track, "itemCount", cloneString(buf));
safef(buf, sizeof(buf), "%s:%u-%u", itemList->chrom,
itemList->chromStart, itemList->chromEnd);
@@ -2212,31 +2212,31 @@
"create directory or specify in hg.conf customTracks.tmpdir", tmpDir);
dyStringPrintf(tmpDy, "-tmpDir=%s", tmpDir);
cmd1[3] = dyStringCannibalize(&tmpDy); tmpDy = newDyString(0);
dyStringPrintf(tmpDy, "-loadFile=%s", mafFile);
cmd1[4] = dyStringCannibalize(&tmpDy); tmpDy = newDyString(0);
dyStringPrintf(tmpDy, "-refDb=%s", track->genomeDb);
cmd1[5] = dyStringCannibalize(&tmpDy); tmpDy = newDyString(0);
dyStringPrintf(tmpDy, "-maxNameLen=%d", track->maxChromName);
cmd1[6] = dyStringCannibalize(&tmpDy); tmpDy = newDyString(0);
dyStringPrintf(tmpDy, "-defPos=%s", tn.forCgi);
cmd1[7] = dyStringCannibalize(&tmpDy);
cmd1[8] = CUSTOM_TRASH;
cmd1[9] = track->dbTableName;
struct pipeline *dataPipe = pipelineOpen(cmds,
- pipelineWrite | pipelineNoAbort, "/dev/null", track->dbStderrFile);
+ pipelineWrite | pipelineNoAbort, "/dev/null", track->dbStderrFile, 0);
if(pipelineWait(dataPipe))
pipelineFailExit(track); /* prints error and exits */
pipelineFree(&dataPipe);
unlink(track->dbStderrFile); /* no errors, not used */
track->wigFile = NULL;
struct lineFile *lf = lineFileOpen(tn.forCgi, TRUE);
char *line;
int size;
lineFileNeedNext(lf, &line, &size);
lineFileClose(&lf);
unlink(tn.forCgi);
ctAddToSettings(track, "firstItemPos", cloneString(line));
@@ -2380,31 +2380,31 @@
*/
if (startsWith("/", trashDir()))
cmd2[6] = "-pathPrefix=/";
else
cmd2[6] = "-pathPrefix=.";
cmd2[7] = CUSTOM_TRASH;
cmd2[8] = track->dbTableName;
/* the "/dev/null" file isn't actually used for anything, but it is used
* in the pipeLineOpen to properly get a pipe started that isn't simply
* to STDOUT which is what a NULL would do here instead of this name.
* This function exits if it can't get the pipe created
* The dbStderrFile will get stderr messages from this pipeline into the
* our private error file so we can return the errors to the user.
*/
return pipelineOpen(cmds, pipelineWrite | pipelineNoAbort,
- "/dev/null", track->dbStderrFile);
+ "/dev/null", track->dbStderrFile, 0);
}
static void wigDbGetLimits(struct sqlConnection *conn, char *tableName,
double *retUpperLimit, double *retLowerLimit, int *retSpan)
/* Figure out upper/lower limits of wiggle table. */
{
char query[512];
sqlSafef(query,sizeof(query),
"select min(lowerLimit),max(lowerLimit+dataRange) from %s",
tableName);
struct sqlResult *sr = sqlGetResult(conn, query);
char **row;
if ((row = sqlNextRow(sr)) != NULL)
{
if (row[0]) *retLowerLimit = sqlDouble(row[0]);