95e2c3a93202ee912624bb730724313faae7c886 chmalee Wed Dec 4 16:41:31 2024 -0800 Use the DataTables select API instead of implementing my own checkboxes
Working child/parent rows. Just need to add checkboxes to the children and indent them a smidge
Fix login via api key in pre-finish hook. Make the pre-finish hook determine if we came from hubtools or not and don't create hub.txts if we did.
Fix writing of parentRows. Make the client not use .child() but instead show or hide rows based on a load-time constraint, which is not quite working all the way yet
Make select/deselect work with child files. Remove some dead code
Make deletes work by moving any requested directories to the end of the delete list and then deleting the directories so rmdir doesn't fail
Fix new table rows not animating on add
Make newly uploaded files show up in the table, including their parent dir and hub.txt, and indent them appropriately
Hash filenames to file objects in the client so we don't add duplicate rows to the table when multiple files are uploaded at once. Update the selected file div after deleting files.
Make clicks on hub.txt files load the hub. Turn the file clicked on to pack. Remove some old copy from the template page
Revert an early experimental commit from trying to allow bigDataUrl uploads directly through hgCustom
Fix up some comments to be more correct
Remove old CGI experiment

diff --git src/hg/hgHubConnect/hooks/pre-finish.c src/hg/hgHubConnect/hooks/pre-finish.c
index 0361107..26e947c 100644
--- src/hg/hgHubConnect/hooks/pre-finish.c
+++ src/hg/hgHubConnect/hooks/pre-finish.c
@@ -48,88 +48,94 @@
 // stdout gets sent by the daemon back to the client
 int exitStatus = 0;
 struct jsonElement *response = makeDefaultResponse();
 if (!(reqId))
     {
     rejectUpload(response, "not a TUS request");
     exitStatus = 1;
     }
 else
     {
     struct errCatch *errCatch = errCatchNew(0);
     if (errCatchStart(errCatch))
         {
         // the variables for the row entry for this file, some can be NULL
         char *userName = NULL;
+        char *dataDir = NULL, *userDataDir = NULL;
         char *fileName = NULL;
         long long fileSize = 0;
         char *fileType = NULL;
         char *db = NULL;
         char *location = NULL;
         char *reqLm = NULL;
         time_t lastModified = 0;
+        boolean isHubToolsUpload = FALSE;
         char *parentDir = NULL;
         struct lineFile *lf = lineFileStdin(FALSE);
         char *request = lineFileReadAll(lf);
         struct jsonElement *req = jsonParse(request);
         fprintf(stderr, "Hook request:\n");
         jsonPrintToFile(req, NULL, stderr, 0);

         char *reqCookie= jsonQueryString(req, "", "Event.HTTPRequest.Header.Cookie[0]", NULL);
         if (reqCookie)
             {
             setenv("HTTP_COOKIE", reqCookie, 0);
             }
         fprintf(stderr, "reqCookie='%s'\n", reqCookie);
         userName = getUserName();
         if (!userName)
             {
             // maybe an apiKey was provided, use that instead to look up the userName
             char *apiKey = jsonQueryString(req, "", "Event.Upload.MetaData.apiKey", NULL);
             userName = userNameForApiKey(apiKey);
-            errAbort("not logged in");
+            if (!userName)
+                errAbort("You are not logged in. Please navigate to My Data -> My Sessions and log in or create an account.");
             }
         fprintf(stderr, "userName='%s'\n'", userName);

         // NOTE: All Upload.MetaData values are strings
         fileName = cgiEncodeFull(jsonQueryString(req, "", "Event.Upload.MetaData.fileName", NULL));
         fileSize = jsonQueryInt(req, "", "Event.Upload.Size", 0, NULL);
         fileType = jsonQueryString(req, "", "Event.Upload.MetaData.fileType", NULL);
         db = jsonQueryString(req, "", "Event.Upload.MetaData.genome", NULL);
         reqLm = jsonQueryString(req, "", "Event.Upload.MetaData.lastModified", NULL);
         lastModified = sqlLongLong(reqLm) / 1000; // yes Javascript dates are in millis
+        char *hubtoolsStr = jsonQueryString(req, "", "Event.Upload.MetaData.hubtools", NULL);
+        if (hubtoolsStr)
+            isHubToolsUpload = sameString(hubtoolsStr, "TRUE") || sameString(hubtoolsStr, "true");
         parentDir = jsonQueryString(req, "", "Event.Upload.MetaData.parentDir", NULL);
         fprintf(stderr, "parentDir = '%s'\n", parentDir);
         fflush(stderr);
         // strip out plain leading '.' and '/' components
         // middle '.' components are dealt with later
         if (startsWith("./", parentDir) || startsWith("/", parentDir))
             parentDir = skipBeyondDelimit(parentDir, '/');
         fprintf(stderr, "parentDir = '%s'\n", parentDir);
         fflush(stderr);
         char *tusFile = jsonQueryString(req, "", "Event.Upload.Storage.Path", NULL);
         if (fileName == NULL)
             {
             errAbort("No Event.Upload.fileName setting");
             }
         else if (tusFile == NULL)
             {
             errAbort("No Event.Path setting");
             }
         else
             {
             char *tusInfo = catTwoStrings(tusFile, ".info");
-            char *dataDir = getDataDir(userName);
+            userDataDir = dataDir = getDataDir(userName);
             struct dyString *newFile = dyStringNew(0);
             // if parentDir provided we are throwing the files in there
             if (parentDir)
                 {
                 if (!endsWith(parentDir, "/"))
                     parentDir = catTwoStrings(parentDir, "/");
                 dataDir = catTwoStrings(dataDir, parentDir);
                 }
             dyStringPrintf(newFile, "%s%s", dataDir, fileName);
             fprintf(stderr, "moving %s to %s\n", tusFile, dyStringContents(newFile));
             // TODO: check if file exists or not and let user choose to overwrite
             // and re-call this hook, for now just exit if the file exists
             if (fileExists(dyStringContents(newFile)))
                 {
@@ -152,46 +158,51 @@
                     }
                copyFile(tusFile, dyStringContents(newFile));
                // the files definitely should not be executable!
                chmod(dyStringContents(newFile), 0666);
                mustRemove(tusFile);
                mustRemove(tusInfo);
                dyStringCannibalize(&newFile);
                }
            }
        // we've passed all the checks so we can write a new or updated row
        // to the mysql table and return to the client that we were successful
        if (exitStatus == 0)
            {
            // create a hub for this upload, which can be edited later
-            createNewTempHubForUpload(reqId, userName, db, fileName, fileType, parentDir);
-            fprintf(stderr, "added hub.txt and hubSpace row for hub for file: '%s'\n", fileName);
-            fflush(stderr);
            struct hubSpace *row = NULL;
            AllocVar(row);
            row->userName = userName;
            row->fileName = fileName;
            row->fileSize = fileSize;
            row->fileType = fileType;
            row->creationTime = NULL; // automatically handled by mysql
            row->lastModified = sqlUnixTimeToDate(&lastModified, TRUE);
-            row->parentDir = parentDir;
            row->db = db;
            row->location = location;
            row->md5sum = md5HexForFile(row->location);
            row->parentDir = parentDir ? parentDir : "";
+            if (!isHubToolsUpload)
+                {
+                createNewTempHubForUpload(reqId, row, userDataDir, parentDir);
+                fprintf(stderr, "added hub.txt and hubSpace row for hub for file: '%s'\n", fileName);
+                fflush(stderr);
+                }
+            // first make the parentDir rows
+            makeParentDirRows(row->userName, sqlDateToUnixTime(row->lastModified), row->db, row->parentDir, userDataDir);
+            row->parentDir = hubNameFromPath(parentDir);
            addHubSpaceRowForFile(row);
            fprintf(stderr, "added hubSpace row for file '%s'\n", fileName);
            fflush(stderr);
            }
        }
    if (errCatch->gotError)
        {
        rejectUpload(response, errCatch->message->string);
        exitStatus = 1;
        }
    errCatchEnd(errCatch);
    }

// always print a response no matter what
jsonPrintToFile(response, NULL, stdout, 0);
return exitStatus;
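A note on the new makeParentDirRows()/hubNameFromPath() calls in the second hunk: the hook now records the upload's parent directories before adding the file's own hubSpace row, which is what lets the client table show the parent dir and hub.txt entries mentioned in the commit message. The actual makeParentDirRows() lives elsewhere in hgHubConnect and is not shown in this diff; the following is only a rough, standalone sketch of the assumed idea (one entry per cumulative path component of parentDir), with hypothetical names throughout:

/* parentDirRows.c - Illustrative sketch only; the real work is done by
 * makeParentDirRows() inside hgHubConnect, whose details are not in this
 * diff. Assumption: one parent-directory entry is made per cumulative
 * path component of parentDir. */
#include <stdio.h>

static void emitParentDirs(const char *parentDir)
/* Print each cumulative prefix of parentDir that ends in '/'. */
{
char buf[4096];
int len = 0;
for (const char *p = parentDir; *p && len < (int)sizeof(buf) - 1; p++)
    {
    buf[len++] = *p;
    if (*p == '/')
        {
        buf[len] = '\0';
        printf("parent dir row: %s\n", buf);
        }
    }
}

int main(void)
{
emitParentDirs("myHub/tracks/");   /* hypothetical parentDir value */
return 0;
}

With parentDir = "myHub/tracks/" this prints one line for "myHub/" and one for "myHub/tracks/", mirroring how an uploaded file ends up nested under its directory rows in the client table.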
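The "make deletes work" commit in the squash above is not part of this diff, but its reasoning is worth spelling out: rmdir() only removes empty directories, so a mixed delete request has to remove plain files first and directories afterwards, deepest directories before shallower ones. Below is a minimal standalone C sketch of that ordering idea, illustrative only and not the hgHubConnect code:

/* deleteOrder.c - Illustrative sketch only, not the hgHubConnect code:
 * delete plain files before directories, and deeper directories before
 * shallower ones, so rmdir() never sees a non-empty directory. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int isDir(const char *path)
/* Return 1 if path exists and is a directory. */
{
struct stat st;
return (stat(path, &st) == 0) && S_ISDIR(st.st_mode);
}

static int depth(const char *path)
/* Count '/' characters as a crude directory-depth measure. */
{
int n = 0;
for ( ; *path; path++)
    if (*path == '/')
        n++;
return n;
}

static int deleteCmp(const void *a, const void *b)
/* qsort comparator: plain files first, then directories deepest-first. */
{
const char *pa = *(const char * const *)a;
const char *pb = *(const char * const *)b;
int da = isDir(pa), db = isDir(pb);
if (da != db)
    return da - db;               /* files (0) sort before directories (1) */
if (da)
    return depth(pb) - depth(pa); /* both directories: deeper paths first */
return strcmp(pa, pb);
}

int main(int argc, char *argv[])
/* Usage: deleteOrder path [path ...] */
{
if (argc < 2)
    return 1;
qsort(argv + 1, argc - 1, sizeof(char *), deleteCmp);
for (int i = 1; i < argc; i++)
    {
    int err = isDir(argv[i]) ? rmdir(argv[i]) : unlink(argv[i]);
    if (err != 0)
        perror(argv[i]);
    }
return 0;
}

Sorting the request this way means that by the time each rmdir() runs, everything inside that directory has already been unlinked, so the removal cannot fail for being non-empty.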