commit 12ebfa5e63a0b7b3e28673ca254686ea5cb43e17
parent ea1a403cae04ab96ecd127ce76bf03d321e09bf6
Author: Louis Burda <quent.burda@gmail.com>
Date: Sun, 20 Dec 2020 23:01:45 +0100
integrated aoc helper
Diffstat:
13 files changed, 30 insertions(+), 191 deletions(-)
diff --git a/.gitmodules b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "helper"]
+	path = helper
+	url = git@github.com:Sinitax/aoc-helper.git
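With the helper now tracked as a submodule, a fresh checkout must initialize it before any of the helper scripts work. A minimal sketch (the parent repo URL is elided here):

    git clone <repo-url> aoc && cd aoc
    git submodule update --init    # fetches aoc-helper at the pinned commit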
diff --git a/aoc b/aoc
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-function rmprefix() {
-	echo "${1:${#2}}"
-}
-
-function aoc() {
-	[ $# -lt 1 ] && exit;
-	if [ $1 == "deactivate" ]; then
-		export PS1=$(rmprefix "$PS1" "$PS1_PREFIX")
-		export PS1_PREFIX=
-
-		export REPOROOT=
-		unset aoc
-	else
-		[ $# -lt 1 ] && exit 0
-		if [ -e "$REPOROOT/scripts/$1" ]; then
-			"$REPOROOT/scripts/$1" ${@:2}
-		else
-			echo "No such script"
-		fi
-	fi
-}
-
-SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-REPOROOT=$(git -C "$SCRIPTPATH" rev-parse --show-toplevel)
-
-if [[ "${BASH_SOURCE[0]}" != "$0" ]]; then
-	echo "Enabling AoC env.."
-	if [ ! -z "$PS1_PREFIX" ]; then
-		echo "Already initialized!"
-		return 0
-	fi
-	export PS1_PREFIX="[🎄]:"
-	export PS1="$PS1_PREFIX$PS1"
-
-	export REPOROOT="$REPOROOT"
-fi
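For reference, the removed wrapper was designed to be sourced rather than executed: sourcing it prefixed PS1, exported REPOROOT, and defined an aoc function that dispatched to scripts/. Roughly:

    . ./aoc              # enable the env; the prompt gains the [🎄]: prefix
    aoc prepare 5        # runs scripts/prepare 5
    aoc deactivate       # restore PS1 and unset the aoc function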
diff --git a/data/README b/data/README
@@ -1 +0,0 @@
-Save the request headers for accessing problem statements as json in a 'headers' file.
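The removed README documented the 'headers' file that the scrape and submit scripts load via json.loads() and pass directly to requests. In practice that is a JSON object of HTTP headers carrying the adventofcode.com session cookie; a sketch, with a placeholder token:

    cat > data/headers <<'EOF'
    {"Cookie": "session=<your-session-token>"}
    EOF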
diff --git a/data/helper/config b/data/helper/config
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# year you are solving problems for
+export AOCYEAR=2020
+
+# directory you want day directories to be prepared in (format: src/dayN/..)
+export SRCDIR="src"
+
+# specify what files to copy to your day directory on prepare
+export TEMPLATE_DIR="data"
+export TEMPLATE_FILES="
+main.zig:main.zig
+makefile:makefile
+"
diff --git a/data/helper/init.sh b/data/helper/init.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+REPOROOT=$(git rev-parse --show-toplevel)
+
+set -e
+
+ln -sf "$REPOROOT/data/helper/config" "$REPOROOT/helper/data/config"
+for f in "$REPOROOT/data/helper/scripts"/*; do
+ ln -sf "$f" "$REPOROOT/helper/scripts"
+done
+
+echo "done"
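Note that ln -sf "$f" "$REPOROOT/helper/scripts" links each script into that directory under its own name. Typical first-time setup would then be:

    git submodule update --init
    ./data/helper/init.sh    # symlink the repo-local config and scripts into helper/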
diff --git a/scripts/build b/data/helper/scripts/build
diff --git a/scripts/run b/data/helper/scripts/run
diff --git a/scripts/test b/data/helper/scripts/test
diff --git a/data/template.zig b/data/helper/template/main.zig
diff --git a/helper b/helper
@@ -0,0 +1 @@
+Subproject commit 8e9352ce4a608995101857c1877682579027bd4c
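The gitlink pins the helper at commit 8e9352c; bumping it later means moving the pin and committing the new SHA, e.g.:

    git submodule update --remote helper    # advance to the latest upstream commit
    git add helper && git commit -m "bump helper"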
diff --git a/scripts/prepare b/scripts/prepare
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-if [ $# -ne 1 ]; then
-	echo "Usage: prepare <daynum>"
-	exit 1
-fi
-
-function load() {
-	echo -n "$1.."
-	$REPOROOT/scripts/scrape ${@:2}
-	rv=$?
-	if [ $rv -eq 0 ]; then
-		echo "done"
-	else
-		echo "fail"
-	fi
-	return $rv
-}
-
-if ! [[ "$1" =~ ^[0-9]+$ ]]; then
-	echo "Not a number"
-	exit 1
-fi
-
-daynum="$1"
-folder="$REPOROOT/src/day$daynum"
-
-
-[ ! -e "$folder" ] && ( echo "Initializing folder"; mkdir "$folder" )
-[ ! -e "$folder/main.zig" ] && ( echo "Copying template"; cp "$REPOROOT/data/template.zig" "$folder/main.zig" )
-
-[ ! -e "$folder/input" ] && load "Loading problem input" $daynum input
-[ ! -e "$folder/part1" ] && load "Loading first problem text" $daynum part 1
-[ ! -e "$folder/part2" ] && load "Loading second problem text" $daynum part 2
diff --git a/scripts/scrape b/scripts/scrape
@@ -1,96 +0,0 @@
-#!/usr/bin/env python3
-
-import requests, sys, os, json
-from termcolor import colored
-from bs4 import BeautifulSoup, element
-
-if len(sys.argv) < 3:
-    print("Usage: scrape <day> <cmd> [<args..>]")
-    sys.exit(1)
-
-day = int(sys.argv[1])
-cmd = sys.argv[2]
-cmdargs = None if len(sys.argv) == 3 else sys.argv[3:]
-
-year = "2020"
-colortext = True
-reporoot = os.getenv("REPOROOT")
-headers = json.loads(open(f"{reporoot}/data/headers").read())
-baseurl = f"https://adventofcode.com/{year}/day/{day}"
-
-def scrape_input(day):
-    path = f"{reporoot}/src/day{day}/input"
-    if os.path.exists(path):
-        sys.exit(1)
-
-    r = requests.get(f"{baseurl}/input", headers = headers)
-
-    if r.status_code != 200:
-        sys.exit(1)
-
-    with open(path, "w+") as f:
-        f.write(r.text)
-
-def extract_hltext(elem):
-    text = ""
-    for ce in elem:
-        if ce.name == "em":
-            if not ce.get("class"):
-                text += colored(ce.text, "white", attrs=["bold"])
-            elif "star" in ce.get("class"):
-                text += colored(ce.text, "yellow", attrs=["bold"])
-            else:
-                text += colored(ce.text, "magenta", attrs=["bold"])
-        elif type(ce) == element.NavigableString:
-            text += str(ce)
-        elif ce.name == "li":
-            text += " - " + extract_hltext(ce)
-        elif ce.name[0] == "h":
-            text += ce.text + "\n\n"
-        elif ce.name == "p" or ce.name == "ul":
-            text += extract_hltext(ce) + "\n"
-        else:
-            text += extract_hltext(ce)
-    return text
-
-def scrape_text(day, part):
-    path = f"{reporoot}/src/day{day}/part{part}"
-    if os.path.exists(path):
-        sys.exit(1)
-
-    r = requests.get(baseurl, headers = headers)
-    soup = BeautifulSoup(r.text, "html.parser")
-
-    try:
-        textobj = soup.select("article.day-desc")[part - 1]
-    except IndexError:
-        sys.exit(1)
-
-    with open(path, "w+") as of:
-        text = extract_hltext(textobj)
-        parts = text.split("\n")
-        for i,p in enumerate(parts):
-            if p == "":
-                continue
-            newp = [list(),]
-            words = p.split(" ")
-            linelen = 0
-            for w in words:
-                if linelen + len(w) > 100:
-                    newp.append(list())
-                    linelen = 0
-                newp[-1].append(w)
-                linelen += len(w) + 1
-            parts[i] = "\n".join([" ".join(words) for words in newp])
-        of.write("{}\n".format("\n".join(parts)))
-
-def main():
-    if cmd == "input":
-        scrape_input(day)
-    elif cmd == "part":
-        part = int(cmdargs[0]) if cmdargs else 1
-        scrape_text(day, part)
-    else:
-        sys.exit(1)
-
-main()
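The removed scrape script took the day followed by a subcommand and relied on REPOROOT being exported by the wrapper:

    ./scripts/scrape 5 input     # save the puzzle input to src/day5/input
    ./scripts/scrape 5 part 2    # save the part-2 text, wrapped at ~100 columns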
diff --git a/scripts/submit b/scripts/submit
@@ -1,22 +0,0 @@
-#!/usr/bin/env python3
-
-import requests, sys, os, json
-from bs4 import BeautifulSoup
-
-if len(sys.argv) != 4:
-    print("Usage: submit <day> <part> <answer>")
-    sys.exit(1)
-
-daynum = sys.argv[1]
-lvlnum = sys.argv[2]
-answer = sys.argv[3]
-
-year = "2020"
-url = f"https://adventofcode.com/{year}/day/{daynum}/answer"
-reporoot = os.getenv("REPOROOT")
-headers = json.loads(open(f"{reporoot}/data/headers").read())
-data = { 'level': lvlnum, 'answer': answer }
-
-r = requests.post(url, headers=headers, data=data)
-soup = BeautifulSoup(r.text, "html.parser")
-print(soup.select_one("main").text)
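The removed submit script POSTed the answer form and printed the text of the response's <main> element; usage matched its argv check:

    ./scripts/submit 5 1 1234    # day 5, part 1, answer 1234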