aboutsummaryrefslogtreecommitdiffstats
path: root/bin/ssg
diff options
context:
space:
mode:
author    la-ninpre <leobrekalini@gmail.com>  2022-04-19 00:24:23 +0300
committer la-ninpre <leobrekalini@gmail.com>  2022-04-19 00:32:37 +0300
commit    8d962aabf74c42c3afdc38f2f85fa7b06fd04ef0 (patch)
tree      e5054517b940e5c535a4c7282d7faa7b6462e794 /bin/ssg
parent    9f4b181b36418669462c0300c2e090d0f4547dc8 (diff)
download  aaoth.xyz-8d962aabf74c42c3afdc38f2f85fa7b06fd04ef0.tar.gz
          aaoth.xyz-8d962aabf74c42c3afdc38f2f85fa7b06fd04ef0.zip
reimplement website using shell scripts
major change, i know. now i'm using ssg and rssg by roman zolotarev. okay, well, not exactly. ssg is modified to generate pages for gemini too. it's hard to maintain two different things simultaneously. bye-bye jekyll!
Diffstat (limited to 'bin/ssg')
-rwxr-xr-x  bin/ssg  273
1 file changed, 273 insertions, 0 deletions
diff --git a/bin/ssg b/bin/ssg
new file mode 100755
index 0000000..94e87b6
--- /dev/null
+++ b/bin/ssg
@@ -0,0 +1,273 @@
+#!/bin/sh -e
+#
+# https://rgz.ee/bin/ssg6
+# Copyright 2018-2019 Roman Zolotarev <hi@romanzolotarev.com>
+# Copyright 2022 la-ninpre <aaoth@aaoth.xyz>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+
main() {
  # Usage: ssg src dst gemdst title base_url
  # Renders an HTML site into $dst and a Gemini capsule into $gemdst from
  # the same $src tree.
  test -n "$1" || usage
  test -n "$2" || usage
  test -n "$3" || usage
  test -n "$4" || usage
  test -n "$5" || usage
  test -d "$1" || no_dir "$1"
  test -d "$2" || no_dir "$2"
  test -d "$3" || no_dir "$3"

  src=$(readlink_f "$1")
  dst=$(readlink_f "$2")
  gemdst=$(readlink_f "$3")

  # Build a find(1) predicate fragment from .ssgignore (one glob per line);
  # the list_* helpers splice it in via eval.  Without an ignore file, only
  # dotfiles are excluded.
  IGNORE=$(
    if ! test -f "$src/.ssgignore"; then
      printf ' ! -path "*/.*"'
      return
    fi
    while read -r x; do
      test -n "$x" || continue
      printf ' ! -path "*/%s*"' "$x"
    done <"$src/.ssgignore"
  )

  # files

  title="$4"

  # Optional shared partials; exported so the awk renderers can read them
  # through ENVIRON[].
  h_file="$src/_header.html"
  f_file="$src/_footer.html"
  gf_file="$src/_gemfooter.gmi"
  test -f "$f_file" && FOOTER=$(cat "$f_file") && export FOOTER
  test -f "$h_file" && HEADER=$(cat "$h_file") && export HEADER
  test -f "$gf_file" && GEMFOOTER=$(cat "$gf_file") && export GEMFOOTER

  # Mirror the source directory tree into both destinations.
  list_dirs "$src" |
    (cd "$src" && cpio -pdu "$dst")
  list_dirs "$src" |
    (cd "$src" && cpio -pdu "$gemdst")

  # Incremental build: .files is the timestamp reference of the previous
  # run; rebuild only what changed since then, or everything on first run.
  fs=$(
    if test -f "$dst/.files"; then
      list_affected_files "$src" "$dst/.files"
      if test -f "$gemdst/.files"; then
        list_affected_files "$src" "$gemdst/.files"
      fi
    else
      list_files "$src"
    fi
  )

  if test -n "$fs"; then
    echo "$fs" | tee "$dst/.files"
    echo "$fs" | tee "$gemdst/.files"

    if echo "$fs" | grep -q '\.md$'; then
      # lowdown renders markdown for both outputs; it is the only
      # supported markdown renderer.
      if test -x "$(command -v lowdown 2>/dev/null)"; then
        echo "$fs" | grep '\.md$' |
          render_md_files_lowdown "$src" "$dst" "$title"
        echo "$fs" | grep '\.md$' |
          render_md_files_lowdown_gemini "$src" "$gemdst"
      else
        echo "couldn't find lowdown" >&2
        exit 3
      fi
    fi

    echo "$fs" | grep '\.html$' |
      render_html_files "$src" "$dst" "$title"

    # Copy everything that is not rendered above: non-md/html assets go to
    # the HTML site; non-md/gmi assets go to the Gemini capsule.
    echo "$fs" | grep -Ev '\.md$|\.html$' |
      (cd "$src" && cpio -pu "$dst")
    echo "$fs" | grep -Ev '\.md$|\.gmi$' |
      (cd "$src" && cpio -pu "$gemdst")
  fi

  printf '[ssg] ' >&2
  print_status 'file, ' 'files, ' "$fs" >&2

  # sitemap

  base_url="$5"
  date=$(date +%Y-%m-%d)
  urls=$(list_pages "$src")

  test -n "$urls" &&
    render_sitemap "$urls" "$base_url" "$date" >"$dst/sitemap.xml"

  print_status 'url' 'urls' "$urls" >&2
  echo >&2
}
+
# Portable `readlink -f`: print the absolute, symlink-free path of $1.
# Changes the caller's working directory; call it from a subshell.
readlink_f() {
  target=$1
  cd "$(dirname "$target")"
  target=$(basename "$target")
  # Follow symlink chains; each hop may be relative to the link's directory.
  while test -L "$target"; do
    target=$(readlink "$target")
    cd "$(dirname "$target")"
    target=$(basename "$target")
  done
  printf '%s/%s\n' "$(pwd -P)" "$target"
}
+
# Print "<count> <label>" (no trailing newline) for the lines in $3.
# $1 = singular label, $2 = plural label, $3 = newline-separated items.
# Prints "no <plural>" when $3 is empty.
print_status() {
  if test -z "$3"; then
    printf 'no %s' "$2"
    return
  fi

  # Pass labels as printf ARGUMENTS, not as the format string: the original
  # used `printf NR " " singular`, so a '%' in a label corrupted the output.
  echo "$3" | awk -v singular="$1" -v plural="$2" '
    END {
      printf "%d %s", NR, (NR == 1 ? singular : plural)
    }'
}
+
# Print the invocation synopsis on stderr and abort with status 1.
usage() {
  printf 'usage: %s src dst gemdst title base_url\n' "${0##*/}" >&2
  exit 1
}
+
# Report a missing directory $1 on stderr and abort with status 2.
no_dir() {
  printf '%s: %s: No such directory\n' "${0##*/}" "$1" >&2
  exit 2
}
+
# List every directory below $1 as a ./relative path, excluding the root
# entry itself, underscore-prefixed template dirs, and anything matched by
# the global $IGNORE find-predicate string built in main().
# NOTE(review): $IGNORE is spliced in via eval and becomes find arguments;
# it must only ever come from the trusted .ssgignore file.
list_dirs() {
  cd "$1" && eval "find . -type d ! -name '.' ! -path '*/_*' $IGNORE"
}
+
# List every regular file below $1, with the same exclusions (underscore
# paths, $IGNORE patterns) as list_dirs.
list_files() {
  cd "$1" && eval "find . -type f ! -name '.' ! -path '*/_*' $IGNORE"
}
+
# List the files that embed the shared partials (html/md/css/js): these
# must all be rebuilt whenever a _header/_footer partial changes.
list_dependant_files() {
  e="\\( -name '*.html' -o -name '*.md' -o -name '*.css' -o -name '*.js' \\)"
  cd "$1" && eval "find . -type f ! -name '.' ! -path '*/_*' $IGNORE $e"
}
+
# List files under $1 modified more recently than reference file $2
# (typically "$dst/.files" from the previous run).
# $2 is kept quoted through the eval (\"\$2\") so a reference path
# containing spaces survives word splitting — the original interpolated it
# unquoted into the find command line.
list_newer_files() {
  cd "$1" && eval "find . -type f ! -name '.' $IGNORE -newer \"\$2\""
}
+
# Succeed if stdin contains any underscore-prefixed partial (html/js/css)
# in the site root, i.e. a line like "./_header.html".
has_partials() {
  grep -qE '^./_.*\.(html|js|css)$'
}
+
# Decide what needs rebuilding: files under $1 newer than reference $2.
# If any shared partial changed, every dependant file must be rebuilt.
list_affected_files() {
  changed=$(list_newer_files "$1" "$2")

  if echo "$changed" | has_partials; then
    list_dependant_files "$1"
  else
    echo "$changed"
  fi
}
+
# Render each HTML page path read from stdin.
# $1 = src root, $2 = dst root, $3 = site title.
render_html_files() {
  while read -r page; do
    render_html_file "$3" <"$1/$page" >"$2/$page"
  done
}
+
# Convert each markdown path read from stdin into a templated HTML page.
# $1 = src root, $2 = dst root, $3 = site title.
render_md_files_lowdown() {
  while read -r md_path; do
    out_path="$2/${md_path%.md}.html"
    lowdown \
      --html-no-escapehtml \
      --html-no-skiphtml \
      --parse-no-metadata \
      --parse-no-autolink <"$1/$md_path" |
      render_html_file "$3" >"$out_path"
  done
}
+
# Convert each markdown path read from stdin into a gemtext page.
# $1 = src root, $2 = gemini dst root.
render_md_files_lowdown_gemini() {
  while read -r md_path; do
    lowdown -Tgemini <"$1/$md_path" |
      render_gmi_file >"$2/${md_path%.md}.gmi"
  done
}
+
# Render one HTML page to stdout: stdin supplies the body fragment, $1 is
# the fallback <title> text, and the exported HEADER/FOOTER partials are
# read from the environment inside awk via ENVIRON[].
render_html_file() {
  # h/t Devin Teske
  awk -v title="$1" '
  { body = body "\n" $0 }
  END {
    # strip the artificial leading newline added by the accumulator
    body = substr(body, 2)
    # a document that already contains an <html> tag is emitted verbatim
    if (body ~ /<\/?[Hh][Tt][Mm][Ll]/) {
      print body
      exit
    }
    # if the fragment has an <h1>, prefix its text to the page title
    if (match(body, /<[[:space:]]*[Hh]1(>|[[:space:]][^>]*>)/)) {
      t = substr(body, RSTART + RLENGTH)
      sub("<[[:space:]]*/[[:space:]]*[Hh]1.*", "", t)
      gsub(/^[[:space:]]*|[[:space:]]$/, "", t)
      if (t) title = t " &mdash; " title
    }
    # splice the computed title into the header line that carries an empty
    # <title></title> placeholder; all other header lines pass through
    n = split(ENVIRON["HEADER"], header, /\n/)
    for (i = 1; i <= n; i++) {
      if (match(tolower(header[i]), "<title></title>")) {
        head = substr(header[i], 1, RSTART - 1)
        tail = substr(header[i], RSTART + RLENGTH)
        print head "<title>" title "</title>" tail
      } else print header[i]
    }
    print body
    print ENVIRON["FOOTER"]
  }'
}
+
# Post-process `lowdown -Tgemini` output (stdin): rewrite local *.html link
# targets to *.gmi and append the exported GEMFOOTER partial.
render_gmi_file() {
  awk '
  { body = body "\n" $0 }
  END {
    body = substr(body, 2)
    n = split(body, body_n, /\n/)
    for (i = 1; i <= n; i++) {
      # leave absolute http(s):// link lines untouched; only local links
      # are rewritten.  NOTE(review): the {2} interval requires a POSIX
      # awk (gawk/mawk/bwk); historic awks do not support it.
      if (!match(body_n[i], /^=>[[:space:]]*[Hh][Tt]{2}[Pp][Ss]?:\/\/.*/)) {
        # NOTE(review): this also matches ".html" at end of line (zero
        # trailing spaces) and replaces it with ".gmi " — the trailing
        # space is presumably harmless in gemtext, but confirm.
        sub(/\.html[[:space:]]*/, ".gmi ", body_n[i])
      }
      print body_n[i]
    }
    #print body
    print ENVIRON["GEMFOOTER"]
  }'
}
+
# Emit the site-relative URL of every non-ignored *.html / *.md source
# under $1: the ./ prefix is stripped, .md maps to .html, and directory
# indexes collapse to "dir/".
# The dots in the sed patterns are escaped: the original `s#.md$#.html#`
# treated '.' as "any character", so e.g. "x.cmd" became "x..html".
list_pages() {
  e="\\( -name '*.html' -o -name '*.md' \\)"
  cd "$1" && eval "find . -type f ! -path '*/.*' ! -path '*/_*' $IGNORE $e" |
    sed 's#^\./##;s#\.md$#.html#;s#/index\.html$#/#'
}
+
# Emit a sitemap.xml document on stdout.
# $1 = newline-separated page paths, $2 = site base URL, $3 = lastmod date.
render_sitemap() {
  urls="$1"
  base_url="$2"
  date="$3"

  echo '<?xml version="1.0" encoding="UTF-8"?>'
  echo '<urlset'
  echo 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
  echo 'xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9'
  echo 'http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"'
  echo 'xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
  # printf per line instead of interpolating $base_url/$date into a sed
  # program: a '#', '&' or backslash in either would corrupt the sed
  # replacement in the original version.
  echo "$urls" | while IFS= read -r u; do
    printf '<url><loc>%s/%s</loc><lastmod>%s</lastmod><priority>1.0</priority></url>\n' \
      "$base_url" "$u" "$date"
  done
  echo '</urlset>'
}
+
+main "$@"