Remove nginx cache invalidations

Remove screenshots
Fix race condition bug
hzrd149 2025-04-05 15:13:05 +01:00
parent c3778507d4
commit ef5262f73c
23 changed files with 590 additions and 1170 deletions


@ -0,0 +1,5 @@
---
"nsite-gateway": major
---
Remove screenshots feature


@ -0,0 +1,5 @@
---
"nsite-gateway": major
---
Remove nginx cache invalidations


@ -0,0 +1,5 @@
---
"nsite-gateway": patch
---
Fix race condition when streaming blob
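For context: the old downloadBlob raced parallel requests inside a Promise executor, cancelled the losers once one succeeded, and layered a shared 30-second timer on top; the replacement first HEAD-checks which servers have the blob (findBlobURLs) and then streams from one URL at a time with a 10-second abort per attempt. A minimal sketch of that per-attempt pattern, assuming Node 18+ globals (fetch, AbortController); the helper name here is invented and the real implementation is in the blossom module diff below:

// Hedged sketch of the new sequential approach (not the gateway's exact code):
// try each candidate URL in order, aborting any attempt after 10 seconds.
async function fetchFirstAvailable(urls: string[]): Promise<Response | undefined> {
  for (const url of urls) {
    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), 10_000);
    try {
      const res = await fetch(url, { signal: controller.signal });
      if (res.ok) return res; // first 2xx wins; later URLs are never contacted
    } catch {
      // timed out or failed; fall through to the next URL
    } finally {
      clearTimeout(timeout);
    }
  }
  return undefined; // no server could provide the blob
}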


@ -2,7 +2,7 @@
# can be in-memory, redis:// or sqlite://
CACHE_PATH="in-memory"
# How long to keep a pubkeys relays and blossom servers in cache (in seconds)
# How long to keep cached data (in seconds)
CACHE_TIME=3600
# A list of relays to find users' relay lists (10002) and blossom servers (10063)
@ -17,9 +17,6 @@ BLOSSOM_SERVERS=https://nostr.download,https://cdn.satellite.earth
# The max file size to serve
MAX_FILE_SIZE='2 MB'
# The cache folder for nginx (used for cache invalidation)
NGINX_CACHE_DIR='/var/nginx/cache'
# A nprofile pointer for an nsite to use as the default homepage
# Setting this will override anything in the ./public folder
NSITE_HOMEPAGE=""
@ -27,10 +24,6 @@ NSITE_HOMEPAGE=""
# a local directory to download the homepage to
NSITE_HOMEPAGE_DIR="public"
# Screenshots require Puppeteer to be setup https://pptr.dev/troubleshooting#setting-up-chrome-linux-sandbox
ENABLE_SCREENSHOTS="false"
SCREENSHOTS_DIR="./screenshots"
# If this is set, nsite will return the 'Onion-Location' header in responses
# ONION_HOST=https://<hostname>.onion

1 .gitignore vendored

@ -3,5 +3,4 @@ build
.env
data
.netrc
screenshots

3 .vscode/launch.json vendored

@ -26,8 +26,7 @@
"internalConsoleOptions": "openOnSessionStart",
"outputCapture": "std",
"env": {
"DEBUG": "nsite,nsite:*",
"ENABLE_SCREENSHOTS": "true"
"DEBUG": "nsite,nsite:*"
}
}
]


@ -1,13 +1,9 @@
# syntax=docker/dockerfile:1
FROM node:22-alpine AS base
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable
RUN apk update && apk add --no-cache nginx supervisor
COPY supervisord.conf /etc/supervisord.conf
WORKDIR /app
COPY package.json .
COPY pnpm-lock.yaml .
@ -27,25 +23,13 @@ FROM base AS main
RUN addgroup -S nsite && adduser -S nsite -G nsite
RUN chown -R nsite:nsite /app
# Setup nginx
COPY nginx/nginx.conf /etc/nginx/nginx.conf
COPY nginx/http.conf /etc/nginx/conf.d/default.conf
# setup nsite
COPY --from=prod-deps /app/node_modules /app/node_modules
COPY --from=build ./app/build ./build
COPY ./public ./public
VOLUME [ "/var/cache/nginx" ]
EXPOSE 80 3000
ENV NSITE_PORT="3000"
ENV NGINX_CACHE_DIR="/var/cache/nginx"
ENV ENABLE_SCREENSHOTS="false"
COPY docker-entrypoint.sh /
RUN chmod +x /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]
CMD ["node", "."]


@ -1,75 +0,0 @@
# syntax=docker/dockerfile:1
FROM node:20-slim AS base
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable
# Setup nsite user
RUN groupadd -r nsite && useradd -r -g nsite -G audio,video nsite && usermod -d /app nsite
# Install nginx and supervisor
RUN apt-get update && apt-get install -y nginx supervisor
# setup supervisor
COPY supervisord.conf /etc/supervisord.conf
# Setup nginx
COPY nginx/nginx.conf /etc/nginx/nginx.conf
COPY nginx/http.conf /etc/nginx/conf.d/default.conf
RUN chown nsite:nsite -R /etc/nginx
# install google chrome for screenshots. copied from (https://pptr.dev/troubleshooting#running-puppeteer-in-docker)
# Install latest chrome dev package and fonts to support major charsets (Chinese, Japanese, Arabic, Hebrew, Thai and a few others)
# Note: this installs the necessary libs to make the bundled version of Chrome for Testing that Puppeteer
# installs, work.
RUN apt-get update \
&& apt-get install -y wget gnupg \
&& wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
&& sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list' \
&& apt-get update \
&& apt-get install -y google-chrome-stable fonts-ipafont-gothic fonts-wqy-zenhei fonts-thai-tlwg fonts-kacst fonts-freefont-ttf libxss1 \
--no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY package.json .
COPY pnpm-lock.yaml .
FROM base AS prod-deps
RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --prod --frozen-lockfile
FROM base AS build
RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --frozen-lockfile
COPY tsconfig.json .
COPY src ./src
RUN pnpm build
FROM base AS main
# setup nsite
COPY --from=prod-deps /app/node_modules /app/node_modules
COPY --from=build ./app/build ./build
COPY ./public ./public
VOLUME [ "/var/cache/nginx" ]
VOLUME [ "/screenshots" ]
EXPOSE 80 3000
ENV NSITE_PORT="3000"
ENV NGINX_CACHE_DIR="/var/cache/nginx"
ENV ENABLE_SCREENSHOTS="true"
ENV SCREENSHOTS_DIR="/screenshots"
ENV PUPPETEER_SKIP_DOWNLOAD="true"
COPY docker-entrypoint.sh /
RUN chmod +x /docker-entrypoint.sh
# change ownership of app
RUN chown nsite:nsite -R /app
# Run /docker-entrypoint as root so supervisor can run
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]


@ -1,17 +1,21 @@
version: "3.7"
services:
redis:
image: redis:alpine
command: redis-server --save 60 1 --loglevel warning
volumes:
- redis-data:/data
nsite:
build: .
image: ghcr.io/hzrd149/nsite-gateway:master
environment:
LOOKUP_RELAYS: wss://user.kindpag.es,wss://purplepag.es
SUBSCRIPTION_RELAYS: wss://nostrue.com/,wss://nos.lol/,wss://relay.damus.io/,wss://purplerelay.com/
volumes:
- type: tmpfs
target: /var/cache/nginx
tmpfs:
size: 100M
CACHE_PATH: redis://redis:6379
depends_on:
- redis
ports:
- 8080:80
- 3000:3000
volumes:
redis-data:


@ -1,7 +0,0 @@
#!/bin/sh
echo Changing permission on volumes
chown -R nsite:nsite /var/cache/nginx
chown -R nsite:nsite /screenshots
exec "$@"


@ -31,12 +31,11 @@
"koa-morgan": "^1.0.1",
"koa-send": "^5.0.1",
"koa-static": "^5.0.0",
"mime": "^4.0.6",
"nostr-tools": "^2.11.0",
"mime": "^4.0.7",
"nostr-tools": "^2.12.0",
"nsite-cli": "^0.1.16",
"pac-proxy-agent": "^7.2.0",
"proxy-agent": "^6.5.0",
"puppeteer": "^23.11.1",
"websocket-polyfill": "1.0.0",
"ws": "^8.18.1",
"xbytes": "^1.9.1"
@ -44,8 +43,8 @@
"devDependencies": {
"@changesets/cli": "^2.28.1",
"@swc-node/register": "^1.10.10",
"@swc/core": "^1.11.10",
"@types/better-sqlite3": "^7.6.12",
"@swc/core": "^1.11.16",
"@types/better-sqlite3": "^7.6.13",
"@types/debug": "^4.1.12",
"@types/follow-redirects": "^1.14.4",
"@types/koa": "^2.15.0",
@ -54,14 +53,14 @@
"@types/koa-static": "^4.0.4",
"@types/koa__cors": "^5.0.0",
"@types/koa__router": "^12.0.4",
"@types/node": "^20.17.24",
"@types/node": "^20.17.30",
"@types/proxy-from-env": "^1.0.4",
"@types/ws": "^8.18.0",
"esbuild": "^0.25.1",
"@types/ws": "^8.18.1",
"esbuild": "^0.25.2",
"nodemon": "^3.1.9",
"pkg": "^5.8.1",
"prettier": "^3.5.3",
"typescript": "^5.8.2"
"typescript": "^5.8.3"
},
"resolutions": {
"websocket-polyfill": "1.0.0"

1192 pnpm-lock.yaml generated (diff suppressed because it is too large)

BIN public/favicon.ico (new binary file, 15 KiB)


@ -1,53 +1,67 @@
import { IncomingMessage } from "node:http";
import { BLOSSOM_SERVERS, MAX_FILE_SIZE } from "./env.js";
import { MAX_FILE_SIZE } from "./env.js";
import { makeRequestWithAbort } from "./helpers/http.js";
import { blobURLs } from "./cache.js";
import logger from "./logger.js";
/**
* Downloads a file from multiple servers
* @todo download the file to /tmp and verify it
*/
export function downloadBlob(sha256: string, servers = BLOSSOM_SERVERS): Promise<IncomingMessage> {
return new Promise((resolve, reject) => {
const controllers = new Map<string, AbortController>();
const log = logger.extend("blossom");
// make all requests in parallel
servers.forEach(async (server) => {
/** Checks all servers for a blob and returns the URLs */
export async function findBlobURLs(sha256: string, servers: string[]): Promise<string[]> {
const cache = await blobURLs.get(sha256);
if (cache) return cache;
const urls = await Promise.all(
servers.map(async (server) => {
const url = new URL(sha256, server);
const check = await fetch(url, { method: "HEAD" }).catch(() => null);
if (check?.status === 200) return url.toString();
else return null;
}),
);
const filtered = urls.filter((url) => url !== null);
log(`Found ${filtered.length}/${servers.length} URLs for ${sha256}`);
await blobURLs.set(sha256, filtered);
return filtered;
}
/** Downloads a file from multiple servers */
export async function streamBlob(sha256: string, servers: string[]): Promise<IncomingMessage | undefined> {
if (servers.length === 0) return undefined;
// First find all available URLs
const urls = await findBlobURLs(sha256, servers);
if (urls.length === 0) return undefined;
// Try each URL sequentially with timeout
for (const urlString of urls) {
const controller = new AbortController();
let res: IncomingMessage | undefined = undefined;
controllers.set(server, controller);
try {
// Set up timeout to abort after 10s
const timeout = setTimeout(() => {
controller.abort();
}, 10_000);
const url = new URL(urlString);
const response = await makeRequestWithAbort(url, controller);
res = response;
clearTimeout(timeout);
if (!response.statusCode) throw new Error("Missing headers or status code");
const size = response.headers["content-length"];
if (size && parseInt(size) > MAX_FILE_SIZE) throw new Error("File too large");
if (response.statusCode >= 200 && response.statusCode < 300) {
// cancel the other requests
for (const [other, abort] of controllers) {
if (other !== server) abort.abort();
}
controllers.delete(server);
return resolve(response);
}
if (response.statusCode >= 200 && response.statusCode < 300) return response;
} catch (error) {
controllers.delete(server);
if (res) res.resume();
continue; // Try next URL if this one fails
}
}
// reject if last
if (controllers.size === 0) reject(new Error("Failed to find blob on servers"));
});
// reject if all servers don't respond in 30s
setTimeout(() => {
reject(new Error("Timeout"));
}, 30_000);
});
}


@ -1,13 +1,18 @@
import Keyv from "keyv";
import { CACHE_PATH, CACHE_TIME } from "./env.js";
import logger from "./logger.js";
const log = logger.extend("cache");
async function createStore() {
if (!CACHE_PATH || CACHE_PATH === "in-memory") return undefined;
else if (CACHE_PATH.startsWith("redis://")) {
const { default: KeyvRedis } = await import("@keyv/redis");
log(`Using redis cache at ${CACHE_PATH}`);
return new KeyvRedis(CACHE_PATH);
} else if (CACHE_PATH.startsWith("sqlite://")) {
const { default: KeyvSqlite } = await import("@keyv/sqlite");
log(`Using sqlite cache at ${CACHE_PATH}`);
return new KeyvSqlite(CACHE_PATH);
}
}
@ -15,7 +20,7 @@ async function createStore() {
const store = await createStore();
store?.on("error", (err) => {
console.log("Connection Error", err);
log("Connection Error", err);
process.exit(1);
});
@ -42,9 +47,16 @@ export const userRelays = new Keyv<string[] | undefined>({
ttl: CACHE_TIME * 1000,
});
/** A cache that maps a pubkey + path to blossom servers that had the blob ( pubkey/path -> servers ) */
export const pathServers = new Keyv<string[] | undefined>({
/** A cache that maps a pubkey + path to the sha256 hash of the blob ( pubkey/path -> sha256 ) */
export const pathBlobs = new Keyv<string | undefined>({
...opts,
namespace: "paths",
ttl: CACHE_TIME * 1000,
});
/** A cache that maps a sha256 hash to a set of URLs that had the blob ( sha256 -> URLs ) */
export const blobURLs = new Keyv<string[] | undefined>({
...opts,
namespace: "blobs",
ttl: CACHE_TIME * 1000,
});
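Both new caches follow the same Keyv pattern as the existing userDomains/userRelays stores. A hedged usage sketch with invented values, assuming the default in-memory store (redis:// and sqlite:// paths are handled by createStore above):

import Keyv from "keyv";

// Sketch only: an in-memory namespace with a one-hour TTL, mirroring the
// blobURLs declaration above. The hash and URL are invented examples.
const blobURLs = new Keyv<string[] | undefined>({ namespace: "blobs", ttl: 3600 * 1000 });

const sha256 = "b5bb9d8014a0f9b1d61e21e796d78dcc"; // shortened fake hash
await blobURLs.set(sha256, ["https://cdn.satellite.earth/" + sha256]);
const cached = await blobURLs.get(sha256); // string[] until the TTL expires, then undefined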


@ -16,7 +16,6 @@ const BLOSSOM_SERVERS = process.env.BLOSSOM_SERVERS?.split(",").map((u) => u.tri
const MAX_FILE_SIZE = process.env.MAX_FILE_SIZE ? xbytes.parseSize(process.env.MAX_FILE_SIZE) : Infinity;
const NGINX_CACHE_DIR = process.env.NGINX_CACHE_DIR;
const CACHE_PATH = process.env.CACHE_PATH;
const CACHE_TIME = process.env.CACHE_TIME ? parseInt(process.env.CACHE_TIME) : 60 * 60;
@ -28,9 +27,6 @@ const NSITE_HOST = process.env.NSITE_HOST || "0.0.0.0";
const NSITE_PORT = process.env.NSITE_PORT ? parseInt(process.env.NSITE_PORT) : 3000;
const HOST = `${NSITE_HOST}:${NSITE_PORT}`;
const ENABLE_SCREENSHOTS = process.env.ENABLE_SCREENSHOTS === "true";
const SCREENSHOTS_DIR = process.env.SCREENSHOTS_DIR || "./screenshots";
const ONION_HOST = process.env.ONION_HOST;
export {
@ -40,7 +36,6 @@ export {
LOOKUP_RELAYS,
BLOSSOM_SERVERS,
MAX_FILE_SIZE,
NGINX_CACHE_DIR,
CACHE_PATH,
PAC_PROXY,
TOR_PROXY,
@ -48,8 +43,6 @@ export {
NSITE_HOST,
NSITE_PORT,
HOST,
ENABLE_SCREENSHOTS,
SCREENSHOTS_DIR,
ONION_HOST,
CACHE_TIME,
};


@ -2,6 +2,7 @@ import { extname, join } from "path";
import { NSITE_KIND } from "./const.js";
import { requestEvents } from "./nostr.js";
/** Returns all the `d` tags that should be searched for a given path */
export function getSearchPaths(path: string) {
const paths = [path];
@ -24,13 +25,21 @@ export function parseNsiteEvent(event: { pubkey: string; tags: string[][]; creat
};
}
export async function getNsiteBlobs(pubkey: string, path: string, relays: string[]) {
/** Returns the first blob found for a given path */
export async function getNsiteBlob(
pubkey: string,
path: string,
relays: string[],
): Promise<{ sha256: string; path: string; created_at: number } | undefined> {
// NOTE: hack, remove "/" paths since it breaks some relays
const paths = getSearchPaths(path).filter((p) => p !== "/");
const events = await requestEvents(relays, { kinds: [NSITE_KIND], "#d": paths, authors: [pubkey] });
return Array.from(events)
// Sort the found blobs by the order of the paths array
const blobs = Array.from(events)
.map(parseNsiteEvent)
.filter((e) => !!e)
.sort((a, b) => paths.indexOf(a.path) - paths.indexOf(b.path));
return blobs[0];
}
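The part worth noting in getNsiteBlob is the ordering: results are sorted by the position of their path in the candidate list, so the highest-priority candidate that actually has an event wins. A small illustration with invented paths and hashes:

// Illustration only: invented candidate paths (in priority order) and two matching events.
const paths = ["/docs/guide", "/docs/guide/index.html", "/404.html"]; // hypothetical getSearchPaths output
const found = [
  { path: "/404.html", sha256: "aaa111", created_at: 1700000001 },
  { path: "/docs/guide/index.html", sha256: "bbb222", created_at: 1700000002 },
];
found.sort((a, b) => paths.indexOf(a.path) - paths.indexOf(b.path));
const best = found[0]; // the event for "/docs/guide/index.html", the highest-priority path that exists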


@ -2,23 +2,21 @@
import "./polyfill.js";
import Koa from "koa";
import serve from "koa-static";
import path, { basename } from "node:path";
import path from "node:path";
import cors from "@koa/cors";
import fs from "node:fs";
import { fileURLToPath } from "node:url";
import mime from "mime";
import morgan from "koa-morgan";
import send from "koa-send";
import { npubEncode } from "nostr-tools/nip19";
import { spawn } from "node:child_process";
import { nip19 } from "nostr-tools";
import { resolveNpubFromHostname } from "./helpers/dns.js";
import { getNsiteBlobs } from "./events.js";
import { downloadBlob } from "./blossom.js";
import { getNsiteBlob } from "./events.js";
import { streamBlob } from "./blossom.js";
import {
BLOSSOM_SERVERS,
ENABLE_SCREENSHOTS,
HOST,
NSITE_HOMEPAGE,
NSITE_HOMEPAGE_DIR,
@ -31,6 +29,7 @@ import { userDomains, userRelays, userServers } from "./cache.js";
import pool, { getUserBlossomServers, getUserOutboxes } from "./nostr.js";
import logger from "./logger.js";
import { watchInvalidation } from "./invalidation.js";
import { NSITE_KIND } from "./const.js";
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@ -84,69 +83,51 @@ app.use(async (ctx, next) => {
const log = logger.extend(npub);
ctx.state.pubkey = pubkey;
let relays = await userRelays.get<string[] | undefined>(pubkey);
// fetch relays if not in cache
if (!relays) {
log(`Fetching relays`);
relays = await getUserOutboxes(pubkey);
if (relays) {
await userRelays.set(pubkey, relays);
log(`Found ${relays.length} relays`);
} else {
relays = [];
await userServers.set(pubkey, [], 30_000);
log(`Failed to find relays`);
}
}
const relays = (await getUserOutboxes(pubkey)) || [];
// always check subscription relays
relays.push(...SUBSCRIPTION_RELAYS);
if (relays.length === 0) throw new Error("No nostr relays");
if (relays.length === 0) throw new Error("No relays found");
log(`Searching for ${ctx.path}`);
let blobs = await getNsiteBlobs(pubkey, ctx.path, relays);
// fetch servers and events in parallel
let [servers, event] = await Promise.all([
getUserBlossomServers(pubkey, relays).then((s) => s || []),
(async () => {
let e = await getNsiteBlob(pubkey, ctx.path, relays);
if (blobs.length === 0) {
// fallback to custom 404 page
if (!e) {
log(`Looking for custom 404 page`);
blobs = await getNsiteBlobs(pubkey, "/404.html", relays);
e = await getNsiteBlob(pubkey, "/404.html", relays);
}
if (blobs.length === 0) {
log(`Found 0 events`);
return e;
})(),
]);
if (!event) {
log(`Found 0 events for ${ctx.path}`);
ctx.status = 404;
ctx.body = "Not Found";
ctx.body = `Not Found: no events found\npath: ${ctx.path}\nkind: ${NSITE_KIND}\npubkey: ${pubkey}\nrelays: ${relays.join(", ")}`;
return;
}
let servers = await userServers.get<string[] | undefined>(pubkey);
// fetch blossom servers if not in cache
if (!servers) {
log(`Fetching blossom servers`);
servers = await getUserBlossomServers(pubkey, relays);
if (servers) {
await userServers.set(pubkey, servers);
log(`Found ${servers.length} servers`);
} else {
servers = [];
await userServers.set(pubkey, [], 30_000);
log(`Failed to find servers`);
}
}
// always fetch from additional servers
servers.push(...BLOSSOM_SERVERS);
for (const blob of blobs) {
const res = await downloadBlob(blob.sha256, servers);
if (!res) continue;
if (servers.length === 0) throw new Error("Failed to find blossom servers");
const type = mime.getType(blob.path);
try {
const res = await streamBlob(event.sha256, servers);
if (!res) {
ctx.status = 502;
ctx.body = `Failed to find blob\npath: ${event.path}\nsha256: ${event.sha256}\nservers: ${servers.join(", ")}`;
return;
}
const type = mime.getType(event.path);
if (type) ctx.set("content-type", type);
else if (res.headers["content-type"]) ctx.set("content-type", res.headers["content-type"]);
@ -161,13 +142,15 @@ app.use(async (ctx, next) => {
}
// add cache headers
ctx.set("ETag", res.headers["etag"] || `"${blob.sha256}"`);
ctx.set("ETag", res.headers["etag"] || `"${event.sha256}"`);
ctx.set("Cache-Control", "public, max-age=3600");
ctx.set("Last-Modified", res.headers["last-modified"] || new Date(blob.created_at * 1000).toUTCString());
ctx.set("Last-Modified", res.headers["last-modified"] || new Date(event.created_at * 1000).toUTCString());
ctx.status = 200;
ctx.body = res;
return;
} catch (error) {
log(`Failed to stream ${event.sha256}\n${error}`);
}
ctx.status = 500;
@ -185,22 +168,6 @@ if (ONION_HOST) {
});
}
// get screenshots for websites
if (ENABLE_SCREENSHOTS) {
app.use(async (ctx, next) => {
if (ctx.method === "GET" && ctx.path.startsWith("/screenshot")) {
const [pubkey, etx] = basename(ctx.path).split(".");
if (pubkey) {
const { hasScreenshot, takeScreenshot, getScreenshotPath } = await import("./screenshots.js");
if (!(await hasScreenshot(pubkey))) await takeScreenshot(pubkey);
await send(ctx, getScreenshotPath(pubkey));
} else throw Error("Missing pubkey");
} else return next();
});
}
// download homepage
if (NSITE_HOMEPAGE) {
try {
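Because removed and added lines are interleaved in the hunks above, the new request flow is easier to read in one piece. A hedged, simplified sketch of the handler after this commit (ctx is the Koa context from the surrounding middleware; error handling and response headers are omitted, and this is not the exact code):

// Simplified sketch of the request handler after this commit.
const relays = [...((await getUserOutboxes(pubkey)) || []), ...SUBSCRIPTION_RELAYS];
if (relays.length === 0) throw new Error("No relays found");

// Fetch the user's blossom servers and the nsite event for the path in parallel,
// falling back to the custom /404.html event when the path has none.
const [servers, event] = await Promise.all([
  getUserBlossomServers(pubkey, relays).then((s) => s || []),
  getNsiteBlob(pubkey, ctx.path, relays).then((e) => e ?? getNsiteBlob(pubkey, "/404.html", relays)),
]);

if (!event) {
  ctx.status = 404; // no event found for the path or for /404.html
  return;
}

servers.push(...BLOSSOM_SERVERS);
if (servers.length === 0) throw new Error("Failed to find blossom servers");

const res = await streamBlob(event.sha256, servers); // tries each known URL with a 10s abort
if (!res) {
  ctx.status = 502; // no blossom server could provide the blob
  return;
}

ctx.status = 200;
ctx.body = res; // content-type, ETag and Last-Modified are set from res/event in the real handler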


@ -1,14 +1,13 @@
import { nip19 } from "nostr-tools";
import { ENABLE_SCREENSHOTS, NGINX_CACHE_DIR, SUBSCRIPTION_RELAYS } from "./env.js";
import { SUBSCRIPTION_RELAYS } from "./env.js";
import { parseNsiteEvent } from "./events.js";
import pool from "./nostr.js";
import { invalidatePubkeyPath } from "./nginx.js";
import { NSITE_KIND } from "./const.js";
import logger from "./logger.js";
export function watchInvalidation() {
// invalidate nginx cache and screenshots on new events
// invalidate nginx cache on new events
if (SUBSCRIPTION_RELAYS.length > 0) {
logger(`Listening for new nsite events on: ${SUBSCRIPTION_RELAYS.join(", ")}`);
@ -18,16 +17,6 @@ export function watchInvalidation() {
const nsite = parseNsiteEvent(event);
if (nsite) {
const log = logger.extend(nip19.npubEncode(nsite.pubkey));
if (NGINX_CACHE_DIR) {
log(`Invalidating ${nsite.path}`);
await invalidatePubkeyPath(nsite.pubkey, nsite.path);
}
// invalidate screenshot for nsite
if (ENABLE_SCREENSHOTS && (nsite.path === "/" || nsite.path === "/index.html")) {
const { removeScreenshot } = await import("./screenshots.js");
await removeScreenshot(nsite.pubkey);
}
}
} catch (error) {
console.log(`Failed to invalidate ${event.id}`);


@ -1,37 +0,0 @@
import pfs from "node:fs/promises";
import crypto from "node:crypto";
import { join } from "node:path";
import { NGINX_CACHE_DIR } from "./env.js";
import { userDomains } from "./cache.js";
export async function invalidatePubkeyPath(pubkey: string, path: string) {
const iterator = userDomains.iterator?.(undefined);
if (!iterator) return;
const promises: Promise<boolean | undefined>[] = [];
for await (const [domain, key] of iterator) {
if (key === pubkey) {
promises.push(invalidateNginxCache(domain, path));
}
}
await Promise.allSettled(promises);
}
export async function invalidateNginxCache(host: string, path: string) {
if (!NGINX_CACHE_DIR) return Promise.resolve(false);
try {
const key = `${host}${path}`;
const md5 = crypto.createHash("md5").update(key).digest("hex");
// NOTE: hard coded to cache levels 1:2
const cachePath = join(NGINX_CACHE_DIR, md5.slice(-1), md5.slice(-3, -1), md5);
await pfs.rm(cachePath);
console.log(`Invalidated ${key} (${md5})`);
} catch (error) {
// ignore errors
}
}


@ -2,20 +2,50 @@ import { Filter, NostrEvent, SimplePool } from "nostr-tools";
import { getServersFromServerListEvent, USER_BLOSSOM_SERVER_LIST_KIND } from "blossom-client-sdk";
import { LOOKUP_RELAYS } from "./env.js";
import { userRelays, userServers } from "./cache.js";
import logger from "./logger.js";
import { npubEncode } from "nostr-tools/nip19";
const pool = new SimplePool();
const log = logger.extend("nostr");
/** Fetches a pubkey's mailboxes from the cache or relays */
export async function getUserOutboxes(pubkey: string) {
const cached = await userRelays.get(pubkey);
if (cached) return cached;
const mailboxes = await pool.get(LOOKUP_RELAYS, { kinds: [10002], authors: [pubkey] });
if (!mailboxes) return;
return mailboxes.tags.filter((t) => t[0] === "r" && (t[2] === undefined || t[2] === "write")).map((t) => t[1]);
const relays = mailboxes.tags
.filter((t) => t[0] === "r" && (t[2] === undefined || t[2] === "write"))
.map((t) => t[1]);
log(`Found ${relays.length} relays for ${npubEncode(pubkey)}`);
await userRelays.set(pubkey, relays);
return relays;
}
/** Fetches a pubkey's blossom servers from the cache or relays */
export async function getUserBlossomServers(pubkey: string, relays: string[]) {
const blossomServersEvent = await pool.get(relays, { kinds: [USER_BLOSSOM_SERVER_LIST_KIND], authors: [pubkey] });
const cached = await userServers.get(pubkey);
if (cached) return cached;
return blossomServersEvent ? getServersFromServerListEvent(blossomServersEvent).map((u) => u.toString()) : undefined;
const blossomServersEvent = await pool.get(relays, { kinds: [USER_BLOSSOM_SERVER_LIST_KIND], authors: [pubkey] });
const servers = blossomServersEvent
? getServersFromServerListEvent(blossomServersEvent).map((u) => u.toString())
: undefined;
// Save servers if found
if (servers) {
log(`Found ${servers.length} blossom servers for ${npubEncode(pubkey)}`);
await userServers.set(pubkey, servers);
}
return servers;
}
export function requestEvents(relays: string[], filter: Filter) {


@ -1,47 +0,0 @@
import { nip19 } from "nostr-tools";
import puppeteer, { PuppeteerLaunchOptions } from "puppeteer";
import { join } from "path";
import pfs from "fs/promises";
import { npubEncode } from "nostr-tools/nip19";
import { NSITE_PORT, SCREENSHOTS_DIR } from "./env.js";
try {
await pfs.mkdir(SCREENSHOTS_DIR, { recursive: true });
} catch (error) {}
export function getScreenshotPath(pubkey: string) {
return join(SCREENSHOTS_DIR, pubkey + ".png");
}
export async function hasScreenshot(pubkey: string) {
try {
await pfs.stat(getScreenshotPath(pubkey));
return true;
} catch (error) {
return false;
}
}
export async function takeScreenshot(pubkey: string) {
console.log(`${npubEncode(pubkey)}: Generating screenshot`);
const opts: PuppeteerLaunchOptions = {
args: ["--no-sandbox"],
};
if (process.env.PUPPETEER_SKIP_DOWNLOAD) opts.executablePath = "google-chrome-stable";
const browser = await puppeteer.launch(opts);
const page = await browser.newPage();
const url = new URL(`http://${nip19.npubEncode(pubkey)}.localhost:${NSITE_PORT}`);
await page.goto(url.toString());
await page.screenshot({ path: getScreenshotPath(pubkey) });
await browser.close();
}
export async function removeScreenshot(pubkey: string) {
try {
await pfs.rm(getScreenshotPath(pubkey));
console.log(`${npubEncode(pubkey)}: Removed screenshot`);
} catch (error) {}
}


@ -1,23 +0,0 @@
[supervisord]
nodaemon=true
user=root
[program:nginx]
command=nginx -g "daemon off;"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
[program:nsite]
user=nsite
group=nsite
command=node /app
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0