Compare commits

..

No commits in common. "master" and "v0.2.0" have entirely different histories.

39 changed files with 1174 additions and 3172 deletions

2
.env Normal file
View File

@ -0,0 +1,2 @@
CACHE_PATH="in-memory"
LOOKUP_RELAYS=wss://user.kindpag.es,wss://purplepag.es

View File

@ -2,9 +2,6 @@
# can be in-memory, redis:// or sqlite://
CACHE_PATH="in-memory"
# How long to keep cached data (in seconds)
CACHE_TIME=3600
# A list of relays to find users relay lists (10002) and blossom servers (10063)
LOOKUP_RELAYS=wss://user.kindpag.es,wss://purplepag.es
@ -12,30 +9,10 @@ LOOKUP_RELAYS=wss://user.kindpag.es,wss://purplepag.es
SUBSCRIPTION_RELAYS=wss://nos.lol,wss://relay.damus.io
# A list of fallback blossom servers
BLOSSOM_SERVERS="https://nostr.download,https://cdn.satellite.earth"
BLOSSOM_SERVERS=https://cdn.satellite.earth
# The max file size to serve
MAX_FILE_SIZE="2 MB"
MAX_FILE_SIZE='2 MB'
# A nprofile pointer for an nsite to use as the default homepage
# Setting this will override anything in the ./public folder
NSITE_HOMEPAGE=""
# a local directory to download the homepage to
NSITE_HOMEPAGE_DIR="public"
# The public domain of the gateway (optional) (used to detect when to show the nsite homepage)
PUBLIC_DOMAIN="nsite.gateway.com"
# The nip-05 domain to use for name resolution
# NIP05_NAME_DOMAINS="example.com,nostr.other.site"
# If this is set, nsite will return the 'Onion-Location' header in responses
# ONION_HOST=https://<hostname>.onion
# Use a proxy auto config
# PAC_PROXY="file:///path/to/proxy.pac"
# Or set tor and i2p proxies separately
# I2P_PROXY="127.0.0.1:4447"
# TOR_PROXY="127.0.0.1:9050"
# the hostname or ip of the upstream nginx proxy cache
NGINX_CACHE_DIR='/var/nginx/cache'

View File

@ -26,10 +26,10 @@ jobs:
- uses: pnpm/action-setup@v4
- name: Setup Node.js
- name: Setup Node.js 20
uses: actions/setup-node@v4
with:
node-version-file: .nvmrc
node-version: 20
cache: "pnpm"
- name: Install Dependencies

1
.gitignore vendored
View File

@ -3,4 +3,3 @@ build
.env
data
.netrc

1
.nvmrc
View File

@ -1 +0,0 @@
22

14
.vscode/launch.json vendored
View File

@ -14,20 +14,6 @@
"env": {
"DEBUG": "nsite,nsite:*"
}
},
{
"name": "dev-proxy",
"type": "node",
"request": "launch",
"args": ["./src/index.ts"],
"runtimeArgs": ["--loader", "@swc-node/register/esm"],
"cwd": "${workspaceRoot}",
"protocol": "inspector",
"internalConsoleOptions": "openOnSessionStart",
"outputCapture": "std",
"env": {
"DEBUG": "nsite,nsite:*"
}
}
]
}

View File

@ -1,86 +1,4 @@
# nsite-gateway
## 1.0.1
### Patch Changes
- 1473eee: Fix returning setup page when event can't be found for pubkey
## 1.0.0
### Major Changes
- ef5262f: Remove screenshots feature
- ef5262f: Remove nginx cache invalidations
### Minor Changes
- b37664b: Cleanup DNS pubkey resolution
- 9a04f63: Add support for resolving NIP-05 names on set domains
- b2b8e01: Make blossom requests in parallel
### Patch Changes
- ef5262f: Fix race condition when streaming blob
## 0.7.0
### Minor Changes
- 023e03e: Rename package to nsite-gateway
## 0.6.1
### Patch Changes
- 3747037: Add license file
## 0.6.0
### Minor Changes
- c84396e: Replace homepage with simple welcome page
- c84396e: Add option to download another nsite as a homepage
- 2ac847f: Add colors to logging
### Patch Changes
- 5be0822: Fix serving hidden files in .well-known
## 0.5.2
### Patch Changes
- 6704516: Fix package missing build folder
## 0.5.1
### Patch Changes
- ba71f35: bump dependencies
## 0.5.0
### Minor Changes
- db172d4: Add support for custom 404.html pages
## 0.4.0
### Minor Changes
- 7c3c9c0: Add ONION_HOST env variable
## 0.3.0
### Minor Changes
- 145f89d: Add support for ALL_PROXY env variable
- f25e240: Add screenshots for nsites
### Patch Changes
- 87fecfc: Update landing page
# nsite-ts
## 0.2.0

View File

@ -1,8 +0,0 @@
#{
# email your-email@example.com
#}
# This will match example.com and all its subdomains (*.example.com)
example.com, *.example.com {
reverse_proxy nsite:3000
}

View File

@ -1,9 +1,13 @@
FROM node:22-alpine AS base
# syntax=docker/dockerfile:1
FROM node:20-alpine AS base
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable
RUN apk update && apk add --no-cache nginx supervisor
COPY supervisord.conf /etc/supervisord.conf
WORKDIR /app
COPY package.json .
COPY pnpm-lock.yaml .
@ -23,13 +27,24 @@ FROM base AS main
RUN addgroup -S nsite && adduser -S nsite -G nsite
RUN chown -R nsite:nsite /app
# Setup nginx
COPY nginx/nginx.conf /etc/nginx/nginx.conf
COPY nginx/default.conf /etc/nginx/conf.d/default.conf
# setup nsite
COPY --from=prod-deps /app/node_modules /app/node_modules
COPY --from=build ./app/build ./build
COPY ./public ./public
EXPOSE 3000
ENV NSITE_PORT="3000"
VOLUME [ "/var/cache/nginx" ]
CMD ["node", "."]
EXPOSE 80 3000
ENV NSITE_PORT="3000"
ENV NGINX_CACHE_DIR="/var/cache/nginx"
COPY docker-entrypoint.sh /
RUN chmod +x /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]

21
LICENSE
View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2025 hzrd149
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,77 +1,15 @@
# nsite-gateway
# nsite-ts
A Typescript implementation of [static websites on nostr](https://github.com/nostr-protocol/nips/pull/1538)
## Configuring
All configuration is done through the `.env` file. start by copying the example file and modifying it.
```sh
cp .env.example .env
```
## Running with npx
```sh
npx nsite-gateway
```
A Typescript implementation of [nsite](https://github.com/lez/nsite)
## Running with docker-compose
```sh
git clone https://github.com/hzrd149/nsite-gateway.git
cd nsite-gateway
git clone https://github.com/hzrd149/nsite-ts.git
cd nsite-ts
docker compose up
```
Once the service is running you can access the gateway at `http://localhost:3000`
Once the service is running you can access the cached version at `http://localhost:8080`
## Running with docker
The `ghcr.io/hzrd149/nsite-gateway` image can be used to run a http instance locally
```sh
docker run --rm -it --name nsite -p 3000:3000 ghcr.io/hzrd149/nsite-gateway
```
## Tor setup
First you need to install tor (`sudo apt install tor` on debian systems) or [Documentation](https://community.torproject.org/onion-services/setup/install/)
Then enable the tor service
```sh
sudo systemctl enable tor
sudo systemctl start tor
```
### Setup hidden service
Modify the torrc file to enable `HiddenServiceDir` and `HiddenServicePort`
```
HiddenServiceDir /var/lib/tor/hidden_service/
HiddenServicePort 80 127.0.0.1:8080
```
Then restart tor
```sh
sudo systemctl restart tor
```
Next get the onion address using `cat /var/lib/tor/hidden_service/hostname` and set the `ONION_HOST` variable in the `.env` file
```sh
# don't forget to start with http://
ONION_HOST="http://q457mvdt5smqj726m4lsqxxdyx7r3v7gufzt46zbkop6mkghpnr7z3qd.onion"
```
### Connecting to Tor and I2P relays and blossom servers
Install Tor ([Documentation](https://community.torproject.org/onion-services/setup/install/)) and optionally I2Pd ([Documentation](https://i2pd.readthedocs.io/en/latest/user-guide/install/)) and then add the `TOR_PROXY` and `I2P_PROXY` variables to the `.env` file
```sh
TOR_PROXY=127.0.0.1:9050
I2P_PROXY=127.0.0.1:4447
```
If you need to test, you can directly access the ts server at `http://localhost:3000`

View File

@ -1,13 +0,0 @@
[Unit]
Description=nsite Server
After=network.target
[Service]
Type=simple
WorkingDirectory=/<path-to>/nsite-gateway
ExecStart=/usr/bin/node .
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target

View File

@ -1,36 +1,17 @@
services:
redis:
image: redis:alpine
restart: unless-stopped
command: redis-server --save 60 1 --loglevel warning
volumes:
- redis-data:/data
version: "3.7"
services:
nsite:
build: .
image: ghcr.io/hzrd149/nsite-gateway:master
restart: unless-stopped
image: ghcr.io/hzrd149/nsite-ts:master
environment:
LOOKUP_RELAYS: wss://user.kindpag.es,wss://purplepag.es
SUBSCRIPTION_RELAYS: wss://nostrue.com/,wss://nos.lol/,wss://relay.damus.io/,wss://purplerelay.com/
CACHE_PATH: redis://redis:6379
depends_on:
- redis
caddy:
image: caddy:alpine
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
- caddy_data:/data
- caddy_config:/config
depends_on:
- nsite
volumes:
redis-data:
caddy_data:
caddy_config:
- type: tmpfs
target: /var/cache/nginx
tmpfs:
size: 100M
ports:
- 8080:80
- 3000:3000

5
docker-entrypoint.sh Executable file
View File

@ -0,0 +1,5 @@
#!/bin/sh
# Container entrypoint: the nginx cache dir is mounted as a volume/tmpfs,
# so it must be (re)chowned to the nginx user on every start.
chown -R nginx:nginx /var/cache/nginx
# Replace this shell with the CMD (supervisord by default) so it runs as PID 1.
exec "$@"

22
nginx/default.conf Normal file
View File

@ -0,0 +1,22 @@
server {
listen 80;
listen [::]:80;
server_name nsite;
location / {
proxy_cache request_cache;
proxy_cache_valid 200 60m;
proxy_cache_valid 404 10m;
proxy_cache_key $host$uri;
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
add_header X-Cache $upstream_cache_status;
add_header X-Cache-Status $upstream_status;
expires 30d;
add_header Cache-Control "public, no-transform";
proxy_set_header Host $host;
proxy_pass http://127.0.0.1:3000;
}
}

33
nginx/nginx.conf Normal file
View File

@ -0,0 +1,33 @@
user nginx;
worker_processes auto;
error_log /dev/stderr notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
# add custom cache
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=request_cache:10m max_size=10g inactive=60m use_temp_path=off;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
gzip on;
include /etc/nginx/conf.d/*.conf;
}

View File

@ -1,6 +1,6 @@
{
"name": "nsite-gateway",
"version": "1.0.1",
"name": "nsite-ts",
"version": "0.2.0",
"description": "A blossom server implementation written in Typescript",
"main": "build/index.js",
"type": "module",
@ -8,7 +8,6 @@
"license": "MIT",
"scripts": {
"start": "node build/index.js",
"prepack": "tsc",
"build": "tsc",
"dev": "nodemon -i '**/data/**' --exec 'node' --loader @swc-node/register/esm src/index.ts",
"format": "prettier -w ."
@ -19,48 +18,38 @@
"public"
],
"dependencies": {
"@keyv/redis": "^4.3.2",
"@keyv/redis": "^3.0.1",
"@keyv/sqlite": "^4.0.1",
"@koa/cors": "^5.0.0",
"blossom-client-sdk": "^3.0.1",
"debug": "^4.4.0",
"dotenv": "^16.4.7",
"follow-redirects": "^1.15.9",
"keyv": "^5.3.2",
"koa": "^2.16.0",
"blossom-client-sdk": "^1.1.1",
"dotenv": "^16.4.5",
"follow-redirects": "^1.15.6",
"keyv": "^5.0.1",
"koa": "^2.15.3",
"koa-morgan": "^1.0.1",
"koa-send": "^5.0.1",
"koa-static": "^5.0.0",
"mime": "^4.0.7",
"nostr-tools": "^2.12.0",
"nsite-cli": "^0.1.16",
"pac-proxy-agent": "^7.2.0",
"proxy-agent": "^6.5.0",
"websocket-polyfill": "1.0.0",
"ws": "^8.18.1",
"mime": "^4.0.4",
"nostr-tools": "^2.7.2",
"websocket-polyfill": "^1.0.0",
"ws": "^8.18.0",
"xbytes": "^1.9.1"
},
"devDependencies": {
"@changesets/cli": "^2.28.1",
"@swc-node/register": "^1.10.10",
"@swc/core": "^1.11.16",
"@types/better-sqlite3": "^7.6.13",
"@types/debug": "^4.1.12",
"@changesets/cli": "^2.27.8",
"@swc-node/register": "^1.9.0",
"@swc/core": "^1.5.0",
"@types/better-sqlite3": "^7.6.9",
"@types/follow-redirects": "^1.14.4",
"@types/koa": "^2.15.0",
"@types/koa": "^2.14.0",
"@types/koa-morgan": "^1.0.8",
"@types/koa-send": "^4.1.6",
"@types/koa-static": "^4.0.4",
"@types/koa__cors": "^5.0.0",
"@types/koa__router": "^12.0.4",
"@types/node": "^20.17.30",
"@types/proxy-from-env": "^1.0.4",
"@types/ws": "^8.18.1",
"esbuild": "^0.25.2",
"nodemon": "^3.1.9",
"pkg": "^5.8.1",
"prettier": "^3.5.3",
"typescript": "^5.8.3"
"@types/node": "^20.11.19",
"@types/ws": "^8.5.10",
"nodemon": "^3.0.3",
"prettier": "^3.3.3",
"typescript": "^5.3.3"
},
"resolutions": {
"websocket-polyfill": "1.0.0"

2772
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@ -1,51 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>404 - Page Not Found</title>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
line-height: 1.6;
color: #333;
max-width: 800px;
margin: 40px auto;
padding: 0 20px;
background-color: #f5f5f5;
}
.container {
background-color: white;
padding: 30px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
h1 {
color: #2c3e50;
margin-bottom: 20px;
}
.info {
background-color: #f8f9fa;
border-left: 4px solid #dc3545;
padding: 15px;
margin: 20px 0;
}
</style>
</head>
<body>
<div class="container">
<h1>404 - Page Not Found</h1>
<div class="info">
<p>We couldn't find an nsite for this domain.</p>
<p>This could mean either:</p>
<ul>
<li>The domain is not configured to point to an nsite</li>
</ul>
</div>
<p>
For more information about setting up an nsite, please refer to the
<a href="https://github.com/hzrd149/nsite-gateway">documentation</a>
</p>
</div>
</body>
</html>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

View File

@ -3,65 +3,30 @@
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Welcome to nsite-gateway</title>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
line-height: 1.6;
color: #333;
max-width: 800px;
margin: 40px auto;
padding: 0 20px;
background-color: #f5f5f5;
<title>nsite</title>
<script type="importmap">
{
"imports": {
"blossom-client-sdk": "https://esm.run/blossom-client-sdk",
"nostr-tools": "https://esm.run/nostr-tools"
}
}
.container {
background-color: white;
padding: 30px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
h1 {
color: #2c3e50;
margin-bottom: 20px;
}
.info {
background-color: #f8f9fa;
border-left: 4px solid #007bff;
padding: 15px;
margin: 20px 0;
}
code {
background-color: #f1f1f1;
padding: 2px 6px;
border-radius: 3px;
font-family: Monaco, monospace;
user-select: all;
word-break: break-all;
}
</style>
</script>
</head>
<body>
<div class="container">
<h1>Welcome to nsite-gateway</h1>
<p>If you're seeing this page, nsite-gateway has been successfully installed and is working.</p>
<h1>nsite-ts</h1>
<a href="https://github.com/hzrd149/nsite-ts" target="_blank">Source Code</a>
<div class="info">
<p>
To set a custom homepage, set the <code>NSITE_HOMEPAGE</code> environment variable to your desired nprofile
</p>
<p>
Example:
<br />
<code
>NSITE_HOMEPAGE=nprofile1qqspspfsrjnurtf0jdyswm8jstustv7pu4qw3pn4u99etptvgzm4uvcpz9mhxue69uhkummnw3e82efwvdhk6qg5waehxw309aex2mrp0yhxgctdw4eju6t04mzfem</code
>
</p>
<h2>Latest nsites:</h2>
<div id="sites"></div>
<template id="site">
<div class="site">
<a class="pubkey link" target="_blank"></a>
<span class="date"></span>
</div>
</template>
<p>
For more information about configuring nsite-gateway, please refer to the
<a href="https://github.com/hzrd149/nsite-gateway">documentation</a>
</p>
</div>
<script type="module" src="./main.js"></script>
</body>
</html>

32
public/main.js Normal file
View File

@ -0,0 +1,32 @@
import { nip19, SimplePool } from "nostr-tools";

// Pubkeys already rendered — used to show each site at most once.
const seen = new Set();

/**
 * Render one nsite entry from a kind-34128 event: clone the #site
 * <template>, fill in the npub and subdomain link, append to #sites.
 * Events from pubkeys we have already shown are ignored.
 */
function addSite(event) {
  if (seen.has(event.pubkey)) return;
  seen.add(event.pubkey);

  try {
    const npub = nip19.npubEncode(event.pubkey);
    const fragment = document.getElementById("site").content.cloneNode(true);

    fragment.querySelector(".pubkey").textContent = npub;
    // Each nsite is served on its own <npub>.<gateway-host> subdomain.
    fragment.querySelector(".link").href = new URL("/", `${location.protocol}//${npub}.${location.host}`).toString();

    document.getElementById("sites").appendChild(fragment);
  } catch (error) {
    console.log("Failed to add site", event);
    console.log(error);
  }
}

const pool = new SimplePool();

console.log("Loading sites");
pool.subscribeMany(
  ["wss://relay.damus.io", "wss://nos.lol", "wss://nostr.wine"],
  [{ kinds: [34128], "#d": ["/index.html"] }],
  { onevent: addSite },
);

46
public/upload/index.html Normal file
View File

@ -0,0 +1,46 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>nsite</title>
<script type="importmap">
{
"imports": {
"blossom-client-sdk": "https://esm.run/blossom-client-sdk",
"nostr-tools": "https://esm.run/nostr-tools"
}
}
</script>
</head>
<body>
<label>relays</label>
<br />
<textarea type="text" id="relays" cols="50" rows="4"></textarea>
<br />
<br />
<label>blossom servers</label>
<br />
<textarea type="text" id="servers" cols="50" rows="4"></textarea>
<br />
<br />
<input type="file" id="files" webkitdirectory directory multiple />
<button id="upload-button">Upload nsite</button>
<div
id="log"
style="
max-height: 50em;
max-width: 80em;
width: 100%;
border: 1px solid gray;
min-height: 8em;
margin: 0.5em 0;
overflow: auto;
font-size: 0.8em;
gap: 0.1em;
white-space: pre;
"
></div>
<script type="module" src="/upload/upload.js"></script>
</body>
</html>

142
public/upload/upload.js Normal file
View File

@ -0,0 +1,142 @@
import { multiServerUpload, BlossomClient } from "blossom-client-sdk";
import { SimplePool } from "nostr-tools";
// DOM handles used throughout the uploader page.
const logContainer = document.getElementById("log");

/** Append one line of text to the on-page log panel. */
function log(...args) {
  const line = document.createElement("div");
  line.innerText = args.join(" ");
  logContainer.appendChild(line);
}

const uploadButton = document.getElementById("upload-button");

/** @type {HTMLInputElement} */
const filesInput = document.getElementById("files");
/**
 * Wraps the callback-based FileSystemFileEntry.file() API in a promise.
 * @param {FileSystemFileEntry} fileEntry
 * @returns {Promise<File>} resolves with the entry's underlying File
 */
export function readFileSystemFile(fileEntry) {
  return new Promise((res, rej) => {
    fileEntry.file(
      (file) => res(file),
      (err) => rej(err),
    );
  });
}
/**
 * Wraps the callback-based FileSystemDirectoryEntry reader in a promise.
 * NOTE(review): readEntries() can return results in batches on some
 * browsers; this assumes one call returns everything — confirm.
 * @param {FileSystemDirectoryEntry} directory
 * @returns {Promise<FileSystemEntry[]>} the directory's child entries
 */
export function readFileSystemDirectory(directory) {
  return new Promise((res, rej) => {
    directory.createReader().readEntries(
      (entries) => res(entries),
      (err) => rej(err),
    );
  });
}
/**
 * Recursively collects files from a FileSystemEntry (file or directory),
 * computing the blossom sha256 hash for each file found.
 * @param {FileSystemEntry} entry
 * @returns {Promise<{file: File, path: string, sha256: string}[]>}
 */
async function readFileSystemEntry(entry) {
  const files = [];

  if (entry instanceof FileSystemFileEntry && entry.isFile) {
    try {
      const file = await readFileSystemFile(entry);
      const sha256 = await BlossomClient.getFileSha256(file);
      const path = entry.fullPath;
      files.push({ file, path, sha256 });
    } catch (e) {
      // fix: add the missing space so the message reads "Failed to add /path"
      log("Failed to add " + entry.fullPath);
      log(e.message);
    }
  } else if (entry instanceof FileSystemDirectoryEntry && entry.isDirectory) {
    // Recurse into subdirectories, flattening the results.
    const entries = await readFileSystemDirectory(entry);
    for (const e of entries) files.push(...(await readFileSystemEntry(e)));
  }

  return files;
}
/**
 * Reads a FileList (from an <input type="file" webkitdirectory>) into
 * upload descriptors, computing the blossom sha256 for each file.
 * @param {FileList} list
 * @returns {Promise<{file: File, path: string, sha256: string}[]>}
 */
async function readFileList(list) {
  const files = [];
  for (const file of list) {
    // webkitRelativePath is populated when a whole directory was selected
    const path = file.webkitRelativePath ? file.webkitRelativePath : file.name;
    const sha256 = await BlossomClient.getFileSha256(file);
    files.push({ file, path, sha256 });
  }
  return files;
}
const pool = new SimplePool();

/**
 * Uploads each file to the blossom servers and publishes a kind-34128
 * nsite event for it once the first server upload succeeds.
 * @param {{file: File, path: string, sha256: string}[]} files
 * @param {import("blossom-client-sdk").Signer} signer
 * @param {*} auth
 * @param {string[]} servers
 * @param {string[]} relays
 */
async function uploadFiles(files, signer, auth, servers, relays) {
  for (const { file, path, sha256 } of files) {
    try {
      const upload = multiServerUpload(servers, file, signer, auth);

      let published = false;
      for await (let { blob } of upload) {
        if (!published) {
          const signed = await signer({
            kind: 34128,
            content: "",
            created_at: Math.round(Date.now() / 1000),
            tags: [
              ["d", path],
              ["x", sha256],
            ],
          });
          await pool.publish(relays, signed);
          log("Published", path, sha256, signed.id);
          // fix: flag was never set, so the event was re-published for
          // every server that accepted the blob instead of exactly once
          published = true;
        }
      }
    } catch (error) {
      log(`Failed to upload ${path}`, error);
    }
  }
}
// Main upload flow: read the selected files, normalize their paths, and
// upload/publish them using the NIP-07 signer from the browser extension.
uploadButton.addEventListener("click", async () => {
  if (!window.nostr) return alert("Missing NIP-07 signer");

  const signer = (draft) => window.nostr.signEvent(draft);

  // fix: trim entries and drop empties — a trailing newline or comma in
  // the textarea would otherwise produce "" relay/server URLs.
  const relays = document
    .getElementById("relays")
    .value.split(/\n|,/)
    .map((s) => s.trim())
    .filter(Boolean);
  const servers = document
    .getElementById("servers")
    .value.split(/\n|,/)
    .map((s) => s.trim())
    .filter(Boolean);

  try {
    if (filesInput.files) {
      const files = await readFileList(filesInput.files);

      // strip leading dir
      for (const file of files) file.path = file.path.replace(/^[^\/]+\//, "/");

      log(`Found ${files.length} files`);

      await uploadFiles(files, signer, undefined, servers, relays);
    }
  } catch (error) {
    alert(`Failed to upload files: ${error.message}`);
  }
});

View File

@ -1,67 +1,35 @@
import { IncomingMessage } from "node:http";
import { getServersFromServerListEvent, USER_BLOSSOM_SERVER_LIST_KIND } from "blossom-client-sdk";
import { MAX_FILE_SIZE } from "./env.js";
import { BLOSSOM_SERVERS, MAX_FILE_SIZE } from "./env.js";
import { makeRequestWithAbort } from "./helpers/http.js";
import { blobURLs } from "./cache.js";
import logger from "./logger.js";
import pool from "./nostr.js";
const log = logger.extend("blossom");
export async function getUserBlossomServers(pubkey: string, relays: string[]) {
const blossomServersEvent = await pool.get(relays, { kinds: [USER_BLOSSOM_SERVER_LIST_KIND], authors: [pubkey] });
/** Checks all servers for a blob and returns the URLs */
export async function findBlobURLs(sha256: string, servers: string[]): Promise<string[]> {
const cache = await blobURLs.get(sha256);
if (cache) return cache;
const urls = await Promise.all(
servers.map(async (server) => {
const url = new URL(sha256, server);
const check = await fetch(url, { method: "HEAD" }).catch(() => null);
if (check?.status === 200) return url.toString();
else return null;
}),
);
const filtered = urls.filter((url) => url !== null);
log(`Found ${filtered.length}/${servers.length} URLs for ${sha256}`);
await blobURLs.set(sha256, filtered);
return filtered;
return blossomServersEvent ? getServersFromServerListEvent(blossomServersEvent).map((u) => u.toString()) : undefined;
}
/** Downloads a file from multiple servers */
export async function streamBlob(sha256: string, servers: string[]): Promise<IncomingMessage | undefined> {
if (servers.length === 0) return undefined;
// First find all available URLs
const urls = await findBlobURLs(sha256, servers);
if (urls.length === 0) return undefined;
// Try each URL sequentially with timeout
for (const urlString of urls) {
const controller = new AbortController();
let res: IncomingMessage | undefined = undefined;
// TODO: download the file to /tmp and verify it
export async function downloadFile(sha256: string, servers = BLOSSOM_SERVERS) {
for (const server of servers) {
try {
// Set up timeout to abort after 10s
const timeout = setTimeout(() => {
controller.abort();
}, 10_000);
const url = new URL(urlString);
const response = await makeRequestWithAbort(url, controller);
res = response;
clearTimeout(timeout);
const { response } = await makeRequestWithAbort(new URL(sha256, server));
if (!response.statusCode) throw new Error("Missing headers or status code");
const size = response.headers["content-length"];
if (size && parseInt(size) > MAX_FILE_SIZE) throw new Error("File too large");
if (size && parseInt(size) > MAX_FILE_SIZE) {
throw new Error("File too large");
}
if (response.statusCode >= 200 && response.statusCode < 300) return response;
if (response.statusCode >= 200 && response.statusCode < 300) {
return response;
} else {
// Consume response data to free up memory
response.resume();
}
} catch (error) {
if (res) res.resume();
continue; // Try next URL if this one fails
// ignore error, try next server
}
}
}

View File

@ -1,19 +1,18 @@
import Keyv, { KeyvOptions } from "keyv";
import { CACHE_PATH, CACHE_TIME } from "./env.js";
import logger from "./logger.js";
import { ParsedEvent } from "./events.js";
import Keyv from "keyv";
import pfs from "fs/promises";
import { CACHE_PATH } from "./env.js";
const log = logger.extend("cache");
try {
await pfs.mkdir("data");
} catch (error) {}
async function createStore() {
if (!CACHE_PATH || CACHE_PATH === "in-memory") return undefined;
else if (CACHE_PATH.startsWith("redis://")) {
const { default: KeyvRedis } = await import("@keyv/redis");
log(`Using redis cache at ${CACHE_PATH}`);
return new KeyvRedis(CACHE_PATH);
} else if (CACHE_PATH.startsWith("sqlite://")) {
const { default: KeyvSqlite } = await import("@keyv/sqlite");
log(`Using sqlite cache at ${CACHE_PATH}`);
return new KeyvSqlite(CACHE_PATH);
}
}
@ -21,49 +20,32 @@ async function createStore() {
const store = await createStore();
store?.on("error", (err) => {
log("Connection Error", err);
console.log("Connection Error", err);
process.exit(1);
});
const json: KeyvOptions = { serialize: JSON.stringify, deserialize: JSON.parse };
const opts: KeyvOptions = store ? { store } : {};
const opts = store ? { store } : {};
/** A cache that maps a domain to a pubkey ( domain -> pubkey ) */
export const pubkeyDomains = new Keyv<string | undefined>({
/** domain -> pubkey */
export const userDomains = new Keyv({
...opts,
...json,
namespace: "domains",
ttl: CACHE_TIME * 1000,
// cache domains for an hour
ttl: 60 * 60 * 1000,
});
/** A cache that maps a pubkey to a set of blossom servers ( pubkey -> servers ) */
export const pubkeyServers = new Keyv<string[] | undefined>({
/** pubkey -> blossom servers */
export const userServers = new Keyv({
...opts,
...json,
namespace: "servers",
ttl: CACHE_TIME * 1000,
// cache servers for an hour
ttl: 60 * 60 * 1000,
});
/** A cache that maps a pubkey to a set of relays ( pubkey -> relays ) */
export const pubkeyRelays = new Keyv<string[] | undefined>({
/** pubkey -> relays */
export const userRelays = new Keyv({
...opts,
...json,
namespace: "relays",
ttl: CACHE_TIME * 1000,
});
/** A cache that maps a pubkey + path to sha256 hash of the blob ( pubkey/path -> sha256 ) */
export const pathBlobs = new Keyv<ParsedEvent | undefined>({
...opts,
...json,
namespace: "paths",
ttl: CACHE_TIME * 1000,
});
/** A cache that maps a sha256 hash to a set of URLs that had the blob ( sha256 -> URLs ) */
export const blobURLs = new Keyv<string[] | undefined>({
...opts,
...json,
namespace: "blobs",
ttl: CACHE_TIME * 1000,
// cache relays for an hour
ttl: 60 * 60 * 1000,
});

View File

@ -1,95 +0,0 @@
import dns from "node:dns";
import { nip05, nip19 } from "nostr-tools";
import { pubkeyDomains as pubkeyDomains } from "./cache.js";
import logger from "./logger.js";
import { NIP05_NAME_DOMAINS } from "./env.js";
export function getCnameRecords(hostname: string): Promise<string[]> {
return new Promise<string[]>((res, rej) => {
dns.resolveCname(hostname, (err, records) => {
if (err) rej(err);
else res(records);
});
});
}
export function getTxtRecords(hostname: string): Promise<string[][]> {
return new Promise<string[][]>((res, rej) => {
dns.resolveTxt(hostname, (err, records) => {
if (err) rej(err);
else res(records);
});
});
}
function extractPubkeyFromHostname(hostname: string): string | undefined {
const [npub] = hostname.split(".");
if (npub.startsWith("npub")) {
const parsed = nip19.decode(npub);
if (parsed.type !== "npub") throw new Error("Expected npub");
return parsed.data;
}
}
const log = logger.extend("DNS");
export async function resolvePubkeyFromHostname(hostname: string): Promise<string | undefined> {
if (hostname === "localhost") return undefined;
const cached = await pubkeyDomains.get(hostname);
if (cached) return cached;
// check if domain contains an npub
let pubkey = extractPubkeyFromHostname(hostname);
if (!pubkey) {
// try to get npub from CNAME
try {
const cnameRecords = await getCnameRecords(hostname);
for (const cname of cnameRecords) {
const p = extractPubkeyFromHostname(cname);
if (p) {
pubkey = p;
break;
}
}
} catch (error) {}
}
if (!pubkey) {
// Try to get npub from TXT records
try {
const txtRecords = await getTxtRecords(hostname);
for (const txt of txtRecords) {
for (const entry of txt) {
const p = extractPubkeyFromHostname(entry);
if (p) {
pubkey = p;
break;
}
}
}
} catch (error) {}
}
// Try to get npub from NIP-05
if (!pubkey && NIP05_NAME_DOMAINS) {
for (const domain of NIP05_NAME_DOMAINS) {
try {
const [name] = hostname.split(".");
const result = await nip05.queryProfile(name + "@" + domain);
if (result) {
pubkey = result.pubkey;
break;
}
} catch (err) {}
}
}
log(`Resolved ${hostname} to ${pubkey}`);
await pubkeyDomains.set(hostname, pubkey);
return pubkey;
}

View File

@ -1,53 +1,16 @@
import "dotenv/config";
import xbytes from "xbytes";
const NSITE_HOMEPAGE = process.env.NSITE_HOMEPAGE;
const NSITE_HOMEPAGE_DIR = process.env.NSITE_HOMEPAGE_DIR || "public";
const LOOKUP_RELAYS = process.env.LOOKUP_RELAYS?.split(",").map((u) => u.trim()) ?? [
"wss://user.kindpag.es/",
"wss://purplepag.es/",
];
const SUBSCRIPTION_RELAYS = process.env.SUBSCRIPTION_RELAYS?.split(",").map((u) => u.trim()) ?? [
"wss://nos.lol",
"wss://relay.damus.io",
];
const SUBSCRIPTION_RELAYS = process.env.SUBSCRIPTION_RELAYS?.split(",").map((u) => u.trim()) ?? [];
const BLOSSOM_SERVERS = process.env.BLOSSOM_SERVERS?.split(",").map((u) => u.trim()) ?? [];
const MAX_FILE_SIZE = process.env.MAX_FILE_SIZE ? xbytes.parseSize(process.env.MAX_FILE_SIZE) : Infinity;
const NGINX_CACHE_DIR = process.env.NGINX_CACHE_DIR;
const CACHE_PATH = process.env.CACHE_PATH;
const CACHE_TIME = process.env.CACHE_TIME ? parseInt(process.env.CACHE_TIME) : 60 * 60;
const NIP05_NAME_DOMAINS = process.env.NIP05_NAME_DOMAINS?.split(",").map((d) => d.trim());
const PUBLIC_DOMAIN = process.env.PUBLIC_DOMAIN;
const PAC_PROXY = process.env.PAC_PROXY;
const TOR_PROXY = process.env.TOR_PROXY;
const I2P_PROXY = process.env.I2P_PROXY;
const NSITE_HOST = process.env.NSITE_HOST || "0.0.0.0";
const NSITE_PORT = process.env.NSITE_PORT ? parseInt(process.env.NSITE_PORT) : 3000;
const HOST = `${NSITE_HOST}:${NSITE_PORT}`;
const ONION_HOST = process.env.ONION_HOST;
export {
NSITE_HOMEPAGE,
NSITE_HOMEPAGE_DIR,
SUBSCRIPTION_RELAYS,
LOOKUP_RELAYS,
BLOSSOM_SERVERS,
MAX_FILE_SIZE,
CACHE_PATH,
PAC_PROXY,
TOR_PROXY,
I2P_PROXY,
NSITE_HOST,
NSITE_PORT,
HOST,
ONION_HOST,
CACHE_TIME,
NIP05_NAME_DOMAINS,
PUBLIC_DOMAIN,
};
export { SUBSCRIPTION_RELAYS, LOOKUP_RELAYS, BLOSSOM_SERVERS, MAX_FILE_SIZE, NGINX_CACHE_DIR, CACHE_PATH };

View File

@ -1,16 +1,7 @@
import { extname, join } from "path";
import { extname, isAbsolute, join } from "path";
import { NSITE_KIND } from "./const.js";
import { requestEvents } from "./nostr.js";
import { pathBlobs } from "./cache.js";
export type ParsedEvent = {
pubkey: string;
path: string;
sha256: string;
created_at: number;
};
/** Returns all the `d` tags that should be searched for a given path */
export function getSearchPaths(path: string) {
const paths = [path];
@ -20,7 +11,7 @@ export function getSearchPaths(path: string) {
return paths.filter((p) => !!p);
}
export function parseNsiteEvent(event: { pubkey: string; tags: string[][]; created_at: number }) {
export function parseNsiteEvent(event: { pubkey: string; tags: string[][] }) {
const path = event.tags.find((t) => t[0] === "d" && t[1])?.[1];
const sha256 = event.tags.find((t) => t[0] === "x" && t[1])?.[1];
@ -29,29 +20,16 @@ export function parseNsiteEvent(event: { pubkey: string; tags: string[][]; creat
pubkey: event.pubkey,
path: join("/", path),
sha256,
created_at: event.created_at,
};
}
/** Returns the first blob found for a given path */
export async function getNsiteBlob(pubkey: string, path: string, relays: string[]): Promise<ParsedEvent | undefined> {
const key = pubkey + path;
const cached = await pathBlobs.get(key);
if (cached) return cached;
export async function getNsiteBlobs(pubkey: string, path: string, relays: string[]) {
// NOTE: hack, remove "/" paths since it breaks some relays
const paths = getSearchPaths(path).filter((p) => p !== "/");
const events = await requestEvents(relays, { kinds: [NSITE_KIND], "#d": paths, authors: [pubkey] });
// Sort the found blobs by the order of the paths array
const options = Array.from(events)
return Array.from(events)
.map(parseNsiteEvent)
.filter((e) => !!e)
.sort((a, b) => paths.indexOf(a.path) - paths.indexOf(b.path));
// Remember the blob for this path
if (options.length > 0) await pathBlobs.set(key, options[0]);
return options[0];
}

59
src/helpers/dns.ts Normal file
View File

@ -0,0 +1,59 @@
import dns from "node:dns";
import { nip19 } from "nostr-tools";
/** Resolves the CNAME records for a hostname, rejecting on any DNS error. */
export function getCnameRecords(hostname: string) {
  return new Promise<string[]>((resolve, reject) => {
    dns.resolveCname(hostname, (error, records) => {
      if (error) return reject(error);
      resolve(records);
    });
  });
}
/** Resolves the TXT records for a hostname, rejecting on any DNS error. */
export function getTxtRecords(hostname: string) {
  return new Promise<string[][]>((resolve, reject) => {
    dns.resolveTxt(hostname, (error, records) => {
      if (error) return reject(error);
      resolve(records);
    });
  });
}
/**
 * Extracts a pubkey from the first label of a hostname when it is an npub,
 * e.g. "npub1xyz....example.com" -> hex pubkey.
 *
 * Returns undefined when the label is not a valid npub. Previously a label
 * that merely started with "npub" but failed bech32 decoding (or decoded to
 * a different entity type) made nip19.decode throw, and the direct caller in
 * resolveNpubFromHostname does not guard that call — so a stray hostname
 * could crash request handling with a 500 instead of falling through to 404.
 */
function extractNpubFromHostname(hostname: string) {
  const [label] = hostname.split(".");
  if (!label.startsWith("npub")) return undefined;

  try {
    const parsed = nip19.decode(label);
    return parsed.type === "npub" ? parsed.data : undefined;
  } catch (error) {
    // malformed bech32 — treat as "no npub found"
    return undefined;
  }
}
export async function resolveNpubFromHostname(hostname: string) {
// check if domain contains an npub
let pubkey = extractNpubFromHostname(hostname);
if (pubkey) return pubkey;
if (hostname === "localhost") return undefined;
// try to get npub from CNAME or TXT records
try {
const cnameRecords = await getCnameRecords(hostname);
for (const cname of cnameRecords) {
const p = extractNpubFromHostname(cname);
if (p) return p;
}
} catch (error) {}
try {
const txtRecords = await getTxtRecords(hostname);
for (const txt of txtRecords) {
for (const entry of txt) {
const p = extractNpubFromHostname(entry);
if (p) return p;
}
}
} catch (error) {}
}

View File

@ -2,20 +2,16 @@ import { IncomingMessage } from "http";
import followRedirects from "follow-redirects";
const { http, https } = followRedirects;
import agent from "../proxy.js";
export function makeRequestWithAbort(url: URL, controller: AbortController) {
return new Promise<IncomingMessage>((res, rej) => {
controller.signal.addEventListener("abort", () => rej(new Error("Aborted")));
export function makeRequestWithAbort(url: URL) {
return new Promise<{ response: IncomingMessage; controller: AbortController }>((res, rej) => {
const cancelController = new AbortController();
const request = (url.protocol === "https:" ? https : http).get(
url,
{
signal: controller.signal,
agent,
signal: cancelController.signal,
},
(response) => {
res(response);
res({ response, controller: cancelController });
},
);
request.on("error", (err) => rej(err));

View File

@ -8,27 +8,15 @@ import fs from "node:fs";
import { fileURLToPath } from "node:url";
import mime from "mime";
import morgan from "koa-morgan";
import { npubEncode } from "nostr-tools/nip19";
import { nip19 } from "nostr-tools";
import { resolvePubkeyFromHostname } from "./dns.js";
import { getNsiteBlob } from "./events.js";
import { streamBlob } from "./blossom.js";
import {
BLOSSOM_SERVERS,
HOST,
NSITE_HOMEPAGE,
NSITE_HOMEPAGE_DIR,
NSITE_HOST,
NSITE_PORT,
ONION_HOST,
PUBLIC_DOMAIN,
SUBSCRIPTION_RELAYS,
} from "./env.js";
import pool, { getUserBlossomServers, getUserOutboxes } from "./nostr.js";
import logger from "./logger.js";
import { watchInvalidation } from "./invalidation.js";
import { resolveNpubFromHostname } from "./helpers/dns.js";
import { getNsiteBlobs, parseNsiteEvent } from "./events.js";
import { downloadFile, getUserBlossomServers } from "./blossom.js";
import { BLOSSOM_SERVERS, NGINX_CACHE_DIR, SUBSCRIPTION_RELAYS } from "./env.js";
import { userDomains, userRelays, userServers } from "./cache.js";
import { NSITE_KIND } from "./const.js";
import { invalidatePubkeyPath } from "./nginx.js";
import pool, { getUserOutboxes, subscribeForEvents } from "./nostr.js";
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@ -55,142 +43,141 @@ app.use(async (ctx, next) => {
} catch (err) {
console.log(err);
ctx.status = 500;
if (err instanceof Error) ctx.body = { message: err.message };
ctx.body = { message: "Something went wrong" };
}
});
// handle nsite requests
app.use(async (ctx, next) => {
let pubkey = await resolvePubkeyFromHostname(ctx.hostname);
let fallthrough = false;
if (!pubkey && NSITE_HOMEPAGE && (!PUBLIC_DOMAIN || ctx.hostname === PUBLIC_DOMAIN)) {
const parsed = nip19.decode(NSITE_HOMEPAGE);
// TODO: use the relays in the nprofile
if (parsed.type === "nprofile") pubkey = parsed.data.pubkey;
else if (parsed.type === "npub") pubkey = parsed.data;
// Fallback to public dir if path cannot be found on the nsite homepage
if (pubkey) fallthrough = true;
}
let pubkey = await userDomains.get<string | undefined>(ctx.hostname);
// resolve pubkey if not in cache
if (!pubkey) {
if (fallthrough) return next();
console.log(`${ctx.hostname}: Resolving`);
pubkey = await resolveNpubFromHostname(ctx.hostname);
ctx.status = 404;
ctx.body = fs.readFileSync(path.resolve(__dirname, "../public/404.html"), "utf-8");
return;
if (pubkey) {
await userDomains.set(ctx.hostname, pubkey);
console.log(`${ctx.hostname}: Found ${pubkey}`);
} else {
await userDomains.set(ctx.hostname, "");
}
}
// fetch relays
const relays = (await getUserOutboxes(pubkey)) || [];
if (pubkey) {
ctx.state.pubkey = pubkey;
// always check subscription relays
relays.push(...SUBSCRIPTION_RELAYS);
let relays = await userRelays.get<string[] | undefined>(pubkey);
if (relays.length === 0) throw new Error("No relays found");
// fetch relays if not in cache
if (!relays) {
console.log(`${pubkey}: Fetching relays`);
// fetch servers and events in parallel
let [servers, event] = await Promise.all([
getUserBlossomServers(pubkey, relays).then((s) => s || []),
getNsiteBlob(pubkey, ctx.path, relays).then((e) => {
if (!e) return getNsiteBlob(pubkey, "/404.html", relays);
else return e;
}),
]);
relays = await getUserOutboxes(pubkey);
if (relays) {
await userRelays.set(pubkey, relays);
console.log(`${pubkey}: Found ${relays.length} relays`);
} else {
relays = [];
await userServers.set(pubkey, [], 30_000);
console.log(`${pubkey}: Failed to find relays`);
}
}
if (!event) {
if (fallthrough) return next();
console.log(`${pubkey}: Searching for ${ctx.path}`);
const blobs = await getNsiteBlobs(pubkey, ctx.path, relays);
ctx.status = 404;
ctx.body = `Not Found: no events found\npath: ${ctx.path}\nkind: ${NSITE_KIND}\npubkey: ${pubkey}\nrelays: ${relays.join(", ")}`;
return;
}
// always fetch from additional servers
servers.push(...BLOSSOM_SERVERS);
if (servers.length === 0) throw new Error("Failed to find blossom servers");
try {
const res = await streamBlob(event.sha256, servers);
if (!res) {
ctx.status = 502;
ctx.body = `Failed to find blob\npath: ${event.path}\nsha256: ${event.sha256}\nservers: ${servers.join(", ")}`;
if (blobs.length === 0) {
console.log(`${pubkey}: Found 0 events`);
ctx.status = 404;
ctx.body = "Not Found";
return;
}
const type = mime.getType(event.path);
if (type) ctx.set("content-type", type);
else if (res.headers["content-type"]) ctx.set("content-type", res.headers["content-type"]);
let servers = await userServers.get<string[] | undefined>(pubkey);
// pass headers along
if (res.headers["content-length"]) ctx.set("content-length", res.headers["content-length"]);
// fetch blossom servers if not in cache
if (!servers) {
console.log(`${pubkey}: Searching for blossom servers`);
servers = await getUserBlossomServers(pubkey, relays);
// set Onion-Location header
if (ONION_HOST) {
const url = new URL(ONION_HOST);
url.hostname = npubEncode(pubkey) + "." + url.hostname;
ctx.set("Onion-Location", url.toString().replace(/\/$/, ""));
if (servers) {
await userServers.set(pubkey, servers);
console.log(`${pubkey}: Found ${servers.length} servers`);
} else {
servers = [];
await userServers.set(pubkey, [], 30_000);
console.log(`${pubkey}: Failed to find servers`);
}
}
// add cache headers
ctx.set("ETag", res.headers["etag"] || `"${event.sha256}"`);
ctx.set("Cache-Control", "public, max-age=3600");
ctx.set("Last-Modified", res.headers["last-modified"] || new Date(event.created_at * 1000).toUTCString());
// always fetch from additional servers
servers.push(...BLOSSOM_SERVERS);
for (const blob of blobs) {
const res = await downloadFile(blob.sha256, servers);
if (res) {
const type = mime.getType(blob.path);
if (type) ctx.set("Content-Type", type);
else if (res.headers["content-type"]) ctx.set("content-type", res.headers["content-type"]);
// pass headers along
if (res.headers["content-length"]) ctx.set("content-length", res.headers["content-length"]);
ctx.body = res;
return;
}
}
ctx.status = 200;
ctx.body = res;
return;
} catch (error) {
ctx.status = 500;
ctx.body = `Failed to stream blob ${event.path}\n${error}`;
return;
}
ctx.body = "Failed to find blob";
} else await next();
});
if (ONION_HOST) {
app.use((ctx, next) => {
// set Onion-Location header if it was not set before
if (!ctx.get("Onion-Location") && ONION_HOST) {
ctx.set("Onion-Location", ONION_HOST);
}
return next();
});
}
// serve static files from public
const serveOptions: serve.Options = {
hidden: true,
maxAge: 60 * 60 * 1000,
index: "index.html",
};
try {
const www = NSITE_HOMEPAGE_DIR;
const www = path.resolve(process.cwd(), "public");
fs.statSync(www);
app.use(serve(www, serveOptions));
app.use(serve(www));
} catch (error) {
const www = path.resolve(__dirname, "../public");
app.use(serve(www, serveOptions));
app.use(serve(www));
}
// start the server
app.listen({ host: NSITE_HOST, port: NSITE_PORT }, () => {
logger("Started on port", HOST);
});
app.listen(
{
port: process.env.NSITE_PORT || 3000,
host: process.env.NSITE_HOST || "0.0.0.0",
},
() => {
console.log("Started on port", process.env.PORT || 3000);
},
);
// watch for invalidations
watchInvalidation();
// invalidate nginx cache on new events
if (NGINX_CACHE_DIR && SUBSCRIPTION_RELAYS.length > 0) {
console.log(`Listening for new nsite events`);
subscribeForEvents(SUBSCRIPTION_RELAYS, async (event) => {
try {
const nsite = parseNsiteEvent(event);
if (nsite) {
console.log(`${nsite.pubkey}: Invalidating ${nsite.path}`);
await invalidatePubkeyPath(nsite.pubkey, nsite.path);
}
} catch (error) {
console.log(`Failed to invalidate ${event.id}`);
}
});
}
process.on("unhandledRejection", (reason, promise) => {
console.error("Unhandled Rejection at:", promise, "reason:", reason);
});
/** Gracefully shuts down: logs once, closes all relay pool connections, then exits. */
async function shutdown() {
  // previously this logged the shutdown message twice (logger + console.log)
  console.log("Shutting down...");
  pool.destroy();
  process.exit(0);
}

View File

@ -1,31 +0,0 @@
import { npubEncode } from "nostr-tools/nip19";
import { SUBSCRIPTION_RELAYS } from "./env.js";
import { parseNsiteEvent } from "./events.js";
import pool from "./nostr.js";
import { NSITE_KIND } from "./const.js";
import logger from "./logger.js";
import { pathBlobs } from "./cache.js";
const log = logger.extend("invalidation");
/**
 * Subscribes to SUBSCRIPTION_RELAYS for nsite events from the last hour and
 * evicts the matching pubkey+path entries from the pathBlobs cache.
 * No-op when no subscription relays are configured.
 */
export function watchInvalidation() {
  if (SUBSCRIPTION_RELAYS.length === 0) return;

  // use the module's namespaced `log` consistently (was a mix of logger/console.log)
  log(`Listening for new nsite events on: ${SUBSCRIPTION_RELAYS.join(", ")}`);

  pool.subscribeMany(SUBSCRIPTION_RELAYS, [{ kinds: [NSITE_KIND], since: Math.round(Date.now() / 1000) - 60 * 60 }], {
    onevent: async (event) => {
      try {
        const parsed = parseNsiteEvent(event);
        if (parsed) {
          // cache key matches the one written by getNsiteBlob (pubkey + path)
          pathBlobs.delete(parsed.pubkey + parsed.path);
          log(`Invalidated ${npubEncode(parsed.pubkey) + parsed.path}`);
        }
      } catch (error) {
        log(`Failed to invalidate ${event.id}`);
      }
    },
  });
}

View File

@ -1,8 +0,0 @@
import debug from "debug";

// Enable the default logging namespaces unless the user already configured DEBUG
if (!debug.enabled("nsite")) debug.enable("nsite,nsite:*");

// Root logger; modules derive namespaced loggers via logger.extend("<name>")
const logger = debug("nsite");

export default logger;

37
src/nginx.ts Normal file
View File

@ -0,0 +1,37 @@
import pfs from "node:fs/promises";
import crypto from "node:crypto";
import { join } from "node:path";
import { NGINX_CACHE_DIR } from "./env.js";
import { userDomains } from "./cache.js";
/**
 * Invalidates the nginx cache entry for `path` on every cached domain that
 * is mapped to `pubkey`. Does nothing when the cache backend has no iterator.
 */
export async function invalidatePubkeyPath(pubkey: string, path: string) {
  const entries = userDomains.iterator?.(undefined);
  if (!entries) return;

  const pending: Promise<boolean | undefined>[] = [];
  for await (const [domain, owner] of entries) {
    if (owner !== pubkey) continue;
    pending.push(invalidateNginxCache(domain, path));
  }

  // best-effort: individual failures must not abort the rest
  await Promise.allSettled(pending);
}
/**
 * Removes the on-disk nginx proxy-cache file for a host+path.
 *
 * Returns true when a cache entry was deleted and false otherwise — the
 * original returned false when NGINX_CACHE_DIR was unset but undefined from
 * both the success and failure paths, so callers could not distinguish them.
 *
 * @param host the request hostname used in the nginx cache key
 * @param path the request path used in the nginx cache key
 */
export async function invalidateNginxCache(host: string, path: string) {
  if (!NGINX_CACHE_DIR) return false;

  try {
    const key = `${host}${path}`;
    // nginx derives the cache filename from the md5 of the cache key
    const md5 = crypto.createHash("md5").update(key).digest("hex");

    // NOTE: hard coded to cache levels 1:2
    const cachePath = join(NGINX_CACHE_DIR, md5.slice(-1), md5.slice(-3, -1), md5);

    await pfs.rm(cachePath);
    console.log(`Invalidated ${key} (${md5})`);
    return true;
  } catch (error) {
    // entry was not cached (or not removable) — treat as a no-op
    return false;
  }
}

View File

@ -1,51 +1,20 @@
import { Filter, NostrEvent, SimplePool } from "nostr-tools";
import { getServersFromServerListEvent, USER_BLOSSOM_SERVER_LIST_KIND } from "blossom-client-sdk";
import { LOOKUP_RELAYS } from "./env.js";
import { pubkeyRelays, pubkeyServers } from "./cache.js";
import logger from "./logger.js";
import { npubEncode } from "nostr-tools/nip19";
import { NSITE_KIND } from "./const.js";
const pool = new SimplePool();
const log = logger.extend("nostr");
/** Fetches a pubkeys mailboxes from the cache or relays */
/**
 * Fetches a pubkey's outbox (write) relays from the cache, or from the lookup
 * relays via their kind-10002 relay list (NIP-65).
 * Returns undefined when no relay list event is found.
 */
export async function getUserOutboxes(pubkey: string) {
  const cached = await pubkeyRelays.get(pubkey);
  if (cached) return cached;

  const mailboxes = await pool.get(LOOKUP_RELAYS, { kinds: [10002], authors: [pubkey] });
  if (!mailboxes) return;

  // "r" tags with no marker or a "write" marker are outbox relays
  const relays = mailboxes.tags
    .filter((t) => t[0] === "r" && (t[2] === undefined || t[2] === "write"))
    .map((t) => t[1]);

  log(`Found ${relays.length} relays for ${npubEncode(pubkey)}`);
  // previously the cache was written twice and a second, unreachable return followed
  await pubkeyRelays.set(pubkey, relays);

  return relays;
}
/** Fetches a pubkeys blossom servers from the cache or relays */
export async function getUserBlossomServers(pubkey: string, relays: string[]) {
const cached = await pubkeyServers.get(pubkey);
if (cached) return cached;
const blossomServersEvent = await pool.get(relays, { kinds: [USER_BLOSSOM_SERVER_LIST_KIND], authors: [pubkey] });
const servers = blossomServersEvent
? getServersFromServerListEvent(blossomServersEvent).map((u) => u.toString())
: undefined;
// Save servers if found
if (servers) {
log(`Found ${servers.length} blossom servers for ${npubEncode(pubkey)}`);
await pubkeyServers.set(pubkey, servers);
}
return servers;
/** Opens a relay subscription for nsite events created within the last hour. */
export function subscribeForEvents(relays: string[], onevent: (event: NostrEvent) => any) {
  const oneHourAgo = Math.round(Date.now() / 1000) - 60 * 60;
  return pool.subscribeMany(relays, [{ kinds: [NSITE_KIND], since: oneHourAgo }], { onevent });
}
export function requestEvents(relays: string[], filter: Filter) {

View File

@ -1,13 +1,3 @@
import { ClientOptions, WebSocket } from "ws";
import { ClientRequestArgs } from "http";
import { WebSocket } from "ws";
import agent from "./proxy.js";
// WebSocket subclass that forces every outgoing connection through the configured proxy agent
class ProxyWebSocket extends WebSocket {
  constructor(address: string | URL, options?: ClientOptions | ClientRequestArgs) {
    // caller-supplied options may still override the agent
    super(address, { agent, ...options });
  }
}
// @ts-expect-error
global.WebSocket = agent ? ProxyWebSocket : WebSocket;
global.WebSocket = global.WebSocket || WebSocket;

View File

@ -1,57 +0,0 @@
import { ProxyAgent } from "proxy-agent";
import { PacProxyAgent } from "pac-proxy-agent";
import { I2P_PROXY, PAC_PROXY, TOR_PROXY } from "./env.js";
/**
 * Builds a base64 data: URI containing a PAC (proxy auto-config) script that
 * routes *.i2p hosts through I2P_PROXY and *.onion hosts through TOR_PROXY,
 * falling back to a direct connection for everything else.
 * NOTE: template literal whitespace is part of the emitted PAC file text.
 */
function buildPacURI() {
  const statements: string[] = [];

  if (I2P_PROXY) {
    statements.push(
      `
if (shExpMatch(host, "*.i2p"))
{
return "SOCKS5 ${I2P_PROXY}";
}
`.trim(),
    );
  }

  if (TOR_PROXY) {
    statements.push(
      `
if (shExpMatch(host, "*.onion"))
{
return "SOCKS5 ${TOR_PROXY}";
}
`.trim(),
    );
  }

  // any host not matched above connects directly
  statements.push('return "DIRECT";');

  const PACFile = `
// SPDX-License-Identifier: CC0-1.0
function FindProxyForURL(url, host)
{
${statements.join("\n")}
}
`.trim();

  // "pac+" scheme prefix is consumed by PacProxyAgent
  return "pac+data:application/x-ns-proxy-autoconfig;base64," + btoa(PACFile);
}
/**
 * Selects the outbound proxy agent:
 * - PAC_PROXY set: load the user-supplied PAC file
 * - TOR_PROXY / I2P_PROXY set: generate a PAC file routing .onion/.i2p traffic
 * - otherwise: a default ProxyAgent (honors standard proxy environment variables)
 */
function buildProxy() {
  if (PAC_PROXY) {
    console.log(`Using PAC proxy file`);
    return new PacProxyAgent(PAC_PROXY);
  } else if (TOR_PROXY || I2P_PROXY) {
    if (TOR_PROXY) console.log("Tor connection enabled");
    if (I2P_PROXY) console.log("I2P connection enabled");
    return new PacProxyAgent(buildPacURI());
  } else return new ProxyAgent({ keepAlive: true });
}
const agent = buildProxy();
export default agent;

22
supervisord.conf Normal file
View File

@ -0,0 +1,22 @@
[supervisord]
nodaemon=true
user=root
[program:nginx]
command=nginx -g "daemon off;"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
[program:nsite]
command=node /app
autostart=true
autorestart=true
user=root
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0