mirror of https://github.com/bitvora/wot-relay.git
synced 2025-06-23 16:05:35 +00:00
Compare commits
No commits in common. "master" and "v0.1.15" have entirely different histories.
@@ -25,6 +25,3 @@ ARCHIVE_REACTIONS="FALSE" # optional, reactions take up a lot of space and compu

# optional, certain note kinds older than this many days will be deleted
MAX_AGE_DAYS=365

# comma delimited list of pubkeys who follow bots and ruin the WoT
IGNORE_FOLLOWS_LIST=""
@@ -24,7 +24,6 @@ Don't want to run the relay, just want to connect to some? Here are some availab
- [wss://wot.tealeaf.dev](https://wot.tealeaf.dev)
- [wss://wot.nostr.net](https://wot.nostr.net)
- [wss://relay.goodmorningbitcoin.com](https://relay.goodmorningbitcoin.com)
- [wss://wot.sudocarlos.com](wss://wot.sudocarlos.com)

## Prerequisites
@@ -65,7 +64,6 @@ REFRESH_INTERVAL_HOURS=24 # interval in hours to refresh the web of trust
MINIMUM_FOLLOWERS=3 #how many followers before they're allowed in the WoT
ARCHIVAL_SYNC="FALSE" # set to TRUE to archive every note from every person in the WoT (not recommended)
ARCHIVE_REACTIONS="FALSE" # set to TRUE to archive every reaction from every person in the WoT (not recommended)
IGNORE_FOLLOWS_LIST="" # comma separated list of pubkeys who follow too many bots and ruin the WoT
```

### 4. Build the project
552 main.go
@@ -7,13 +7,8 @@ import (
    "io"
    "log"
    "net/http"
    _ "net/http/pprof"
    "os"
    "runtime"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/fiatjaf/eventstore"
@@ -43,45 +38,20 @@ type Config struct {
    RelayIcon string
    MaxAgeDays int
    ArchiveReactions bool
    IgnoredPubkeys []string
    MaxTrustNetwork int
    MaxRelays int
    MaxOneHopNetwork int
}

var pool *nostr.SimplePool
var wdb nostr.RelayStore
var relays []string
var relaySet = make(map[string]bool) // O(1) lookup
var config Config
var trustNetwork []string
var trustNetworkSet = make(map[string]bool) // O(1) lookup
var seedRelays []string
var booted bool
var oneHopNetwork []string
var oneHopNetworkSet = make(map[string]bool) // O(1) lookup
var trustNetworkMap map[string]bool
var pubkeyFollowerCount = make(map[string]int)
var trustedNotes uint64
var untrustedNotes uint64
var archiveEventSemaphore = make(chan struct{}, 20) // Reduced from 100 to 20

// Performance counters
var (
    totalEvents uint64
    rejectedEvents uint64
    archivedEvents uint64
    profileRefreshCount uint64
    networkRefreshCount uint64
)

// Mutexes for thread safety
var (
    relayMutex sync.RWMutex
    trustNetworkMutex sync.RWMutex
    oneHopMutex sync.RWMutex
    followerMutex sync.RWMutex
)

func main() {
    nostr.InfoLogger = log.New(io.Discard, "", 0)
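The master column above pairs each slice with a companion `...Set` map (`relaySet`, `trustNetworkSet`, `oneHopNetworkSet`) so "have we already added this?" checks cost O(1) instead of the linear scans still visible later in the v0.1.15 column of `appendRelay`/`appendPubkey`. A minimal, self-contained sketch of that slice-plus-set pattern; the `dedupList` type and its names are illustrative, not from the repo:

```go
package main

import "fmt"

// Slice-plus-set sketch: the slice keeps insertion order for batching,
// the map answers membership queries in O(1). A cap mirrors MaxTrustNetwork.
type dedupList struct {
    items []string
    seen  map[string]bool
    max   int
}

func newDedupList(max int) *dedupList {
    return &dedupList{seen: make(map[string]bool), max: max}
}

// add appends v only if it is unseen and the cap is not reached,
// mirroring the checks in appendPubkey/appendRelay above.
func (d *dedupList) add(v string) bool {
    if d.seen[v] || len(d.items) >= d.max {
        return false
    }
    d.items = append(d.items, v)
    d.seen[v] = true
    return true
}

func main() {
    d := newDedupList(3)
    fmt.Println(d.add("a"), d.add("a"), d.add("b")) // true false true
}
```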
@@ -134,7 +104,6 @@ func main() {
    relay.RejectFilter = append(relay.RejectFilter,
        policies.NoEmptyFilters,
        policies.NoComplexFilters,
        policies.FilterIPRateLimiter(5, time.Minute*1, 30),
    )

    relay.RejectConnection = append(relay.RejectConnection,
@@ -145,29 +114,10 @@ func main() {
    relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
    relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
    relay.RejectEvent = append(relay.RejectEvent, func(ctx context.Context, event *nostr.Event) (bool, string) {
        atomic.AddUint64(&totalEvents, 1)

        // Don't reject events if we haven't booted yet or if trust network is empty
        if !booted {
            return false, ""
        }

        trustNetworkMutex.RLock()
        trusted := trustNetworkMap[event.PubKey]
        hasNetwork := len(trustNetworkMap) > 0
        trustNetworkMutex.RUnlock()

        // If we don't have a trust network yet, allow all events
        if !hasNetwork {
            return false, ""
        }

        if !trusted {
            atomic.AddUint64(&rejectedEvents, 1)
            return true, "not in web of trust"
        if !trustNetworkMap[event.PubKey] {
            return true, "we are rebuilding the trust network, please try again later"
        }
        if event.Kind == nostr.KindEncryptedDirectMessage {
            atomic.AddUint64(&rejectedEvents, 1)
            return true, "only gift wrapped DMs are allowed"
        }
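The hunk above shows the relay's RejectEvent hook returning a (reject, reason) pair: accept everything until the first trust-network build completes (or while the map is still empty), reject authors outside the web of trust, and reject kind-4 encrypted DMs. A rough standalone sketch of that decision logic over plain values; `shouldReject` is illustrative and not the khatru hook itself:

```go
package main

import "fmt"

// kindEncryptedDM = 4 corresponds to nostr.KindEncryptedDirectMessage in the diff.
const kindEncryptedDM = 4

// shouldReject mirrors the acceptance rules in the RejectEvent closure above,
// but over plain inputs instead of a *nostr.Event.
func shouldReject(booted bool, trusted map[string]bool, author string, kind int) (bool, string) {
    if !booted || len(trusted) == 0 {
        return false, "" // accept everything until the WoT is built
    }
    if !trusted[author] {
        return true, "not in web of trust"
    }
    if kind == kindEncryptedDM {
        return true, "only gift wrapped DMs are allowed"
    }
    return false, ""
}

func main() {
    wot := map[string]bool{"alice": true}
    fmt.Println(shouldReject(true, wot, "bob", 1))   // true not in web of trust
    fmt.Println(shouldReject(true, wot, "alice", 4)) // true only gift wrapped DMs are allowed
}
```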
@@ -192,8 +142,6 @@ func main() {
    }

    go refreshTrustNetwork(ctx, relay)
    go monitorMemoryUsage() // Add memory monitoring
    go monitorPerformance() // Add performance monitoring

    mux := relay.Router()
    static := http.FileServer(http.Dir(config.StaticPath))
@@ -201,10 +149,6 @@ func main() {
    mux.Handle("GET /static/", http.StripPrefix("/static/", static))
    mux.Handle("GET /favicon.ico", http.StripPrefix("/", static))

    // Add debug endpoints
    mux.HandleFunc("GET /debug/stats", debugStatsHandler)
    mux.HandleFunc("GET /debug/goroutines", debugGoroutinesHandler)

    mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        tmpl := template.Must(template.ParseFiles(os.Getenv("INDEX_PATH")))
        data := struct {
@@ -225,10 +169,6 @@ func main() {
    })

    log.Println("🎉 relay running on port :3334")
    log.Println("🔍 debug endpoints available at:")
    log.Println(" http://localhost:3334/debug/pprof/ (CPU/memory profiling)")
    log.Println(" http://localhost:3334/debug/stats (application stats)")
    log.Println(" http://localhost:3334/debug/goroutines (goroutine info)")
    err := http.ListenAndServe(":3334", relay)
    if err != nil {
        log.Fatal(err)
@@ -269,28 +209,8 @@ func LoadConfig() Config {
        os.Setenv("ARCHIVE_REACTIONS", "FALSE")
    }

    if os.Getenv("MAX_TRUST_NETWORK") == "" {
        os.Setenv("MAX_TRUST_NETWORK", "40000")
    }

    if os.Getenv("MAX_RELAYS") == "" {
        os.Setenv("MAX_RELAYS", "1000")
    }

    if os.Getenv("MAX_ONE_HOP_NETWORK") == "" {
        os.Setenv("MAX_ONE_HOP_NETWORK", "50000")
    }

    ignoredPubkeys := []string{}
    if ignoreList := os.Getenv("IGNORE_FOLLOWS_LIST"); ignoreList != "" {
        ignoredPubkeys = splitAndTrim(ignoreList)
    }

    minimumFollowers, _ := strconv.Atoi(os.Getenv("MINIMUM_FOLLOWERS"))
    maxAgeDays, _ := strconv.Atoi(os.Getenv("MAX_AGE_DAYS"))
    maxTrustNetwork, _ := strconv.Atoi(os.Getenv("MAX_TRUST_NETWORK"))
    maxRelays, _ := strconv.Atoi(os.Getenv("MAX_RELAYS"))
    maxOneHopNetwork, _ := strconv.Atoi(os.Getenv("MAX_ONE_HOP_NETWORK"))

    config := Config{
        RelayName: getEnv("RELAY_NAME"),
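LoadConfig above fills in defaults by writing them back into the environment with os.Setenv and then parses with strconv.Atoi while discarding the error, so a malformed value silently becomes 0. A hedged alternative sketch of the same defaulting; the `envInt` helper is hypothetical and not part of the repo:

```go
package main

import (
    "fmt"
    "os"
    "strconv"
)

// envInt reads an integer environment variable and falls back to def when
// the variable is unset or unparsable, instead of silently yielding 0 the
// way a discarded strconv.Atoi error does.
func envInt(key string, def int) int {
    v := os.Getenv(key)
    if v == "" {
        return def
    }
    n, err := strconv.Atoi(v)
    if err != nil {
        return def
    }
    return n
}

func main() {
    os.Setenv("MAX_RELAYS", "1000")
    fmt.Println(envInt("MAX_RELAYS", 500))          // 1000
    fmt.Println(envInt("MAX_TRUST_NETWORK", 40000)) // 40000 (unset -> default)
}
```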
@@ -307,10 +227,6 @@ func LoadConfig() Config {
        ArchivalSync: getEnv("ARCHIVAL_SYNC") == "TRUE",
        MaxAgeDays: maxAgeDays,
        ArchiveReactions: getEnv("ARCHIVE_REACTIONS") == "TRUE",
        IgnoredPubkeys: ignoredPubkeys,
        MaxTrustNetwork: maxTrustNetwork,
        MaxRelays: maxRelays,
        MaxOneHopNetwork: maxOneHopNetwork,
    }

    return config
@@ -325,101 +241,44 @@ func getEnv(key string) string {
}

func updateTrustNetworkFilter() {
    // Build new trust network in temporary variables
    newTrustNetworkMap := make(map[string]bool)
    var newTrustNetwork []string
    newTrustNetworkSet := make(map[string]bool)
    trustNetworkMap = make(map[string]bool)

    log.Println("🌐 building new trust network map")

    followerMutex.RLock()
    log.Println("🌐 updating trust network map")
    for pubkey, count := range pubkeyFollowerCount {
        if count >= config.MinimumFollowers {
            newTrustNetworkMap[pubkey] = true
            if !newTrustNetworkSet[pubkey] && len(pubkey) == 64 && len(newTrustNetwork) < config.MaxTrustNetwork {
                newTrustNetwork = append(newTrustNetwork, pubkey)
                newTrustNetworkSet[pubkey] = true
            }
            trustNetworkMap[pubkey] = true
            appendPubkey(pubkey)
        }
    }
    followerMutex.RUnlock()

    // Now atomically replace the active trust network
    trustNetworkMutex.Lock()
    trustNetworkMap = newTrustNetworkMap
    trustNetwork = newTrustNetwork
    trustNetworkSet = newTrustNetworkSet
    trustNetworkMutex.Unlock()

    log.Println("🌐 trust network map updated with", len(newTrustNetwork), "keys")

    // Cleanup follower count map periodically to prevent unbounded growth
    followerMutex.Lock()
    if len(pubkeyFollowerCount) > config.MaxOneHopNetwork*2 {
        log.Println("🧹 cleaning follower count map")
        newFollowerCount := make(map[string]int)
        for pubkey, count := range pubkeyFollowerCount {
            if count >= config.MinimumFollowers || newTrustNetworkMap[pubkey] {
                newFollowerCount[pubkey] = count
            }
        }
        oldCount := len(pubkeyFollowerCount)
        pubkeyFollowerCount = newFollowerCount
        log.Printf("🧹 cleaned follower count map: %d -> %d entries", oldCount, len(newFollowerCount))
    }
    followerMutex.Unlock()
    log.Println("🌐 trust network map updated with", len(trustNetwork), "keys")
}

func refreshProfiles(ctx context.Context) {
    atomic.AddUint64(&profileRefreshCount, 1)
    start := time.Now()

    // Get a snapshot of current trust network to avoid holding locks during network operations
    trustNetworkMutex.RLock()
    currentTrustNetwork := make([]string, len(trustNetwork))
    copy(currentTrustNetwork, trustNetwork)
    trustNetworkMutex.RUnlock()

    for i := 0; i < len(currentTrustNetwork); i += 200 {
    for i := 0; i < len(trustNetwork); i += 200 {
        timeout, cancel := context.WithTimeout(ctx, 4*time.Second)
        defer cancel()

        end := i + 200
        if end > len(currentTrustNetwork) {
            end = len(currentTrustNetwork)
        if end > len(trustNetwork) {
            end = len(trustNetwork)
        }

        filters := []nostr.Filter{{
            Authors: currentTrustNetwork[i:end],
            Authors: trustNetwork[i:end],
            Kinds: []int{nostr.KindProfileMetadata},
        }}

        for ev := range pool.SubManyEose(timeout, seedRelays, filters) {
            wdb.Publish(ctx, *ev.Event)
        }

        cancel() // Cancel after each iteration
    }
    duration := time.Since(start)
    log.Printf("👤 profiles refreshed: %d profiles in %v", len(currentTrustNetwork), duration)
    log.Println("👤 profiles refreshed: ", len(trustNetwork))
}

func refreshTrustNetwork(ctx context.Context, relay *khatru.Relay) {

    runTrustNetworkRefresh := func() {
        atomic.AddUint64(&networkRefreshCount, 1)
        start := time.Now()

        // Build new networks in temporary variables to avoid disrupting the active network
        var newOneHopNetwork []string
        newOneHopNetworkSet := make(map[string]bool)
        newPubkeyFollowerCount := make(map[string]int)

        // Copy existing follower counts to preserve data
        followerMutex.RLock()
        for k, v := range pubkeyFollowerCount {
            newPubkeyFollowerCount[k] = v
        }
        followerMutex.RUnlock()

        timeoutCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
        defer cancel()
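updateTrustNetworkFilter in the master column builds the next map and slice in temporaries and only swaps them into the package-level variables inside a short trustNetworkMutex.Lock() section, so readers holding the RLock never observe a half-built network. A minimal sketch of that build-then-swap pattern under an RWMutex; the names are illustrative, not the repo's:

```go
package main

import (
    "fmt"
    "sync"
)

var (
    mu      sync.RWMutex
    trusted = map[string]bool{} // the "active" map readers consult
)

// rebuild constructs the replacement map without holding the write lock,
// then swaps it in during one short critical section.
func rebuild(followerCount map[string]int, minFollowers int) {
    next := make(map[string]bool) // built outside the lock
    for pk, n := range followerCount {
        if n >= minFollowers {
            next[pk] = true
        }
    }
    mu.Lock()
    trusted = next // readers see either the old or the new map, never a partial one
    mu.Unlock()
}

func isTrusted(pk string) bool {
    mu.RLock()
    defer mu.RUnlock()
    return trusted[pk]
}

func main() {
    rebuild(map[string]int{"alice": 5, "bob": 1}, 3)
    fmt.Println(isTrusted("alice"), isTrusted("bob")) // true false
}
```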
@@ -429,48 +288,32 @@ func refreshTrustNetwork(ctx context.Context, relay *khatru.Relay) {
        }}

        log.Println("🔍 fetching owner's follows")
        eventCount := 0
        for ev := range pool.SubManyEose(timeoutCtx, seedRelays, filters) {
            eventCount++
            for _, contact := range ev.Event.Tags.GetAll([]string{"p"}) {
                pubkey := contact[1]
                if isIgnored(pubkey, config.IgnoredPubkeys) {
                    fmt.Println("ignoring follows from pubkey: ", pubkey)
                    continue
                }
                newPubkeyFollowerCount[contact[1]]++

                // Add to new one-hop network
                if !newOneHopNetworkSet[contact[1]] && len(contact[1]) == 64 && len(newOneHopNetwork) < config.MaxOneHopNetwork {
                    newOneHopNetwork = append(newOneHopNetwork, contact[1])
                    newOneHopNetworkSet[contact[1]] = true
                }
                pubkeyFollowerCount[contact[1]]++ // Increment follower count for the pubkey
                appendOneHopNetwork(contact[1])
            }
        }
        log.Printf("🔍 processed %d follow list events", eventCount)

        log.Println("🌐 building web of trust graph")
        totalProcessed := 0
        for i := 0; i < len(newOneHopNetwork); i += 100 {
        for i := 0; i < len(oneHopNetwork); i += 100 {
            timeout, cancel := context.WithTimeout(ctx, 4*time.Second)
            defer cancel()

            end := i + 100
            if end > len(newOneHopNetwork) {
                end = len(newOneHopNetwork)
            if end > len(oneHopNetwork) {
                end = len(oneHopNetwork)
            }

            filters = []nostr.Filter{{
                Authors: newOneHopNetwork[i:end],
                Authors: oneHopNetwork[i:end],
                Kinds: []int{nostr.KindFollowList, nostr.KindRelayListMetadata, nostr.KindProfileMetadata},
            }}

            batchCount := 0
            for ev := range pool.SubManyEose(timeout, seedRelays, filters) {
                batchCount++
                totalProcessed++
                for _, contact := range ev.Event.Tags.GetAll([]string{"p"}) {
                    if len(contact) > 1 {
                        newPubkeyFollowerCount[contact[1]]++
                        pubkeyFollowerCount[contact[1]]++ // Increment follower count for the pubkey
                    }
                }
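The refresh loops above walk the author lists in fixed-size slices (200 pubkeys per profile subscription, 100 per follow-list subscription), clamping the final batch with `end = len(...)`. The same chunking arithmetic as a standalone sketch; the `chunks` helper is illustrative and not in the repo:

```go
package main

import "fmt"

// chunks mirrors the `for i := 0; i < len(x); i += size` loops above:
// each iteration takes items[i:end], and the last batch is simply shorter.
func chunks(items []string, size int) [][]string {
    var out [][]string
    for i := 0; i < len(items); i += size {
        end := i + size
        if end > len(items) {
            end = len(items)
        }
        out = append(out, items[i:end])
    }
    return out
}

func main() {
    authors := []string{"a", "b", "c", "d", "e"}
    for _, batch := range chunks(authors, 2) {
        fmt.Println(batch) // [a b] [c d] [e]
    }
}
```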
@@ -482,87 +325,34 @@ func refreshTrustNetwork(ctx context.Context, relay *khatru.Relay) {
                    wdb.Publish(ctx, *ev.Event)
                }
            }
            cancel() // Cancel after each iteration

            if i%500 == 0 { // Log progress every 5 batches
                log.Printf("🌐 processed batch %d-%d (%d events in this batch)", i, end, batchCount)
            }
        }

        // Now atomically replace the active data structures
        oneHopMutex.Lock()
        oneHopNetwork = newOneHopNetwork
        oneHopNetworkSet = newOneHopNetworkSet
        oneHopMutex.Unlock()

        followerMutex.Lock()
        pubkeyFollowerCount = newPubkeyFollowerCount
        followerMutex.Unlock()

        duration := time.Since(start)
        log.Printf("🫂 total network size: %d (processed %d events in %v)", len(newPubkeyFollowerCount), totalProcessed, duration)
        relayMutex.RLock()
        log.Println("🫂 total network size:", len(pubkeyFollowerCount))
        log.Println("🔗 relays discovered:", len(relays))
        relayMutex.RUnlock()
    }

    ticker := time.NewTicker(time.Duration(config.RefreshInterval) * time.Hour)
    defer ticker.Stop()

    // Run initial refresh
    log.Println("🚀 performing initial trust network build...")
    runTrustNetworkRefresh()
    updateTrustNetworkFilter()

    // Mark as booted after initial trust network is built
    booted = true
    log.Println("✅ trust network initialized, relay is now active")

    deleteOldNotes(relay)
    archiveTrustedNotes(ctx, relay)

    // Then run on timer
    for {
        select {
        case <-ticker.C:
            log.Println("🔄 refreshing trust network in background...")
            runTrustNetworkRefresh()
            updateTrustNetworkFilter()
            deleteOldNotes(relay)
            archiveTrustedNotes(ctx, relay)
            log.Println("✅ trust network refresh completed")
        case <-ctx.Done():
            return
        }
        runTrustNetworkRefresh()
        updateTrustNetworkFilter()
        deleteOldNotes(relay)
        archiveTrustedNotes(ctx, relay)
    }
}

func appendRelay(relay string) {
    relayMutex.Lock()
    defer relayMutex.Unlock()

    if len(relays) >= config.MaxRelays {
        return // Prevent unbounded growth
    for _, r := range relays {
        if r == relay {
            return
        }
    }

    if relaySet[relay] {
        return // Already exists
    }

    relays = append(relays, relay)
    relaySet[relay] = true
}

func appendPubkey(pubkey string) {
    trustNetworkMutex.Lock()
    defer trustNetworkMutex.Unlock()

    if len(trustNetwork) >= config.MaxTrustNetwork {
        return // Prevent unbounded growth
    }

    if trustNetworkSet[pubkey] {
        return // Already exists
    for _, pk := range trustNetwork {
        if pk == pubkey {
            return
        }
    }

    if len(pubkey) != 64 {
@@ -570,7 +360,20 @@ func appendPubkey(pubkey string) {
    }

    trustNetwork = append(trustNetwork, pubkey)
    trustNetworkSet[pubkey] = true
}

func appendOneHopNetwork(pubkey string) {
    for _, pk := range oneHopNetwork {
        if pk == pubkey {
            return
        }
    }

    if len(pubkey) != 64 {
        return
    }

    oneHopNetwork = append(oneHopNetwork, pubkey)
}

func archiveTrustedNotes(ctx context.Context, relay *khatru.Relay) {
@@ -580,13 +383,12 @@ func archiveTrustedNotes(ctx context.Context, relay *khatru.Relay) {
    done := make(chan struct{})

    go func() {
        defer close(done)
        if config.ArchivalSync {
            go refreshProfiles(ctx)

            var filters []nostr.Filter
            since := nostr.Now()
            if config.ArchiveReactions {

                filters = []nostr.Filter{{
                    Kinds: []int{
                        nostr.KindArticle,
@@ -601,7 +403,6 @@ func archiveTrustedNotes(ctx context.Context, relay *khatru.Relay) {
                        nostr.KindZap,
                        nostr.KindTextNote,
                    },
                    Since: &since,
                }}
            } else {
                filters = []nostr.Filter{{
@@ -617,54 +418,24 @@ func archiveTrustedNotes(ctx context.Context, relay *khatru.Relay) {
                        nostr.KindZap,
                        nostr.KindTextNote,
                    },
                    Since: &since,
                }}
            }

            log.Println("📦 archiving trusted notes...")

            eventCount := 0
            for ev := range pool.SubMany(timeout, seedRelays, filters) {
                eventCount++

                // Check GC pressure every 1000 events
                if eventCount%1000 == 0 {
                    var m runtime.MemStats
                    runtime.ReadMemStats(&m)
                    if m.NumGC > 0 && eventCount > 1000 {
                        // If we're doing more than 2 GCs per 1000 events, slow down
                        gcRate := float64(m.NumGC) / float64(eventCount/1000)
                        if gcRate > 2.0 {
                            log.Printf("⚠️ High GC pressure (%.1f GC/1000 events), slowing archive process", gcRate)
                            time.Sleep(100 * time.Millisecond) // Brief pause
                        }
                    }
                }

                // Use semaphore to limit concurrent goroutines
                select {
                case archiveEventSemaphore <- struct{}{}:
                    go func(event nostr.Event) {
                        defer func() { <-archiveEventSemaphore }()
                        archiveEvent(ctx, relay, event)
                    }(*ev.Event)
                case <-timeout.Done():
                    log.Printf("📦 archive timeout reached, processed %d events", eventCount)
                    return
                default:
                    // If semaphore is full, process synchronously to avoid buildup
                    archiveEvent(ctx, relay, *ev.Event)
                }
                go archiveEvent(ctx, relay, *ev.Event)
            }

            log.Printf("📦 archived %d trusted notes and discarded %d untrusted notes (processed %d total events)",
                atomic.LoadUint64(&trustedNotes), atomic.LoadUint64(&untrustedNotes), eventCount)
            log.Println("📦 archived", trustedNotes, "trusted notes and discarded", untrustedNotes, "untrusted notes")
        } else {
            log.Println("🔄 web of trust will refresh in", config.RefreshInterval, "hours")
            select {
            case <-timeout.Done():
            }
        }

        close(done)
    }()

    select {
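In the master column, archiveTrustedNotes bounds concurrency with `archiveEventSemaphore`, a buffered channel of capacity 20: a select either claims a slot and archives in a new goroutine, falls back to synchronous processing when the channel is full, or stops when the timeout fires. A stripped-down sketch of that semaphore pattern (the timeout case is omitted here, and `process` stands in for archiveEvent):

```go
package main

import (
    "fmt"
    "sync"
)

func main() {
    // Buffered channel used as a counting semaphore; capacity 20 matches the diff.
    sem := make(chan struct{}, 20)
    var wg sync.WaitGroup

    process := func(id int) { _ = id } // placeholder for the real archive work

    for i := 0; i < 100; i++ {
        select {
        case sem <- struct{}{}: // slot available: process concurrently
            wg.Add(1)
            go func(id int) {
                defer wg.Done()
                defer func() { <-sem }() // release the slot when done
                process(id)
            }(i)
        default: // semaphore full: process inline to avoid unbounded goroutine buildup
            process(i)
        }
    }
    wg.Wait()
    fmt.Println("done")
}
```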
@@ -676,17 +447,12 @@ func archiveTrustedNotes(ctx context.Context, relay *khatru.Relay) {
}

func archiveEvent(ctx context.Context, relay *khatru.Relay, ev nostr.Event) {
    trustNetworkMutex.RLock()
    trusted := trustNetworkMap[ev.PubKey]
    trustNetworkMutex.RUnlock()

    if trusted {
    if trustNetworkMap[ev.PubKey] {
        wdb.Publish(ctx, ev)
        relay.BroadcastEvent(&ev)
        atomic.AddUint64(&trustedNotes, 1)
        atomic.AddUint64(&archivedEvents, 1)
        trustedNotes++
    } else {
        atomic.AddUint64(&untrustedNotes, 1)
        untrustedNotes++
    }
}
@@ -720,7 +486,6 @@ func deleteOldNotes(relay *khatru.Relay) error {
            nostr.KindZap,
            nostr.KindTextNote,
        },
        Limit: 1000, // Process in batches to avoid memory issues
    }

    ch, err := relay.QueryEvents[0](ctx, filter)
@@ -729,47 +494,27 @@ func deleteOldNotes(relay *khatru.Relay) error {
        return err
    }

    // Process events in batches to avoid memory issues
    batchSize := 100
    events := make([]*nostr.Event, 0, batchSize)
    count := 0
    events := make([]*nostr.Event, 0)

    for evt := range ch {
        events = append(events, evt)
        count++

        if len(events) >= batchSize {
            // Delete this batch
            for num_evt, del_evt := range events {
                for _, del := range relay.DeleteEvent {
                    if err := del(ctx, del_evt); err != nil {
                        log.Printf("error deleting note %d of batch. event id: %s", num_evt, del_evt.ID)
                        return err
                    }
                }
            }
            events = events[:0] // Reset slice but keep capacity
        }
    }

    // Delete remaining events
    if len(events) > 0 {
        for num_evt, del_evt := range events {
            for _, del := range relay.DeleteEvent {
                if err := del(ctx, del_evt); err != nil {
                    log.Printf("error deleting note %d of final batch. event id: %s", num_evt, del_evt.ID)
                    return err
                }
            }
        }
    }

    if count == 0 {
    if len(events) < 1 {
        log.Println("0 old notes found")
    } else {
        log.Printf("%d old (until %d) notes deleted", count, oldAge)
        return nil
    }

    for num_evt, del_evt := range events {
        for _, del := range relay.DeleteEvent {
            if err := del(ctx, del_evt); err != nil {
                log.Printf("error deleting note %d of %d. event id: %s", num_evt, len(events), del_evt.ID)
                return err
            }
        }
    }

    log.Printf("%d old (until %d) notes deleted", len(events), oldAge)
    return nil
}
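deleteOldNotes on the master side accumulates query results into a slice capped at batchSize (100), flushes each full batch through the relay's DeleteEvent handlers, and reuses the slice's capacity via events[:0] before flushing whatever remains. A small sketch of that accumulate-and-flush batching; the `flushInBatches` helper is illustrative, not from the repo:

```go
package main

import "fmt"

// flushInBatches collects up to batchSize items, flushes each full batch,
// then resets the slice with s[:0] so its capacity is reused, and finally
// flushes any partial remainder, mirroring the structure above.
func flushInBatches(ids []string, batchSize int, flush func([]string)) {
    batch := make([]string, 0, batchSize)
    for _, id := range ids {
        batch = append(batch, id)
        if len(batch) >= batchSize {
            flush(batch)
            batch = batch[:0] // reset length, keep capacity
        }
    }
    if len(batch) > 0 {
        flush(batch) // remaining partial batch
    }
}

func main() {
    flushInBatches([]string{"1", "2", "3", "4", "5"}, 2, func(b []string) {
        fmt.Println("deleting", b)
    })
}
```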
@@ -778,160 +523,3 @@ func getDB() badger.BadgerBackend {
        Path: getEnv("DB_PATH"),
    }
}

func splitAndTrim(input string) []string {
    items := strings.Split(input, ",")
    for i, item := range items {
        items[i] = strings.TrimSpace(item)
    }
    return items
}

func isIgnored(pubkey string, ignoredPubkeys []string) bool {
    for _, ignored := range ignoredPubkeys {
        if pubkey == ignored {
            return true
        }
    }
    return false
}

// Add memory monitoring
func monitorMemoryUsage() {
    ticker := time.NewTicker(5 * time.Minute)
    defer ticker.Stop()

    for {
        select {
        case <-ticker.C:
            var m runtime.MemStats
            runtime.ReadMemStats(&m)

            relayMutex.RLock()
            relayCount := len(relays)
            relayMutex.RUnlock()

            trustNetworkMutex.RLock()
            trustNetworkCount := len(trustNetwork)
            trustNetworkMutex.RUnlock()

            oneHopMutex.RLock()
            oneHopCount := len(oneHopNetwork)
            oneHopMutex.RUnlock()

            followerMutex.RLock()
            followerCount := len(pubkeyFollowerCount)
            followerMutex.RUnlock()

            log.Printf("📊 Memory: Alloc=%d KB, Sys=%d KB, NumGC=%d",
                m.Alloc/1024, m.Sys/1024, m.NumGC)
            log.Printf("📊 Data structures: Relays=%d, TrustNetwork=%d, OneHop=%d, Followers=%d",
                relayCount, trustNetworkCount, oneHopCount, followerCount)
        }
    }
}

// Add performance monitoring
func monitorPerformance() {
    ticker := time.NewTicker(1 * time.Minute)
    defer ticker.Stop()

    var lastGC uint32
    var lastEvents, lastRejected, lastArchived uint64

    for {
        select {
        case <-ticker.C:
            var m runtime.MemStats
            runtime.ReadMemStats(&m)

            currentEvents := atomic.LoadUint64(&totalEvents)
            currentRejected := atomic.LoadUint64(&rejectedEvents)
            currentArchived := atomic.LoadUint64(&archivedEvents)

            eventsPerMin := currentEvents - lastEvents
            rejectedPerMin := currentRejected - lastRejected
            archivedPerMin := currentArchived - lastArchived
            gcPerMin := m.NumGC - lastGC

            numGoroutines := runtime.NumGoroutine()

            log.Printf("⚡ Performance: Events/min=%d, Rejected/min=%d, Archived/min=%d, GC/min=%d, Goroutines=%d",
                eventsPerMin, rejectedPerMin, archivedPerMin, gcPerMin, numGoroutines)

            if gcPerMin > 60 {
                log.Printf("⚠️ HIGH GC ACTIVITY: %d garbage collections in last minute!", gcPerMin)
            }

            if numGoroutines > 1000 {
                log.Printf("⚠️ HIGH GOROUTINE COUNT: %d goroutines active!", numGoroutines)
            }

            lastGC = m.NumGC
            lastEvents = currentEvents
            lastRejected = currentRejected
            lastArchived = currentArchived
        }
    }
}

// Debug handlers
func debugStatsHandler(w http.ResponseWriter, r *http.Request) {
    var m runtime.MemStats
    runtime.ReadMemStats(&m)

    stats := fmt.Sprintf(`Debug Statistics:

Memory:
  Allocated: %d KB
  System: %d KB
  Total Allocations: %d
  GC Cycles: %d
  Goroutines: %d

Events:
  Total Events: %d
  Rejected Events: %d
  Archived Events: %d
  Trusted Notes: %d
  Untrusted Notes: %d

Refreshes:
  Profile Refreshes: %d
  Network Refreshes: %d

Data Structures:
  Relays: %d
  Trust Network: %d
  One Hop Network: %d
  Follower Count Map: %d
`,
        m.Alloc/1024,
        m.Sys/1024,
        m.Mallocs,
        m.NumGC,
        runtime.NumGoroutine(),
        atomic.LoadUint64(&totalEvents),
        atomic.LoadUint64(&rejectedEvents),
        atomic.LoadUint64(&archivedEvents),
        atomic.LoadUint64(&trustedNotes),
        atomic.LoadUint64(&untrustedNotes),
        atomic.LoadUint64(&profileRefreshCount),
        atomic.LoadUint64(&networkRefreshCount),
        len(relays),
        len(trustNetwork),
        len(oneHopNetwork),
        len(pubkeyFollowerCount),
    )

    w.Header().Set("Content-Type", "text/plain")
    w.Write([]byte(stats))
}

func debugGoroutinesHandler(w http.ResponseWriter, r *http.Request) {
    buf := make([]byte, 1<<20) // 1MB buffer
    stackSize := runtime.Stack(buf, true)

    w.Header().Set("Content-Type", "text/plain")
    w.Write(buf[:stackSize])
}