add sndev shell script and enhance docker compose local dev
* add hot reloading worker:dev script
* refine docker config
* sndev bash script and docker reliability stuff
* make posix shell
* restart: always -> unless-stopped
* proper check for postgres health
* add db seed to sndev
* refinements after fresh builds
* begin adding regtest network
* add changes to .env.sample
* reorganize docker and add static certs/macaroon to lnd
* copy wallet and macaroon dbs for deterministic wallets/macaroons
* fix perms of shared directories
* allow debian useradd with duplicate id
* add auto-mining
* make bitcoin health check dependent on blockheight
* open channel between ln nodes
* improve channel opens
* add sndev payinvoice
* add sndev withdraw
* ascii art
* add sndev status
* sndev passthrough to docker and containers
* add sndev psql command
* remove script logging
* small script cleanup
* smaller db seed
* pin opensearch version (Co-authored-by: ekzyis <ek@stacker.news>)
* pin opensearch dashboard (Co-authored-by: ekzyis <ek@stacker.news>)
* add sndev prisma
* add help for all commands
* set -e
* s3 and image proxy with broken name resolution
* finally fully working image uploads
* use a better diff algo

Co-authored-by: ekzyis <ek@stacker.news>
This commit is contained in: parent 179a539d4d, commit 23ee62fb21
.env.sample (129 lines changed)

@@ -4,22 +4,22 @@
############################################################################
# github
GITHUB_ID=<YOUR GITHUB ID>
GITHUB_SECRET=<YOUR GITHUB SECRET>
GITHUB_ID=
GITHUB_SECRET=

# twitter
TWITTER_ID=<YOUR TWITTER ID>
TWITTER_SECRET=<YOUR TWITTER SECRET>
TWITTER_ID=
TWITTER_SECRET=

# email
LOGIN_EMAIL_SERVER=smtp://<YOUR EMAIL>:<YOUR PASSWORD>@<YOUR SMTP DOMAIN>:587
LOGIN_EMAIL_FROM=<YOUR FROM ALIAS>
LOGIN_EMAIL_SERVER=
LOGIN_EMAIL_FROM=
LIST_MONK_AUTH=

#####################################################################
# OTHER / OPTIONAL #
# configuration for push notifications, slack and imgproxy are here #
#####################################################################
########################################################
# OTHER / OPTIONAL #
# configuration for push notifications, slack are here #
########################################################

# VAPID for Web Push
VAPID_MAILTO=
@@ -30,38 +30,13 @@ VAPID_PRIVKEY=
SLACK_BOT_TOKEN=
SLACK_CHANNEL_ID=

# imgproxy
NEXT_PUBLIC_IMGPROXY_URL=
IMGPROXY_KEY=
IMGPROXY_SALT=
# lnurl ... you'll need a tunnel to localhost:3000 for these
LNAUTH_URL=
LNWITH_URL=

# search
OPENSEARCH_URL=http://opensearch:9200
OPENSEARCH_USERNAME=
OPENSEARCH_PASSWORD=
OPENSEARCH_INDEX=item
OPENSEARCH_MODEL_ID=

#######################################################
# WALLET / OPTIONAL #
# if you want to work with payments you'll need these #
#######################################################

# lnd
LND_CERT=<YOUR LND HEX CERT>
LND_MACAROON=<YOUR LND HEX MACAROON>
LND_SOCKET=<YOUR LND GRPC HOST>:<YOUR LND GRPC PORT>

# lnurl
LNAUTH_URL=<PUBLIC URL TO /api/lnauth>
LNWITH_URL=<PUBLIC URL TO /api/lnwith>

# nostr (NIP-57 zap receipts)
NOSTR_PRIVATE_KEY=<YOUR NOSTR PRIVATE KEY IN HEX>

###############
# LEAVE AS IS #
###############
#########################
# SNDEV STUFF WE PRESET #
#########################

# static things
NEXTAUTH_URL=http://localhost:3000/api/auth
@@ -72,7 +47,21 @@ NEXTAUTH_SECRET=3_0W_PhDRZVanbeJsZZGIEljexkKoGbL6qGIqSwTjjI
JWT_SIGNING_PRIVATE_KEY={"kty":"oct","kid":"FvD__hmeKoKHu2fKjUrWbRKfhjimIM4IKshyrJG4KSM","alg":"HS512","k":"3_0W_PhDRZVanbeJsZZGIEljexkKoGbL6qGIqSwTjjI"}
INVOICE_HMAC_KEY=a4c1d9c81edb87b79d28809876a18cf72293eadb39f92f3f4f2f1cfbdf907c91

# imgproxy options
# lnd
# xxd -p -c0 docker/lnd/sn/regtest/admin.macaroon
LND_CERT=2d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d494943516a43434165696741774942416749516139493834682b48653350385a437541525854554d54414b42676771686b6a4f50515144416a41344d5238770a485159445651514b45785a73626d5167595856306232646c626d56795958526c5a43426a5a584a304d5255774577594456515144457778694e6a41785a5749780a4d474d354f444d774868634e4d6a51774d7a41334d5463774d6a45355768634e4d6a55774e5441794d5463774d6a4535576a41344d523877485159445651514b0a45785a73626d5167595856306232646c626d56795958526c5a43426a5a584a304d5255774577594456515144457778694e6a41785a5749784d474d354f444d770a5754415442676371686b6a4f5051494242676771686b6a4f50514d4242774e4341415365596a4b62542b4a4a4a37624b6770677a6d6c3278496130364e3174680a2f4f7033533173382b4f4a41387836647849682f326548556b4f7578675a36703549434b496f375a544c356a5963764375793941334b6e466f3448544d4948510a4d41344741315564447745422f775145417749437044415442674e56485355454444414b4267677242674546425163444154415042674e5648524d42416638450a425441444151482f4d4230474131556444675157424252545756796e653752786f747568717354727969466d6a36736c557a423542674e5648524545636a42770a676778694e6a41785a5749784d474d354f444f4343577876593246736147397a64494947633235666247356b6768526f62334e304c6d52765932746c636935700a626e526c636d356862494945645735706549494b64573570654842685932746c64494948596e566d59323975626f6345667741414159635141414141414141410a41414141414141414141414141596345724273414254414b42676771686b6a4f5051514441674e4941444246416945413873616c4a667134476671465557532f0a35347a335461746c6447736673796a4a383035425a5263334f326f434943794e6e3975716976566f5575365935345143624c3966394c575779547a516e61616e0a656977482f51696b0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a
LND_MACAROON=0201036c6e6402f801030a106cf4e146abffa5d766befbbf4c73b5a31201301a160a0761646472657373120472656164120577726974651a130a04696e666f120472656164120577726974651a170a08696e766f69636573120472656164120577726974651a210a086d616361726f6f6e120867656e6572617465120472656164120577726974651a160a076d657373616765120472656164120577726974651a170a086f6666636861696e120472656164120577726974651a160a076f6e636861696e120472656164120577726974651a140a057065657273120472656164120577726974651a180a067369676e6572120867656e6572617465120472656164000006202c3bfd55c191e925cbffd73712c9d4b9b4a8440410bde5f8a0a6e33af8b3d876
LND_SOCKET=sn_lnd:10009

# nostr (NIP-57 zap receipts)
# openssl rand -hex 32
NOSTR_PRIVATE_KEY=5f30b7e7714360f51f2be2e30c1d93b7fdf67366e730658e85777dfcc4e4245f

# imgproxy
NEXT_PUBLIC_IMGPROXY_URL=http://localhost:3001
IMGPROXY_KEY=9c273e803fd5d444bf8883f8c3000de57bee7995222370cab7f2d218dd9a4bbff6ca11cbf902e61eeef4358616f231da51e183aee6841e3a797a5c9a9530ba67
IMGPROXY_SALT=47b802be2c9250a66b998f411fc63912ab0bc1c6b47d99b8d37c61019d1312a984b98745eac83db9791b01bb8c93ecbc9b2ef9f2981d66061c7d0a4528ff6465

IMGPROXY_ENABLE_WEBP_DETECTION=1
IMGPROXY_ENABLE_AVIF_DETECTION=1
IMGPROXY_MAX_ANIMATION_FRAMES=2000
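The two giant hex values above are just the static files under docker/lnd encoded with the xxd invocation from the comment; a minimal sketch for regenerating them, assuming the repo layout this commit introduces:

```bash
# hex-encode the deterministic lnd credentials for the env file
xxd -p -c0 docker/lnd/sn/tls.cert                # -> LND_CERT
xxd -p -c0 docker/lnd/sn/regtest/admin.macaroon  # -> LND_MACAROON
```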
@@ -84,11 +73,67 @@ IMGPROXY_DOWNLOAD_TIMEOUT=9
# IMGPROXY_DEVELOPMENT_ERRORS_MODE=1
# IMGPROXY_ENABLE_DEBUG_HEADERS=true

NEXT_PUBLIC_AWS_UPLOAD_BUCKET=uploads
NEXT_PUBLIC_MEDIA_DOMAIN=localhost:4566
NEXT_PUBLIC_MEDIA_URL=http://localhost:4566/uploads

# search
OPENSEARCH_URL=http://opensearch:9200
OPENSEARCH_USERNAME=
OPENSEARCH_PASSWORD=
OPENSEARCH_INDEX=item
OPENSEARCH_MODEL_ID=

# prisma db url
DATABASE_URL="postgresql://sn:password@db:5432/stackernews?schema=public"

###################
# FOR DOCKER ONLY #
###################

# containers can't use localhost, so we need to use the container name
IMGPROXY_URL_DOCKER=http://imgproxy:8080
MEDIA_URL_DOCKER=http://s3:4566/uploads

# postgres container stuff
POSTGRES_PASSWORD=password
POSTGRES_USER=sn
POSTGRES_DB=stackernews

# opensearch container stuff
OPENSEARCH_INITIAL_ADMIN_PASSWORD=mVchg1T5oA9wudUh
plugins.security.disabled=true
discovery.type=single-node
DISABLE_SECURITY_DASHBOARDS_PLUGIN=true

# bitcoind container stuff
RPC_AUTH='7c68e5fcdba94a366bfdf629ecc676bb$0d0fc087c3bf7f068f350292bf8de1418df3dd8cb31e35682d5d3108d601002b'
RPC_USER=bitcoin
RPC_PASS=bitcoin
RPC_PORT=18443
P2P_PORT=18444
ZMQ_BLOCK_PORT=28334
ZMQ_TX_PORT=28335

# sn lnd container stuff
LND_REST_PORT=8080
LND_GRPC_PORT=10009
LND_P2P_PORT=9735
# docker exec -u lnd sn_lnd lncli newaddress p2wkh --unused
LND_ADDR=bcrt1q7q06n5st4vqq3lssn0rtkrn2qqypghv9xg2xnl
LND_PUBKEY=02cb2e2d5a6c5b17fa67b1a883e2973c82e328fb9bd08b2b156a9e23820c87a490

# stacker lnd container stuff
STACKER_LND_REST_PORT=8081
STACKER_LND_GRPC_PORT=10010
# docker exec -u lnd stacker_lnd lncli newaddress p2wkh --unused
STACKER_LND_ADDR=bcrt1qfqau4ug9e6rtrvxrgclg58e0r93wshucumm9vu
STACKER_LND_PUBKEY=028093ae52e011d45b3e67f2e0f2cb6c3a1d7f88d2920d408f3ac6db3a56dc4b35

LNCLI_NETWORK=regtest

# localstack container stuff
AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
PERSISTENCE=1
SKIP_SSL_CERT_DOWNLOAD=1
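A hedged sketch of how the remaining preset wallet values can be reproduced once the containers are up — the lncli commands mirror the comments above, and the rpcauth.py path assumes a Bitcoin Core source checkout:

```bash
# RPC_AUTH is a salted hash of RPC_USER/RPC_PASS (rpcauth.py ships with Bitcoin Core)
python3 share/rpcauth/rpcauth.py bitcoin bitcoin

# deposit addresses and identity pubkeys for the two lnd nodes
docker exec -u lnd sn_lnd lncli newaddress p2wkh --unused
docker exec -u lnd sn_lnd lncli getinfo | jq -r .identity_pubkey
docker exec -u lnd stacker_lnd lncli newaddress p2wkh --unused
docker exec -u lnd stacker_lnd lncli getinfo | jq -r .identity_pubkey
```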
.gitignore

@@ -28,12 +28,9 @@ yarn-debug.log*
yarn-error.log*

# local env files
.env
envbak
.env.local
.env.development.local
.env.test.local
.env.production.local
.env*
!.env.sample

# vercel
.vercel
Dockerfile (10 lines changed)

@@ -4,8 +4,16 @@ FROM node:18.17.0-bullseye

ENV NODE_ENV=development

ARG UID
ARG GID
RUN groupadd -fg "$GID" apprunner
RUN useradd -om -u "$UID" -g "$GID" apprunner
USER apprunner

WORKDIR /app

EXPOSE 3000

CMD npm install --loglevel verbose --legacy-peer-deps; npx prisma migrate dev; npm run dev
COPY package.json package-lock.json ./
RUN npm ci --legacy-peer-deps --loglevel verbose
CMD ["sh","-c","npm install --loglevel verbose --legacy-peer-deps && npx prisma migrate dev && npm run dev"]
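The UID/GID build args exist so the container user matches the host user that owns the bind-mounted /app. A sketch of how they get supplied, mirroring the docker__compose wrapper added later in this commit:

```bash
# build with the host user's ids so bind-mount ownership lines up
CURRENT_UID=$(id -u) CURRENT_GID=$(id -g) \
  docker compose --env-file .env.development build app
```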
api/s3/index.js

@@ -1,4 +1,5 @@
import AWS from 'aws-sdk'
import { MEDIA_URL } from '../../lib/constants'

const bucketRegion = 'us-east-1'
const Bucket = process.env.NEXT_PUBLIC_AWS_UPLOAD_BUCKET
@ -7,8 +8,18 @@ AWS.config.update({
|
|||
  region: bucketRegion
})

const config = {
  apiVersion: '2006-03-01',
  endpoint: process.env.NODE_ENV === 'development' ? `${MEDIA_URL}` : undefined,
  s3ForcePathStyle: process.env.NODE_ENV === 'development'
}

export function createPresignedPost ({ key, type, size }) {
  const s3 = new AWS.S3({ apiVersion: '2006-03-01' })
  const s3 = new AWS.S3({
    ...config,
    // in development, we need to be able to call this from localhost
    endpoint: process.env.NODE_ENV === 'development' ? `${process.env.NEXT_PUBLIC_MEDIA_URL}` : undefined
  })
  return new Promise((resolve, reject) => {
    s3.createPresignedPost({
      Bucket,
@ -25,7 +36,7 @@ export function createPresignedPost ({ key, type, size }) {
|
|||
}

export async function deleteObjects (keys) {
  const s3 = new AWS.S3({ apiVersion: '2006-03-01' })
  const s3 = new AWS.S3(config)
  // max 1000 keys per request
  // see https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html
  const batchSize = 1000
components/image.js

@@ -4,7 +4,7 @@ import { IMGPROXY_URL_REGEXP } from '../lib/url'
import { useShowModal } from './modal'
import { useMe } from './me'
import { Dropdown } from 'react-bootstrap'
import { UNKNOWN_LINK_REL, UPLOAD_TYPES_ALLOW } from '../lib/constants'
import { UNKNOWN_LINK_REL, UPLOAD_TYPES_ALLOW, MEDIA_URL } from '../lib/constants'
import { useToast } from './toast'
import gql from 'graphql-tag'
import { useMutation } from '@apollo/client'
@ -68,6 +68,10 @@ function ImageProxy ({ src, srcSet: { dimensions, ...srcSetObj } = {}, onClick,
|
|||
    if (!srcSetObj) return undefined
    // srcSetObj shape: { [widthDescriptor]: <imgproxyUrl>, ... }
    return Object.entries(srcSetObj).reduce((acc, [wDescriptor, url], i, arr) => {
      // backwards compatibility: we used to replace image urls with imgproxy urls rather than just storing paths
      if (!url.startsWith('http')) {
        url = `${process.env.NEXT_PUBLIC_IMGPROXY_URL}${url}`
      }
      return acc + `${url} ${wDescriptor}` + (i < arr.length - 1 ? ', ' : '')
    }, '')
  }, [srcSetObj])
@ -77,6 +81,9 @@ function ImageProxy ({ src, srcSet: { dimensions, ...srcSetObj } = {}, onClick,
|
|||
  const bestResSrc = useMemo(() => {
    if (!srcSetObj) return src
    return Object.entries(srcSetObj).reduce((acc, [wDescriptor, url]) => {
      if (!url.startsWith('http')) {
        url = `${process.env.NEXT_PUBLIC_IMGPROXY_URL}${url}`
      }
      const w = Number(wDescriptor.replace(/w$/, ''))
      return w > acc.w ? { w, url } : acc
    }, { w: 0, url: undefined }).url
@ -224,7 +231,7 @@ export const ImageUpload = forwardRef(({ children, className, onSelect, onUpload
|
|||
      return
    }

    const url = `https://${process.env.NEXT_PUBLIC_MEDIA_DOMAIN}/${data.getSignedPOST.fields.key}`
    const url = `${MEDIA_URL}/${data.getSignedPOST.fields.key}`
    // key is upload id in database
    const id = data.getSignedPOST.fields.key
    onSuccess?.({ ...variables, id, name: file.name, url, file })
components/item-job.js

@@ -10,6 +10,7 @@ import { timeSince } from '../lib/time'
import EmailIcon from '../svgs/mail-open-line.svg'
import Share from './share'
import Hat from './hat'
import { MEDIA_URL } from '../lib/constants'

export default function ItemJob ({ item, toc, rank, children }) {
  const isEmail = string().email().isValidSync(item.url)
@ -25,7 +26,7 @@ export default function ItemJob ({ item, toc, rank, children }) {
|
|||
    <div className={styles.item}>
      <Link href={`/items/${item.id}`}>
        <Image
          src={item.uploadId ? `https://${process.env.NEXT_PUBLIC_MEDIA_DOMAIN}/${item.uploadId}` : '/jobs-default.png'} width='42' height='42' className={styles.companyImage}
          src={item.uploadId ? `${MEDIA_URL}/${item.uploadId}` : '/jobs-default.png'} width='42' height='42' className={styles.companyImage}
        />
      </Link>
      <div className={`${styles.hunk} align-self-center mb-0`}>
components/job-form.js

@@ -15,7 +15,7 @@ import Link from 'next/link'
import { usePrice } from './price'
import Avatar from './avatar'
import { jobSchema } from '../lib/validate'
import { MAX_TITLE_LENGTH } from '../lib/constants'
import { MAX_TITLE_LENGTH, MEDIA_URL } from '../lib/constants'
import { useToast } from './toast'
import { toastDeleteScheduled } from '../lib/form'
import { ItemButtonBar } from './post'
@ -110,7 +110,7 @@ export default function JobForm ({ item, sub }) {
|
|||
        <label className='form-label'>logo</label>
        <div className='position-relative' style={{ width: 'fit-content' }}>
          <Image
            src={logoId ? `https://${process.env.NEXT_PUBLIC_MEDIA_DOMAIN}/${logoId}` : '/jobs-default.png'} width='135' height='135' roundedCircle
            src={logoId ? `${MEDIA_URL}/${logoId}` : '/jobs-default.png'} width='135' height='135' roundedCircle
          />
          <Avatar onSuccess={setLogoId} />
        </div>
components/user-header.js

@@ -28,7 +28,7 @@ import { hexToBech32 } from '../lib/nostr'
import NostrIcon from '../svgs/nostr.svg'
import GithubIcon from '../svgs/github-fill.svg'
import TwitterIcon from '../svgs/twitter-fill.svg'
import { UNKNOWN_LINK_REL } from '../lib/constants'
import { UNKNOWN_LINK_REL, MEDIA_URL } from '../lib/constants'

export default function UserHeader ({ user }) {
  const router = useRouter()
@ -96,7 +96,7 @@ function HeaderPhoto ({ user, isMe }) {
|
|||
      }
    }
  )
  const src = user.photoId ? `https://${process.env.NEXT_PUBLIC_MEDIA_DOMAIN}/${user.photoId}` : '/dorian400.jpg'
  const src = user.photoId ? `${MEDIA_URL}/${user.photoId}` : '/dorian400.jpg'

  return (
    <div className='position-relative align-self-start' style={{ width: 'fit-content' }}>
components/user-list.js

@@ -8,6 +8,7 @@ import { useQuery } from '@apollo/client'
import MoreFooter from './more-footer'
import { useData } from './use-data'
import Hat from './hat'
import { MEDIA_URL } from '../lib/constants'

// all of this nonsense is to show the stat we are sorting by first
const Stacked = ({ user }) => (user.optional.stacked !== null && <span>{abbrNum(user.optional.stacked)} stacked</span>)
@ -48,7 +49,7 @@ function User ({ user, rank, statComps, Embellish }) {
|
|||
    <div className={`${styles.item} mb-2`}>
      <Link href={`/${user.name}`}>
        <Image
          src={user.photoId ? `https://${process.env.NEXT_PUBLIC_MEDIA_DOMAIN}/${user.photoId}` : '/dorian400.jpg'} width='32' height='32'
          src={user.photoId ? `${MEDIA_URL}/${user.photoId}` : '/dorian400.jpg'} width='32' height='32'
          className={`${userStyles.userimg} me-2`}
        />
      </Link>
docker-compose.yml

@@ -2,114 +2,379 @@ version: "3"
services:
  db:
    container_name: db
    build: ./db
    restart: always
    build: ./docker/db
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "PGPASSWORD=${POSTGRES_PASSWORD} psql -U ${POSTGRES_USER} ${POSTGRES_DB} -c 'SELECT 1 FROM users LIMIT 1'"]
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 1m
    expose:
      - "5432"
    ports:
      - "5431:5432"
    env_file:
      - ./.env.sample
      - .env.development
    volumes:
      - ./docker/db/seed.sql:/docker-entrypoint-initdb.d/seed.sql
      - db:/var/lib/postgresql/data
    labels:
      CONNECT: "localhost:5431"
  app:
    container_name: app
    build: ./
    build:
      context: ./
      args:
        - UID=${CURRENT_UID}
        - GID=${CURRENT_GID}
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000"]
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 1m30s
      start_period: 1m
    depends_on:
      - db
      db:
        condition: service_healthy
        restart: true
      opensearch:
        condition: service_healthy
        restart: true
      sn_lnd:
        condition: service_healthy
        restart: true
      # s3:
      #   condition: service_healthy
      #   restart: true
    env_file:
      - ./.env.sample
      - .env.development
    expose:
      - "3000"
    ports:
      - "3000:3000"
    volumes:
      - ./:/app
    links:
      - db
      - opensearch
    labels:
      CONNECT: "localhost:3000"
  worker:
    container_name: worker
    build: ./worker
    build:
      context: ./worker
      args:
        - UID=${CURRENT_UID}
        - GID=${CURRENT_GID}
    restart: unless-stopped
    depends_on:
      db:
        condition: service_started
        condition: service_healthy
        restart: true
      app:
        condition: service_healthy
        restart: true
      opensearch:
        condition: service_healthy
        restart: true
      sn_lnd:
        condition: service_healthy
        restart: true
    env_file:
      - ./.env.sample
    ports:
      - "8080:8080"
      - .env.development
    volumes:
      - ./:/app
    links:
      - db
      - app
      - opensearch
    entrypoint: ["/bin/sh", "-c"]
    command:
      - npm run worker
      - npm run worker:dev
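The db healthcheck only passes once seed.sql has created the users table. A sketch of running the same probe by hand, using the credentials preset in .env.sample:

```bash
# identical query to the compose healthcheck; fails until the seed has been applied
docker exec db sh -c "PGPASSWORD=password psql -U sn stackernews -c 'SELECT 1 FROM users LIMIT 1'"
```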
  imgproxy:
    container_name: imgproxy
    image: darthsim/imgproxy:v3.18.1
    image: docker.imgproxy.pro/imgproxy:v3.23.0
    healthcheck:
      test: [ "CMD", "imgproxy", "health" ]
      timeout: 10s
      interval: 10s
      retries: 3
    restart: always
      timeout: 10s
      retries: 10
      start_period: 1m
    restart: unless-stopped
    env_file:
      - ./.env.sample
    expose:
      - "8080"
      - .env.development
    ports:
      - "3001:8080"
    links:
      - app
  opensearch:
    image: opensearchproject/opensearch:latest
    container_name: opensearch
    expose:
      - "8080"
    labels:
      - "CONNECT=localhost:3001"
  s3:
    container_name: s3
    image: localstack/localstack:s3-latest
    # healthcheck:
    #   test: ["CMD-SHELL", "awslocal", "s3", "ls", "s3://uploads"]
    #   interval: 10s
    #   timeout: 10s
    #   retries: 10
    #   start_period: 1m
    restart: unless-stopped
    env_file:
      - .env.development
    environment:
      - discovery.type=single-node
      - plugins.security.disabled=true
      - DEBUG=1
    ports:
      - "4566:4566"
    expose:
      - "4566"
    volumes:
      - 's3:/var/lib/localstack'
      - './docker/s3/init-s3.sh:/etc/localstack/init/ready.d/init-s3.sh'
      - './docker/s3/cors.json:/etc/localstack/init/ready.d/cors.json'
    labels:
      - "CONNECT=localhost:4566"
  opensearch:
    image: opensearchproject/opensearch:2.12.0
    container_name: opensearch
    healthcheck:
      test: ["CMD-SHELL", "curl -ku admin:${OPENSEARCH_INITIAL_ADMIN_PASSWORD} --silent --fail localhost:9200/_cluster/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 1m
    restart: unless-stopped
    env_file:
      - .env.development
    environment:
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=mVchg1T5oA9wudUh
    ports:
      - 9200:9200 # REST API
      - 9600:9600 # Performance Analyzer
    volumes:
      - os:/usr/share/opensearch/data
      - ./:/app
    labels:
      CONNECT: "localhost:9200"
    command: >
      bash -c '
        set -m
        /usr/share/opensearch/opensearch-docker-entrypoint.sh &
        until curl -sS "http://localhost:9200/_cat/health?h=status" -ku admin:admin | grep -q "green\|yellow"; do
        until curl -sS "http://localhost:9200/_cat/health?h=status" -ku admin:${OPENSEARCH_INITIAL_ADMIN_PASSWORD} | grep -q "green\|yellow"; do
          echo "Waiting for OpenSearch to start..."
          sleep 1
        done
        echo "OpenSearch started."
        curl -X PUT "http://localhost:9200/item" -ku admin:admin
        curl -X PUT "http://localhost:9200/item" -ku admin:${OPENSEARCH_INITIAL_ADMIN_PASSWORD}
        echo "OpenSearch index created."
        fg
      '
  os-dashboard:
    image: opensearchproject/opensearch-dashboards:latest
    image: opensearchproject/opensearch-dashboards:2.12.0
    container_name: os-dashboard
    restart: unless-stopped
    depends_on:
      opensearch:
        condition: service_healthy
        restart: true
    env_file:
      - .env.development
    environment:
      - opensearch.ssl.verificationMode=none
      - server.ssl.enabled=false
      - plugins.security.disabled=true
      - DISABLE_SECURITY_DASHBOARDS_PLUGIN=true
      - OPENSEARCH_HOSTS=http://opensearch:9200
      - server.ssl.enabled=false
    ports:
      - 5601:5601
    expose:
      - "5601"
    links:
      - opensearch
    labels:
      CONNECT: "localhost:5601"
  bitcoin:
    image: polarlightning/bitcoind:26.0
    container_name: bitcoin
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "bitcoin-cli -chain=regtest -rpcport=${RPC_PORT} -rpcuser=${RPC_USER} -rpcpassword=${RPC_PASS} getblockchaininfo"]
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 1m
    command:
      - 'bitcoind'
      - '-server=1'
      - '-regtest=1'
      - '-rpcauth=${RPC_USER}:${RPC_AUTH}'
      - '-debug=1'
      - '-zmqpubrawblock=tcp://0.0.0.0:${ZMQ_BLOCK_PORT}'
      - '-zmqpubrawtx=tcp://0.0.0.0:${ZMQ_TX_PORT}'
      - '-txindex=1'
      - '-dnsseed=0'
      - '-upnp=0'
      - '-rpcbind=0.0.0.0'
      - '-rpcallowip=0.0.0.0/0'
      - '-rpcport=${RPC_PORT}'
      - '-rest'
      - '-listen=1'
      - '-listenonion=0'
      - '-fallbackfee=0.0002'
      - '-blockfilterindex=1'
      - '-peerblockfilters=1'
    expose:
      - "${RPC_PORT}"
      - "${P2P_PORT}"
      - "${ZMQ_BLOCK_PORT}"
      - "${ZMQ_TX_PORT}"
    volumes:
      - bitcoin:/home/bitcoin/.bitcoin
    labels:
      ofelia.enabled: "true"
      ofelia.job-exec.minecron.schedule: "@every 1m"
      ofelia.job-exec.minecron.command: >
        bash -c '
          blockcount=$$(bitcoin-cli -chain=regtest -rpcport=${RPC_PORT} -rpcuser=${RPC_USER} -rpcpassword=${RPC_PASS} getblockcount 2>/dev/null)
          if (( blockcount <= 0 )); then
            echo "Mining 100 blocks to sn_lnd and stacker_lnd..."
            bitcoin-cli -chain=regtest -rpcport=${RPC_PORT} -rpcuser=${RPC_USER} -rpcpassword=${RPC_PASS} generatetoaddress 100 ${LND_ADDR}
            bitcoin-cli -chain=regtest -rpcport=${RPC_PORT} -rpcuser=${RPC_USER} -rpcpassword=${RPC_PASS} generatetoaddress 100 ${STACKER_LND_ADDR}
          else
            echo "Mining a block to sn_lnd... ${LND_ADDR}"
            bitcoin-cli -chain=regtest -rpcport=${RPC_PORT} -rpcuser=${RPC_USER} -rpcpassword=${RPC_PASS} generatetoaddress 1 ${LND_ADDR}
            echo "Mining a block to stacker_lnd... ${STACKER_LND_ADDR}"
            bitcoin-cli -chain=regtest -rpcport=${RPC_PORT} -rpcuser=${RPC_USER} -rpcpassword=${RPC_PASS} generatetoaddress 1 ${STACKER_LND_ADDR}
          fi
        '
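The minecron job above keeps regtest blocks flowing. A sketch for checking it by hand with the same bitcoin-cli flags, assuming the preset ports and credentials from .env.sample:

```bash
# block height should tick up roughly once per minute
docker exec bitcoin bitcoin-cli -chain=regtest -rpcport=18443 \
  -rpcuser=bitcoin -rpcpassword=bitcoin getblockcount
```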
  sn_lnd:
    build:
      context: ./docker/lnd
      args:
        - LN_NODE_FOR=sn
    container_name: sn_lnd
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "lncli", "getinfo"]
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 1m
    depends_on:
      bitcoin:
        condition: service_healthy
        restart: true
    env_file:
      - .env.development
    command:
      - 'lnd'
      - '--noseedbackup'
      - '--trickledelay=5000'
      - '--alias=sn_lnd'
      - '--externalip=sn_lnd'
      - '--tlsextradomain=sn_lnd'
      - '--tlsextradomain=host.docker.internal'
      - '--listen=0.0.0.0:${LND_P2P_PORT}'
      - '--rpclisten=0.0.0.0:${LND_GRPC_PORT}'
      - '--restlisten=0.0.0.0:${LND_REST_PORT}'
      - '--bitcoin.active'
      - '--bitcoin.regtest'
      - '--bitcoin.node=bitcoind'
      - '--bitcoind.rpchost=bitcoin'
      - '--bitcoind.rpcuser=${RPC_USER}'
      - '--bitcoind.rpcpass=${RPC_PASS}'
      - '--bitcoind.zmqpubrawblock=tcp://bitcoin:${ZMQ_BLOCK_PORT}'
      - '--bitcoind.zmqpubrawtx=tcp://bitcoin:${ZMQ_TX_PORT}'
      - '--protocol.wumbo-channels'
      - '--maxchansize=1000000000'
      - '--allow-circular-route'
      - '--bitcoin.defaultchanconfs=1'
      - '--maxpendingchannels=10'
    expose:
      - "${LND_P2P_PORT}"
    ports:
      - "${LND_REST_PORT}:${LND_REST_PORT}"
      - "${LND_GRPC_PORT}:${LND_GRPC_PORT}"
    volumes:
      - sn_lnd:/home/lnd/.lnd
    labels:
      ofelia.enabled: "true"
      ofelia.job-exec.sn_channel_cron.schedule: "@every 1m"
      ofelia.job-exec.sn_channel_cron.command: >
        su lnd -c bash -c "
          if [ $$(lncli getinfo | jq '.num_active_channels + .num_pending_channels') -ge 3 ]; then
            exit 0
          else
            lncli openchannel --node_key=$STACKER_LND_PUBKEY --connect stacker_lnd:9735 --sat_per_vbyte 1 \\
              --min_confs 0 --local_amt=1000000000 --push_amt=500000000
          fi
        "
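Once the channel cron has run on either node, the channel it opens can be inspected; a sketch assuming the container names above:

```bash
# the cron opens a 1,000,000,000 sat channel and pushes half to the peer
docker exec -u lnd sn_lnd lncli listchannels
docker exec -u lnd sn_lnd lncli pendingchannels
```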
  stacker_lnd:
    build:
      context: ./docker/lnd
      args:
        - LN_NODE_FOR=stacker
    container_name: stacker_lnd
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "lncli", "getinfo"]
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 1m
    depends_on:
      bitcoin:
        condition: service_healthy
        restart: true
    env_file:
      - .env.development
    command:
      - 'lnd'
      - '--noseedbackup'
      - '--trickledelay=5000'
      - '--alias=stacker_lnd'
      - '--externalip=stacker_lnd'
      - '--tlsextradomain=stacker_lnd'
      - '--tlsextradomain=host.docker.internal'
      - '--listen=0.0.0.0:${LND_P2P_PORT}'
      - '--rpclisten=0.0.0.0:${LND_GRPC_PORT}'
      - '--restlisten=0.0.0.0:${LND_REST_PORT}'
      - '--bitcoin.active'
      - '--bitcoin.regtest'
      - '--bitcoin.node=bitcoind'
      - '--bitcoind.rpchost=bitcoin'
      - '--bitcoind.rpcuser=${RPC_USER}'
      - '--bitcoind.rpcpass=${RPC_PASS}'
      - '--bitcoind.zmqpubrawblock=tcp://bitcoin:${ZMQ_BLOCK_PORT}'
      - '--bitcoind.zmqpubrawtx=tcp://bitcoin:${ZMQ_TX_PORT}'
      - '--protocol.wumbo-channels'
      - '--maxchansize=1000000000'
      - '--allow-circular-route'
      - '--bitcoin.defaultchanconfs=1'
      - '--maxpendingchannels=10'
    expose:
      - "${LND_P2P_PORT}"
    ports:
      - "${STACKER_LND_REST_PORT}:${LND_REST_PORT}"
      - "${STACKER_LND_GRPC_PORT}:${LND_GRPC_PORT}"
    volumes:
      - stacker_lnd:/home/lnd/.lnd
    labels:
      ofelia.enabled: "true"
      ofelia.job-exec.stacker_channel_cron.schedule: "@every 1m"
      ofelia.job-exec.stacker_channel_cron.command: >
        su lnd -c bash -c "
          if [ $$(lncli getinfo | jq '.num_active_channels + .num_pending_channels') -ge 3 ]; then
            exit 0
          else
            lncli openchannel --node_key=$LND_PUBKEY --connect sn_lnd:9735 --sat_per_vbyte 1 \\
              --min_confs 0 --local_amt=1000000000 --push_amt=500000000
          fi
        "
  channdler:
    image: mcuadros/ofelia:latest
    container_name: channdler
    depends_on:
      - bitcoin
      - sn_lnd
      - stacker_lnd
    restart: unless-stopped
    command: daemon --docker -f label=com.docker.compose.project=${COMPOSE_PROJECT_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro

volumes:
  db:
  os:
  bitcoin:
  sn_lnd:
  stacker_lnd:
  s3:
File diff suppressed because one or more lines are too long
docker/lnd/Dockerfile (new file)

@@ -0,0 +1,12 @@
FROM polarlightning/lnd:0.17.4-beta

ARG LN_NODE_FOR
ENV LN_NODE_FOR=$LN_NODE_FOR

RUN apt-get update -y \
  && apt-get install -y jq \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

COPY ["./$LN_NODE_FOR/regtest/*", "/home/lnd/.lnd/data/chain/bitcoin/regtest/"]
COPY ["./$LN_NODE_FOR/tls.*", "/home/lnd/.lnd/"]
docker/lnd/README.md (new file)

@@ -0,0 +1,5 @@
We assume control of the certs so that the app container doesn't need to inspect lnd for them.

For the admin.macaroon, we do the same, but we also need to store `macaroons.db` because it contains the master key.

For the wallet addresses, we do the same, but we also need to store `wallet.db` because it contains the master key.
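A hedged sketch of how these static files could be refreshed from a running node, assuming the lnd data paths used by the Dockerfile above:

```bash
# copy the generated cert and credential DBs out of a running sn_lnd container
# so future builds reuse the same deterministic wallet and macaroons
docker cp sn_lnd:/home/lnd/.lnd/tls.cert docker/lnd/sn/tls.cert
docker cp sn_lnd:/home/lnd/.lnd/tls.key docker/lnd/sn/tls.key
docker cp sn_lnd:/home/lnd/.lnd/data/chain/bitcoin/regtest/admin.macaroon docker/lnd/sn/regtest/admin.macaroon
docker cp sn_lnd:/home/lnd/.lnd/data/chain/bitcoin/regtest/macaroons.db docker/lnd/sn/regtest/macaroons.db
docker cp sn_lnd:/home/lnd/.lnd/data/chain/bitcoin/regtest/wallet.db docker/lnd/sn/regtest/wallet.db
```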
Binary files not shown. (3 files)
docker/lnd/sn/tls.cert (new file)

@@ -0,0 +1,15 @@
-----BEGIN CERTIFICATE-----
MIICQjCCAeigAwIBAgIQa9I84h+He3P8ZCuARXTUMTAKBggqhkjOPQQDAjA4MR8w
HQYDVQQKExZsbmQgYXV0b2dlbmVyYXRlZCBjZXJ0MRUwEwYDVQQDEwxiNjAxZWIx
MGM5ODMwHhcNMjQwMzA3MTcwMjE5WhcNMjUwNTAyMTcwMjE5WjA4MR8wHQYDVQQK
ExZsbmQgYXV0b2dlbmVyYXRlZCBjZXJ0MRUwEwYDVQQDEwxiNjAxZWIxMGM5ODMw
WTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASeYjKbT+JJJ7bKgpgzml2xIa06N1th
/Op3S1s8+OJA8x6dxIh/2eHUkOuxgZ6p5ICKIo7ZTL5jYcvCuy9A3KnFo4HTMIHQ
MA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8E
BTADAQH/MB0GA1UdDgQWBBRTWVyne7RxotuhqsTryiFmj6slUzB5BgNVHREEcjBw
ggxiNjAxZWIxMGM5ODOCCWxvY2FsaG9zdIIGc25fbG5kghRob3N0LmRvY2tlci5p
bnRlcm5hbIIEdW5peIIKdW5peHBhY2tldIIHYnVmY29ubocEfwAAAYcQAAAAAAAA
AAAAAAAAAAAAAYcErBsABTAKBggqhkjOPQQDAgNIADBFAiEA8salJfq4GfqFUWS/
54z3TatldGsfsyjJ805BZRc3O2oCICyNn9uqivVoUu6Y54QCbL9f9LWWyTzQnaan
eiwH/Qik
-----END CERTIFICATE-----
docker/lnd/sn/tls.key (new file)

@@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIPw/v7CND3euIqjULW5tCnD5tve0L0E0N8dBtRkJM3u2oAoGCCqGSM49
AwEHoUQDQgAEnmIym0/iSSe2yoKYM5pdsSGtOjdbYfzqd0tbPPjiQPMencSIf9nh
1JDrsYGeqeSAiiKO2Uy+Y2HLwrsvQNypxQ==
-----END EC PRIVATE KEY-----
Binary files not shown. (3 files)
docker/lnd/stacker/tls.cert (new file)

@@ -0,0 +1,15 @@
-----BEGIN CERTIFICATE-----
MIICRzCCAe2gAwIBAgIQc06vWIBuP9uKeQNHKbFllDAKBggqhkjOPQQDAjA4MR8w
HQYDVQQKExZsbmQgYXV0b2dlbmVyYXRlZCBjZXJ0MRUwEwYDVQQDEww4Y2M4NDFk
MjY2MzgwHhcNMjQwMzA3MTcwMjE5WhcNMjUwNTAyMTcwMjE5WjA4MR8wHQYDVQQK
ExZsbmQgYXV0b2dlbmVyYXRlZCBjZXJ0MRUwEwYDVQQDEww4Y2M4NDFkMjY2Mzgw
WTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQT/nwvMHaVCfdVaeIgv8MKS+SHAS9c
Elif7Xqa7qsVvPiW7Vnh4MDVEBlM5rg0nkaH6V17sCC3rse/OqPLfVY1o4HYMIHV
MA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8E
BTADAQH/MB0GA1UdDgQWBBQmamVn/KcRqHoNR9dk9C1g2M+jSTB+BgNVHREEdzB1
ggw4Y2M4NDFkMjY2MziCCWxvY2FsaG9zdIILc3RhY2tlcl9sbmSCFGhvc3QuZG9j
a2VyLmludGVybmFsggR1bml4ggp1bml4cGFja2V0ggdidWZjb25uhwR/AAABhxAA
AAAAAAAAAAAAAAAAAAABhwSsGwAGMAoGCCqGSM49BAMCA0gAMEUCIFD273WBcMKz
UPoOL8bwq15JXtrSGePKpAeN1TblY4Q5AiEAvKtuk+ssx9WQFZBEiWxCSjW5geKk
6HB7TdxsU+ZbfLg=
-----END CERTIFICATE-----
docker/lnd/stacker/tls.key (new file)

@@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIOxH9uY8mpnlo/X5gRAAVOzOuEPIAOuHHlezkba3vIuHoAoGCCqGSM49
AwEHoUQDQgAEE/58LzB2lQn3VWniIL/DCkvkhwEvXBJYn+16mu6rFbz4lu1Z4eDA
1RAZTOa4NJ5Gh+lde7Agt67Hvzqjy31WNQ==
-----END EC PRIVATE KEY-----
docker/s3/cors.json (new file)

@@ -0,0 +1,19 @@
{
  "CORSRules": [
    {
      "AllowedHeaders": [
        "*"
      ],
      "AllowedMethods": [
        "PUT",
        "POST",
        "GET",
        "HEAD"
      ],
      "AllowedOrigins": [
        "http://localhost:3000"
      ],
      "ExposeHeaders": []
    }
  ]
}
docker/s3/init-s3.sh (new file)

@@ -0,0 +1,5 @@
#!/bin/bash

pip3 install --upgrade virtualenv awscli awscli-local requests
awslocal s3 mb s3://uploads
awslocal s3api put-bucket-cors --bucket uploads --cors-configuration file:///etc/localstack/init/ready.d/cors.json
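After localstack boots and runs this init script, the bucket and CORS rules can be verified with the same awslocal CLI it installs; a sketch:

```bash
docker exec s3 awslocal s3 ls s3://uploads
docker exec s3 awslocal s3api get-bucket-cors --bucket uploads
```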
docker/db/README.md

@@ -1,116 +1,403 @@
# install postgresql anonymizer extension
```bash
git clone https://gitlab.com/dalibo/postgresql_anonymizer.git
cd postgresql_anonymizer
make extension
make install
```

-- todo need a function to modify foreign key distribution
-- like randomly assign a valid foreign key to a row
# take a 45 day sample from prod database
```bash
pg_sample --limit="\"Item\"=created_at >= now() - interval '45 days',\"Donation\"=created_at >= now() - interval '45 days',\"Earn\"=created_at >= now() - interval '45 days',\"ItemAct\"=created_at >= now() - interval '45 days',\"Sub\"=*,\"SubAct\"=*,_prisma_migrations=*" stackernews > sample.sql
```

anon.random_in(SELECT id FROM users)
# load the sample and take 5% of it
```bash
createdb sample
psql sample < sample.sql
pg_sample --limit="\"Item\"=5%,\"Donation\"=5%,\"Earn\"=5%,\"ItemAct\"=5%,\"Sub\"=*,\"SubAct\"=*,_prisma_migrations=*" sample > sample.sql
```

# create a new database from data
```bash
dropdb sample
createdb sample
psql sample < sample.sql
```

# initialize the extension
we turn on privacy by default just to make sure default values are set if we forget to mask a column

```sql
ALTER DATABASE sample SET session_preload_libraries = 'anon';
ALTER DATABASE sample SET anon.privacy_by_default = true;
-- restart session
-- turn off triggers
```

restart session

# begin statically masking the data

We want to keep the variety of data for development purposes, but the exactness is irrelevant. We lose quite a bit of data consistency but that shouldn't matter. We turn off triggers to make this faster.

In some future version it might be worth keeping data consistency and masking other tables.

### initialize the extension
```sql
-- turn off triggers to make this faster
SET session_replication_role = replica;
CREATE EXTENSION IF NOT EXISTS anon CASCADE;
SELECT anon.init();
```

-- basically we don't want to mask the following columns ... preferring instead to shuffle the data
-- and in some cases introduce noise
### drop all sensitive tables we won't need
```sql
DELETE FROM pgboss.job;
DELETE FROM pgboss.archive;
DELETE FROM "Snl";
DELETE FROM "Wallet";
DELETE FROM "WalletLightningAddress";
DELETE FROM "WalletLND";
DELETE FROM "Mute";
DELETE FROM "Arc";
DELETE FROM "Streak";
DELETE FROM "NostrRelay";
DELETE FROM "UserNostrRelay";
DELETE FROM "ItemUpload";
DELETE FROM "Upload";
DELETE FROM "LnAuth";
DELETE FROM "LnWith";
DELETE FROM "Invite";
DELETE FROM "Message";
DELETE FROM "ItemForward";
DELETE FROM "PollOption";
DELETE FROM "PollVote";
DELETE FROM "MuteSub";
DELETE FROM "Pin";
DELETE FROM "ReferralAct";
DELETE FROM "Mention";
DELETE FROM "Invoice";
DELETE FROM "Withdrawl";
DELETE FROM "accounts";
DELETE FROM "OFAC";
DELETE FROM "sessions";
DELETE FROM "verification_requests";
DELETE FROM "Bookmark";
DELETE FROM "ThreadSubscription";
DELETE FROM "UserSubscription";
DELETE FROM "PushSubscription";
DELETE FROM "Log";
DELETE FROM "TerritoryTransfer";
```

### mask and shuffle the users table

```sql
-- users
SELECT anon.shuffle_column('users', 'created_at', 'id');
SELECT anon.shuffle_column('users', 'updated_at', 'id');
SELECT anon.shuffle_column('users', 'lastSeenAt', 'id');
SELECT anon.shuffle_column('users', 'inviteId', 'id');
SELECT anon.shuffle_column('users', 'referrerId', 'id');
SELECT anon.shuffle_column('users', 'msats', 'id');
SELECT anon.shuffle_column('users', 'stackedMsats', 'id');
SELECT anon.shuffle_column('users', 'bioId', 'id');
-- introduce noise on these columns
SELECT anon.add_noise_on_numeric_column('users', 'msats', 1);
SELECT anon.add_noise_on_numeric_column('users', 'stackedMsats', 1);
SECURITY LABEL FOR anon ON COLUMN users.created_at
IS 'MASKED WITH FUNCTION anon.random_in_tsrange(''[2021-10-01,2024-2-20]'')';
SECURITY LABEL FOR anon ON COLUMN users.updated_at
IS 'MASKED WITH FUNCTION anon.random_in_tsrange(''[2021-10-01,2024-2-20]'')';
SECURITY LABEL FOR anon ON COLUMN users.msats
IS 'MASKED WITH FUNCTION anon.random_in_int8range(''[0,250000000]'')';
SECURITY LABEL FOR anon ON COLUMN users."stackedMsats"
IS 'MASKED WITH FUNCTION anon.random_in_int8range(''[0,2500000000]'')';
-- set masking for columns we want to mask
SECURITY LABEL FOR anon ON COLUMN users.name
IS 'MASKED WITH VALUE anon.fake_first_name() || anon.fake_last_name() || anon.random_string(3)';
-- set not to mask for columns we don't want to mask
SECURITY LABEL FOR anon ON COLUMN users.created_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users.updated_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users."lastSeenAt"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users."inviteId"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users."referrerId"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users.msats
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users."stackedMsats"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users."bioId"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users.id
IS 'NOT MASKED';
SELECT anon.anonymize_table('users');
```
select
### mask other tables mostly by randomizing the userId column

#### donation

```sql
-- donation
SELECT anon.shuffle_column('"Donation"', 'created_at', 'id');
SELECT anon.shuffle_column('"Donation"', 'updated_at', 'id');
-- introduce noise on these columns
SELECT anon.add_noise_on_numeric_column('"Donation"', 'sats', 1);
-- set masking for columns we want to mask
SECURITY LABEL FOR anon ON COLUMN "Donation"."userId"
IS 'MASKED WITH FUNCTION anon.random_in(ARRAY(SELECT id FROM users))';
IS 'MASKED WITH FUNCTION anon.random_in(ARRAY(SELECT id FROM public.users))';
-- set not to mask for columns we don't want to mask
SECURITY LABEL FOR anon ON COLUMN "Donation".id
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Donation".sats
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Donation".created_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Donation".updated_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN users."lastSeenAt"
SELECT anon.anonymize_table('"Donation"');
```

#### earn
```sql
-- set masking for columns we want to mask
SECURITY LABEL FOR anon ON COLUMN "Earn"."userId"
IS 'MASKED WITH FUNCTION anon.random_in(ARRAY(SELECT id FROM public.users))';
-- set not to mask for columns we don't want to mask
SECURITY LABEL FOR anon ON COLUMN "Earn".id
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Earn".created_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Earn".updated_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Earn".msats
IS 'NOT MASKED';
SELECT anon.anonymize_table('"Earn"');
```

#### item
```sql
SECURITY LABEL FOR anon ON COLUMN "Item"."userId"
IS 'MASKED WITH FUNCTION anon.random_in(ARRAY(SELECT id FROM public.users))';
-- set not to mask for columns we don't want to mask
SECURITY LABEL FOR anon ON COLUMN "Item".id
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Item".created_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Item".msats
IS 'MASKED WITH FUNCTION anon.random_in_int8range(''[0,250000000]'')';
SECURITY LABEL FOR anon ON COLUMN "Item"."weightedVotes"
IS 'MASKED WITH FUNCTION anon.random_in_numrange(''[0,30]'')';
SECURITY LABEL FOR anon ON COLUMN "Item".text
IS 'MASKED WITH VALUE CASE WHEN "Item".text IS NULL
  THEN "Item".text
  ELSE anon.lorem_ipsum(characters := LENGTH("Item".text))
  END';
SECURITY LABEL FOR anon ON COLUMN "Item".title
IS 'MASKED WITH VALUE CASE WHEN "Item".title IS NULL
  THEN "Item".title
  ELSE anon.lorem_ipsum(characters := LENGTH("Item".title))
  END';
SECURITY LABEL FOR anon ON COLUMN "Item".url
IS 'MASKED WITH VALUE ''https://example.com/''';
SECURITY LABEL FOR anon ON COLUMN "Item".updated_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Item".path
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Item"."parentId"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Item"."subName"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Item"."ncomments"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Item"."rootId"
IS 'NOT MASKED';
SELECT anon.anonymize_table('"Item"');
```

#### itemAct
```sql
SECURITY LABEL FOR anon ON COLUMN "ItemAct"."userId"
IS 'MASKED WITH FUNCTION anon.random_in(ARRAY(SELECT id FROM public.users))';
-- set not to mask for columns we don't want to mask
SECURITY LABEL FOR anon ON COLUMN "ItemAct"."id"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "ItemAct"."itemId"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "ItemAct".created_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "ItemAct".updated_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "ItemAct".act
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "ItemAct".msats
IS 'NOT MASKED';
SELECT anon.anonymize_table('"ItemAct"');
```

-- earn
-- Invite
-- Item
-- ItemAct
-- ItemForward
-- PollOption
-- PollVote
-- Sub
-- SubAct
-- Pin
-- ReferralAct
#### sub
```sql
SECURITY LABEL FOR anon ON COLUMN "Sub"."userId"
IS 'MASKED WITH FUNCTION anon.random_in(ARRAY(SELECT id FROM public.users))';
-- set not to mask for columns we don't want to mask
SECURITY LABEL FOR anon ON COLUMN "Sub"."name"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Sub"."path"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Sub".created_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Sub".updated_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Sub"."billingType"
IS 'MASKED WITH VALUE ''ONCE''';
SECURITY LABEL FOR anon ON COLUMN "Sub"."billingCost"
IS 'MASKED WITH VALUE 0';
SECURITY LABEL FOR anon ON COLUMN "Sub"."rankingType"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "Sub"."postTypes"
IS 'NOT MASKED';
SELECT anon.anonymize_table('"Sub"');
```
-- truncate tables that are irrelevant for local development
TRUNCATE TABLE "Wallet";
TRUNCATE TABLE "WalletLightningAddress";
TRUNCATE TABLE "WalletLND";
TRUNCATE TABLE "Mute";
TRUNCATE TABLE "Arc";
TRUNCATE TABLE "Streak";
TRUNCATE TABLE "NostrRelay";
TRUNCATE TABLE "UserNostrRelay";
TRUNCATE TABLE "LNAuth";
TRUNCATE TABLE "LnWith";
TRUNCATE TABLE "Message";
TRUNCATE TABLE "MuteSub";
TRUNCATE TABLE "Mention";
TRUNCATE TABLE "Invoice";
TRUNCATE TABLE "Withdrawal";
TRUNCATE TABLE "Account";
TRUNCATE TABLE "OFAC";
TRUNCATE TABLE "Session";
TRUNCATE TABLE "VerificationToken";
TRUNCATE TABLE "ThreadSubscription";
TRUNCATE TABLE "UserSubscription";
TRUNCATE TABLE "PushSubscription";
TRUNCATE TABLE "Log";
#### subAct
```sql
SECURITY LABEL FOR anon ON COLUMN "SubAct"."userId"
IS 'MASKED WITH FUNCTION anon.random_in(ARRAY(SELECT id FROM public.users))';
-- shuffle the subName column
SELECT anon.shuffle_column('"SubAct"', 'subName', 'id');
SELECT anon.shuffle_column('"SubAct"', 'msats', 'id');
-- set not to mask for columns we don't want to mask
SECURITY LABEL FOR anon ON COLUMN "SubAct".id
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "SubAct"."subName"
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "SubAct".type
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "SubAct".msats
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "SubAct".created_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN "SubAct".updated_at
IS 'NOT MASKED';
SELECT anon.anonymize_table('"SubAct"');
```

SELECT anon.add_noise_on_datetime_column('users', 'created_at', '1 year');
SELECT anon.add_noise_on_datetime_column('users', 'updated_at', '1 year');
#### _prisma_migrations

don't mask this table

```sql
SECURITY LABEL FOR anon ON COLUMN _prisma_migrations.id
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN _prisma_migrations.checksum
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN _prisma_migrations.finished_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN _prisma_migrations.migration_name
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN _prisma_migrations.started_at
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN _prisma_migrations.applied_steps_count
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN _prisma_migrations.rolled_back_at
IS 'NOT MASKED';
SELECT anon.anonymize_table('_prisma_migrations');
```

#### pgboss.schedule

SELECT anon.shuffle_column('"Item"', 'userId', 'id');
don't mask this table

```sql
SECURITY LABEL FOR anon ON COLUMN pgboss.schedule.name
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN pgboss.schedule.cron
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN pgboss.schedule.timezone
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN pgboss.schedule.data
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN pgboss.schedule.options
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN pgboss.schedule.created_on
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN pgboss.schedule.updated_on
IS 'NOT MASKED';
SELECT anon.anonymize_table('pgboss.schedule');
```

#### pgboss.version

don't mask this table

```sql
SECURITY LABEL FOR anon ON COLUMN pgboss.version.version
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN pgboss.version.maintained_on
IS 'NOT MASKED';
SECURITY LABEL FOR anon ON COLUMN pgboss.version.cron_on
IS 'NOT MASKED';
SELECT anon.anonymize_table('pgboss.version');
```

# turn triggers back on
```sql
SET session_replication_role = DEFAULT;
DROP EXTENSION IF EXISTS anon CASCADE;
```

# refresh all materialized views
```sql
SET search_path TO public;

CREATE OR REPLACE FUNCTION public.RefreshAllMaterializedViews(schema_arg TEXT DEFAULT 'public')
RETURNS INT AS $$
DECLARE
  r RECORD;
BEGIN
  RAISE NOTICE 'Refreshing materialized view in schema %', schema_arg;
  FOR r IN SELECT matviewname FROM pg_matviews WHERE schemaname = schema_arg
  LOOP
    RAISE NOTICE 'Refreshing %.%', schema_arg, r.matviewname;
    EXECUTE 'REFRESH MATERIALIZED VIEW ' || schema_arg || '.' || r.matviewname;
  END LOOP;

  RETURN 1;
END
$$ LANGUAGE plpgsql;

-- make sure materialized views are refreshed
SELECT public.RefreshAllMaterializedViews();
```

# dump it
```bash
pg_dump sample --no-owner --no-security-labels > anon.sql
```

# modify search_path
```sql
SELECT pg_catalog.set_config('search_path', 'public', false);
```

# set the time zone to UTC
```sql
ALTER DATABASE stackernews SET timezone TO 'UTC';
```

# modify the dump to timewarp the data at the end
```sql
CREATE OR REPLACE FUNCTION timewarp()
RETURNS VOID LANGUAGE plpgsql AS $$
DECLARE
  r RECORD;
  max_timestamp TIMESTAMP;
  interval_to_add INTERVAL;
BEGIN
  FOR r IN SELECT c.table_schema, c.table_name, c.column_name
    FROM information_schema.columns c
    JOIN information_schema.tables t ON c.table_schema = t.table_schema AND c.table_name = t.table_name
    WHERE c.data_type IN ('timestamp without time zone', 'timestamp with time zone')
      AND c.table_schema NOT IN ('pg_catalog', 'information_schema') -- Exclude system schemas
      AND t.table_type = 'BASE TABLE' -- Ensure targeting only user-defined tables (excluding views)
      AND t.table_schema NOT LIKE 'pg_%' -- Exclude other potential PostgreSQL system schemas
  LOOP
    -- Calculate the maximum value in the column
    EXECUTE format('SELECT max(%I) FROM %I.%I', r.column_name, r.table_schema, r.table_name) INTO max_timestamp;

    -- If there's a maximum value, calculate the interval and update the column
    IF max_timestamp IS NOT NULL THEN
      interval_to_add := now() - max_timestamp;
      EXECUTE format('UPDATE %I.%I SET %I = %I + %L', r.table_schema, r.table_name, r.column_name, r.column_name, interval_to_add);
    END IF;
  END LOOP;
END;
$$;

SELECT timewarp();
```

# fix denormalized comment stuff
```sql
UPDATE "Item" p SET (ncomments, "commentMsats") =
  (SELECT COALESCE(count(*), 0), COALESCE(sum(msats), 0)
  FROM "Item" c
  WHERE c.path <@ p.path AND p.id <> c.id);
```

# index all the tables
```sql
INSERT INTO pgboss.job (name) VALUES ('indexAllItems');
```
lib/constants.js

@@ -10,7 +10,9 @@ export const BOOST_MIN = BOOST_MULT * 5
export const UPLOAD_SIZE_MAX = 25 * 1024 * 1024
export const UPLOAD_SIZE_MAX_AVATAR = 5 * 1024 * 1024
export const IMAGE_PIXELS_MAX = 35000000
export const AWS_S3_URL_REGEXP = new RegExp(`https://${process.env.NEXT_PUBLIC_MEDIA_DOMAIN}/([0-9]+)`, 'g')
// backwards compatible with old media domain env var and precedence for docker url if set
export const MEDIA_URL = process.env.MEDIA_URL_DOCKER || process.env.NEXT_PUBLIC_MEDIA_URL || `https://${process.env.NEXT_PUBLIC_MEDIA_DOMAIN}`
export const AWS_S3_URL_REGEXP = new RegExp(`${MEDIA_URL}/([0-9]+)`, 'g')
export const UPLOAD_TYPES_ALLOW = [
  'image/gif',
  'image/heic',
middleware.js

@@ -20,13 +20,16 @@ export function middleware (request) {
  }

  const nonce = Buffer.from(crypto.randomUUID()).toString('base64')
  // we want to load media from other localhost ports during development
  const devSrc = process.env.NODE_ENV === 'development' ? 'localhost:* ' : ''

  const cspHeader = [
    // if something is not explicitly allowed, we don't allow it.
    "default-src 'none'",
    "font-src 'self' a.stacker.news",
    // we want to load images from everywhere but we can limit to HTTPS at least
    "img-src 'self' a.stacker.news m.stacker.news https: data: blob:",
    "media-src 'self' a.stacker.news m.stacker.news",
    `img-src 'self' ${devSrc}a.stacker.news m.stacker.news https: data: blob:`,
    `media-src 'self' ${devSrc}a.stacker.news m.stacker.news`,
    // Using nonces and strict-dynamic deploys a strict CSP.
    // see https://cheatsheetseries.owasp.org/cheatsheets/Content_Security_Policy_Cheat_Sheet.html#strict-policy.
    // Old browsers will ignore nonce and strict-dynamic and fallback to host-based matching and unsafe-inline
@@ -39,7 +42,7 @@ export function middleware (request) {
"style-src 'self' a.stacker.news 'unsafe-inline'",
|
||||
"manifest-src 'self'",
|
||||
'frame-src www.youtube.com platform.twitter.com',
|
||||
"connect-src 'self' https: wss:",
|
||||
`connect-src 'self' ${devSrc}https: wss:`,
|
||||
// disable dangerous plugins like Flash
|
||||
"object-src 'none'",
|
||||
// blocks injection of <base> tags
|
||||
|
|
|
package.json

@@ -9,7 +9,8 @@
"start": "NODE_OPTIONS='--trace-warnings' next start -p $PORT --keepAliveTimeout 120000",
|
||||
"lint": "standard",
|
||||
"test": "NODE_OPTIONS='--experimental-vm-modules' jest",
|
||||
"worker": "tsx --trace-warnings worker/index.js"
|
||||
"worker": "tsx --trace-warnings worker/index.js",
|
||||
"worker:dev": "tsx --trace-warnings --watch worker/index.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"@apollo/client": "^3.8.5",
|
||||
|
|
|
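worker:dev leans on tsx's --watch flag, so the worker process restarts whenever worker/index.js (or anything it imports) changes; a usage sketch:

```bash
# inside the worker container this is the compose command; it also works locally
npm run worker:dev
```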
sndev (new file)

@@ -0,0 +1,351 @@
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
docker__compose() {
|
||||
if [ ! -x "$(command -v docker-compose)" ]; then
|
||||
echo "docker compose is not installed"
|
||||
echo "installation instructions are here: https://docs.docker.com/desktop/"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
CURRENT_UID=$(id -u) CURRENT_GID=$(id -g) command docker compose --env-file .env.development "$@"
|
||||
}
|
||||
|
||||
docker__exec() {
|
||||
if [ ! -x "$(command -v docker)" ]; then
|
||||
echo "docker is not installed"
|
||||
echo "installation instructions are here: https://docs.docker.com/desktop/"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
command docker exec -i "$@"
|
||||
}
|
||||
|
||||

docker__sn_lnd() {
  t=$1
  if [ "$t" = "-t" ]; then
    shift
  else
    t=""
  fi

  docker__exec "$t" -u lnd sn_lnd lncli "$@"
}

docker__stacker_lnd() {
  t=$1
  if [ "$t" = "-t" ]; then
    shift
  else
    t=""
  fi

  docker__exec "$t" -u lnd stacker_lnd lncli "$@"
}

sndev__start() {
  shift

  if ! [ -f .env.development ]; then
    echo ".env.development does not exist ... creating from .env.sample"
    cp .env.sample .env.development
  elif ! git diff --exit-code --diff-algorithm=histogram .env.sample .env.development; then
    echo ".env.development is different from .env.sample ..."
    echo "do you want to merge .env.sample into .env.development? [y/N]"
    read -r answer
    if [ "$answer" = "y" ]; then
      # merge .env.sample into .env.development in a posix compliant way
      git merge-file --theirs .env.development /dev/fd/3 3<<-EOF /dev/fd/4 4<<-EOF
$(git show HEAD:.env.sample)
EOF
$(cat .env.sample)
EOF
    else
      echo "merge cancelled"
    fi
  fi

  if [ $# -eq 0 ]; then
    docker__compose up --build
    exit 0
  fi

  docker__compose up "$@"
}

sndev__help_start() {
  help="
start the sndev env

USAGE
  $ sndev start [OPTIONS]

OPTIONS"

  echo "$help"
  docker compose up --help | awk '/Options:/{y=1;next}y'
}

sndev__stop() {
  shift
  docker__compose down "$@"
}

sndev__help_stop() {
  help="
stop the sndev env

USAGE
  $ sndev stop [OPTIONS]

OPTIONS"

  echo "$help"
  docker compose down --help | awk '/Options:/{y=1;next}y'
}

sndev__restart() {
  shift
  docker__compose restart "$@"
}

sndev__help_restart() {
  help="
restart the sndev env

USAGE
  $ sndev restart [OPTIONS]

OPTIONS"

  echo "$help"
  docker compose restart --help | awk '/Options:/{y=1;next}y'
}

sndev__status() {
  shift
  if [ $# -eq 0 ]; then
    docker__compose ps --format 'table {{.Service}}\t{{.State}}\t{{.Status}}\t{{.Label "CONNECT"}}'
    exit 0
  fi
  docker__compose ps "$@"
}

sndev__help_status() {
  help="
show container status of sndev env

USAGE
  $ sndev status [OPTIONS]

OPTIONS"

  echo "$help"
  docker compose ps --help | awk '/Options:/{y=1;next}y'
}

sndev__delete() {
  echo "this will delete the containers, volumes, and orphans - are you sure? [y/N]"
  read -r answer
  if [ "$answer" = "y" ]; then
    docker__compose down --volumes --remove-orphans
  else
    echo "delete cancelled"
  fi
}

sndev__help_delete() {
  help="
remove orphans and volumes from sndev env
equivalent to sndev stop --volumes --remove-orphans

USAGE
  $ sndev delete
"

  echo "$help"
}

sndev__fund() {
  shift
  docker__stacker_lnd -t payinvoice "$@"
}

sndev__help_fund() {
  help="
pay a bolt11 for funding

USAGE
  $ sndev fund <bolt11> [OPTIONS]

OPTIONS"

  echo "$help"
  docker__stacker_lnd payinvoice -h | awk '/OPTIONS:/{y=1;next}y' | awk '!/^[\t ]+--pay_req value/'
}

sndev__withdraw() {
  shift
  docker__stacker_lnd addinvoice --amt "$@" | jq -r '.payment_request'
}

sndev__help_withdraw() {
  help="
create a bolt11 for withdrawal

USAGE
  $ sndev withdraw <amount sats> [OPTIONS]

OPTIONS"

  echo "$help"
  docker__stacker_lnd addinvoice -h | awk '/OPTIONS:/{y=1;next}y' | awk '!/^[\t ]+(--amt|--amt_msat) value/'
}

sndev__psql() {
  shift
  docker__exec -t db psql "$@" -U sn -d stackernews
}

sndev__help_psql() {
  help="
open psql on db

USAGE
  $ sndev psql [OPTIONS]

OPTIONS"

  echo "$help"
  docker__exec db psql --help | awk '/General options:/{y=1;next}y' | sed -n '/Connection options:/q;p' |
    awk '!/^([\t ]+-l, --list)|([\t ]+-d, --dbname)|([\t ]+-\?, --help)|([\t ]--help=)/'
}

sndev__prisma() {
  shift
  docker__exec -t -u apprunner app npx prisma "$@"
}

sndev__help_prisma() {
  help="
run prisma commands

USAGE
  $ sndev prisma [COMMAND]

COMMANDS"

  echo "$help"
  sndev__prisma --help | awk '/Commands/{y=1;next}y' | awk '!/^([\t ]+init)|([\t ]+studio)/' | sed -n '/Flags/q;p'
}

sndev__compose() {
  shift
  docker__compose "$@"
}

sndev__help_compose() {
  docker__compose --help
}

sndev__sn_lncli() {
  shift
  docker__sn_lnd -t "$@"
}

sndev__help_sn_lncli() {
  docker__sn_lnd --help
}

sndev__stacker_lncli() {
  shift
  docker__stacker_lnd -t "$@"
}

sndev__help_stacker_lncli() {
  docker__stacker_lnd --help
}

sndev__help() {
  if [ $# -eq 2 ]; then
    call "sndev__$1_$2" "$@"
    exit 0
  fi

  help="
                        888
                        888
                        888
 .d8888b  88888b.   .d88888  .d88b.  888  888
 88K      888 '88b d88' 888 d8P  Y8b 888  888
 'Y8888b. 888  888 888  888 88888888 Y88  88P
      X88 888  888 Y88b 888 Y8b.      Y8bd8P
  88888P' 888  888  'Y88888  'Y8888    Y88P

manages a docker based stacker news development environment

USAGE
  $ sndev [COMMAND]
  $ sndev help [COMMAND]

COMMANDS
  help            show help

  env:
    start         start env
    stop          stop env
    restart       restart env
    status        status of env
    delete        delete env

  lnd:
    fund          pay a bolt11 for funding
    withdraw      create a bolt11 for withdrawal

  db:
    psql          open psql on db
    prisma        run prisma commands

  other:
    compose       docker compose passthrough
    sn_lncli      lncli passthrough on sn_lnd
    stacker_lncli lncli passthrough on stacker_lnd
"
  echo "$help"
}

call() {
  func=$1
  if type "$func" 1>/dev/null 2>&1; then
    # if it's sndev COMMAND help, then call help for that command
    case $3 in
      -h|--help|help)
        call "sndev__help_$2"
        exit 0
        ;;
    esac
    shift # remove func from args
    "$func" "$@" # invoke our named function w/ all remaining arguments
  else
    # if it's sndev -h COMMAND, then call help for that command
    case $2 in
      -h|--help)
        call "sndev__help_$3"
        exit 0
        ;;
    esac
    sndev__help
    exit 1
  fi
}

call "sndev__$1" "$@"
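
Once the script is executable in the repo root, a typical local session looks like this; the bolt11 argument is a placeholder:

```sh
./sndev start                # boot the whole stack (first run builds images)
./sndev status               # check container health
./sndev withdraw 1000        # print a bolt11 paying out 1000 sats from stacker_lnd
./sndev fund lnbcrt1...      # pay a bolt11 (placeholder) to fund a stacker
./sndev psql -c 'SELECT count(*) FROM "Item"'   # ad-hoc queries against the dev db
./sndev stop                 # tear the stack down
```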

@@ -4,6 +4,10 @@ FROM node:18.17.0-bullseye

 ENV NODE_ENV=development

 WORKDIR /app
+ARG UID
+ARG GID
+RUN groupadd -fg "$GID" apprunner
+RUN useradd -om -u "$UID" -g "$GID" apprunner
+USER apprunner

 EXPOSE 8080
 WORKDIR /app
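
The UID/GID build args let the container user match the host user so bind-mounted files keep sane ownership; `groupadd -f` tolerates an existing group id and `useradd -o` allows a duplicate uid on debian. A sketch of supplying the args manually (compose passes CURRENT_UID and CURRENT_GID for you; the image tag is hypothetical):

```sh
# manual equivalent of what docker compose does with CURRENT_UID/CURRENT_GID
docker build --build-arg UID="$(id -u)" --build-arg GID="$(id -g)" -t sn-app-dev .
```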

@@ -10,7 +10,7 @@ if (!imgProxyEnabled) {
   console.warn('IMGPROXY_* env vars not set, imgproxy calls are no-ops now')
 }

-const IMGPROXY_URL = process.env.NEXT_PUBLIC_IMGPROXY_URL
+const IMGPROXY_URL = process.env.IMGPROXY_URL_DOCKER || process.env.NEXT_PUBLIC_IMGPROXY_URL
 const IMGPROXY_SALT = process.env.IMGPROXY_SALT
 const IMGPROXY_KEY = process.env.IMGPROXY_KEY

@@ -89,6 +89,12 @@ export const createImgproxyUrls = async (id, text, { models, forceFetch }) => {
   const imgproxyUrls = {}
   for (let url of urls) {
     if (!url) continue
+    let fetchUrl = url
+    if (process.env.MEDIA_URL_DOCKER) {
+      console.log('[imgproxy] id:', id, '-- replacing media url:', url)
+      fetchUrl = url.replace(process.env.NEXT_PUBLIC_MEDIA_URL, process.env.MEDIA_URL_DOCKER)
+      console.log('[imgproxy] id:', id, '-- with:', fetchUrl)
+    }

     console.log('[imgproxy] id:', id, '-- processing url:', url)
     if (url.startsWith(IMGPROXY_URL)) {

@@ -97,17 +103,17 @@ export const createImgproxyUrls = async (id, text, { models, forceFetch }) => {
       url = decodeOriginalUrl(url)
       console.log('[imgproxy] id:', id, '-- original url:', url)
     }
-    if (!(await isImageURL(url, { forceFetch }))) {
+    if (!(await isImageURL(fetchUrl, { forceFetch }))) {
       console.log('[imgproxy] id:', id, '-- not image url:', url)
       continue
     }
     imgproxyUrls[url] = {
-      dimensions: await getDimensions(url)
+      dimensions: await getDimensions(fetchUrl)
     }
     for (const res of resolutions) {
       const [w, h] = res.split('x')
       const processingOptions = `/rs:fit:${w}:${h}`
-      imgproxyUrls[url][`${w}w`] = createImgproxyUrl({ url, options: processingOptions })
+      imgproxyUrls[url][`${w}w`] = createImgproxyPath({ url: fetchUrl, options: processingOptions })
     }
   }
   return imgproxyUrls

@@ -115,17 +121,17 @@ export const createImgproxyUrls = async (id, text, { models, forceFetch }) => {

 const getDimensions = async (url) => {
   const options = '/d:1'
-  const imgproxyUrl = createImgproxyUrl({ url, options, pathname: 'info' })
+  const imgproxyUrl = new URL(createImgproxyPath({ url, options, pathname: '/info' }), IMGPROXY_URL).toString()
   const res = await fetch(imgproxyUrl)
   const { width, height } = await res.json()
   return { width, height }
 }

-const createImgproxyUrl = ({ url, pathname = '', options }) => {
+const createImgproxyPath = ({ url, pathname = '/', options }) => {
   const b64Url = Buffer.from(url, 'utf-8').toString('base64url')
   const target = path.join(options, b64Url)
   const signature = sign(target)
-  return new URL(path.join(pathname, signature, target), IMGPROXY_URL).toString()
+  return path.join(pathname, signature, target)
 }

 async function fetchWithTimeout (resource, { timeout = 1000, ...options } = {}) {
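
Splitting the old `createImgproxyUrl` into `createImgproxyPath` means only a signed, relative path is produced; callers pick the base, presumably so the worker can resolve it against the docker-internal `IMGPROXY_URL_DOCKER` while browsers use `NEXT_PUBLIC_IMGPROXY_URL`. A sketch of that resolution; the path and hostnames below are illustrative placeholders:

```sh
# how one relative signed path resolves against different bases
node -e '
const p = "/sig123/rs:fit:640:480/aHR0cHM6Ly9leGFtcGxlLmNvbS8x"
console.log(new URL(p, "http://imgproxy:8080").toString())         // inside the compose network
console.log(new URL(p, "https://imgprxy.example.com").toString())  // browser-facing base
'
```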