Compare commits

..

172 Commits

Author SHA1 Message Date
asonix
6ff7b59778 Prepare 0.3.116 2024-12-09 20:59:14 -06:00
asonix
d9da352558 Update teloxide 2024-12-09 19:40:33 -06:00
asonix
aea64c726a Update opentelemetry stack 2024-12-09 19:37:35 -06:00
asonix
e243bd4600 Update bcrypt 2024-12-09 19:30:59 -06:00
asonix
a452fb91ba Fix build due to reqwest-tracing semver break 2024-12-09 19:30:30 -06:00
asonix
35acc916f2 flake: Use nixos-24.11 stable 2024-12-09 19:23:49 -06:00
asonix
752067ffb7 Update dependencies (minor & point) 2024-08-05 16:45:32 -05:00
asonix
b308e080af Update console-subscriber 2024-08-05 16:44:39 -05:00
asonix
6ab37dc06f Update opentelemetry stack 2024-08-05 16:43:51 -05:00
asonix
a23b30cc91 Bump version 2024-07-09 16:45:38 -05:00
asonix
1b58a50d44 Merge pull request 'Start upgrading to hyper and http 1' (#3) from asonix/hyper-1 into main
Reviewed-on: https://git.asonix.dog/asonix/relay/pulls/3
2024-07-09 21:39:10 +00:00
asonix
308a945283 Start upgrading to http1 2024-07-09 16:32:05 -05:00
asonix
86cab5d2d9 Update opentelemetry stack 2024-07-09 16:28:00 -05:00
asonix
a70e75665b Update console-subscriber 2024-07-09 16:26:33 -05:00
asonix
f1792c8eb3 Update dashmap 2024-07-09 16:26:00 -05:00
asonix
d918ef1495 Update rustls 2024-07-09 16:24:44 -05:00
asonix
2870789e1f Update background jobs, async-cpupool, metrics 2024-07-09 16:21:53 -05:00
asonix
cda92e7523 Update flake 2024-06-23 13:57:40 -05:00
asonix
43b03a176c Don't fail publish on clippy warnings
unfixable without ructe release
2024-06-23 13:57:28 -05:00
asonix
a465d1ae5b Allow versions to be unused 2024-06-23 13:56:37 -05:00
asonix
4fa7674a35 Move cargo config to config.toml 2024-06-23 13:55:10 -05:00
asonix
8c14d613f7 Prepare v0.3.114 2024-06-23 13:45:10 -05:00
asonix
aff2431681 Update dependencies (minor & point) 2024-06-23 13:42:26 -05:00
asonix
5aa97212b3 Impose limits on the size of downloaded content from foreign servers 2024-06-23 13:35:24 -05:00
asonix
97567cf598 Prepare v0.3.113 2024-05-01 15:45:53 -05:00
asonix
4c663f399e Update dependencies (minor & point) 2024-05-01 15:43:53 -05:00
asonix
8a3256f52a Avoid deadlock of iterating over tree while transacting on that tree 2024-05-01 15:43:08 -05:00
asonix
13a2653fe8 Remove prerelease flag 2024-04-23 14:00:04 -05:00
asonix
8dd9a86d22 Use match_pattern rather than path for metrics differentiation 2024-04-21 11:44:16 -05:00
asonix
5c0c0591dd Prepare 0.3.112 2024-04-14 22:47:38 -05:00
asonix
04ca4e5401 Stable async-cpupool 2024-04-14 19:53:31 -05:00
asonix
1de1d76506 prerelease 2024-04-13 13:57:12 -05:00
asonix
dd9225bb89 Prepare v0.3.111 2024-04-07 11:53:24 -05:00
asonix
b577730836 Fix build 2024-04-07 11:40:57 -05:00
asonix
21883c168b BROKEN! Start collecting more metrics about various sizes 2024-04-07 11:04:03 -05:00
asonix
76a0c79369 Update base64, ammonia 2024-04-06 13:42:29 -05:00
asonix
6444782db9 Bump version, Update dependencies (minor & point) 2024-04-06 13:34:54 -05:00
asonix
14aea3256d Update dependencies (minor & point) 2024-03-23 19:10:13 -05:00
asonix
f4f2aa2025 Update flake 2024-03-23 19:09:53 -05:00
asonix
615271fe80 Update opentelemetry dependencies, other dependencies minor & point 2024-03-10 20:09:16 -05:00
asonix
4aed601664 No console by default 2024-02-25 21:08:17 -06:00
asonix
bf21f05aca Strip release binaries 2024-02-12 15:16:20 -06:00
asonix
e69f6c6edb Remove prerelease marker 2024-02-12 14:32:46 -06:00
asonix
1e05eb4fe4 Bump version 2024-02-12 13:46:44 -06:00
asonix
7f09ac3edd Update dependencies (minor & point) 2024-02-12 13:42:45 -06:00
asonix
4788ad332a Update image version in docker-compose 2024-02-11 14:56:26 -06:00
asonix
1fd82915d3 Remove bad argument 2024-02-11 14:52:57 -06:00
asonix
0472082a97 Add actions, remove drone 2024-02-11 14:49:22 -06:00
asonix
c8250acce7 Bump version 2024-02-05 00:25:15 -06:00
asonix
b074759eb4 Update background-jobs, rework errors 2024-02-05 00:24:49 -06:00
asonix
ed399f1531 Be more accurate for reqwest errors 2024-02-04 20:51:25 -06:00
asonix
7e39acdcb0 Update config 2024-02-04 20:28:18 -06:00
asonix
894d096622 Bump version 2024-02-04 20:25:59 -06:00
asonix
05e31254ba Update rustls for actix-web, log less 2024-02-04 20:25:50 -06:00
asonix
086ca9fbf2 Support live-reloading TLS certificate 2024-01-31 16:49:23 -06:00
asonix
603fcc6e57 Bump version 2024-01-18 13:35:00 -05:00
asonix
6b8f15ee08 Use stable background-jobs 2024-01-18 13:34:10 -05:00
asonix
53939f8ae8 Go back to job-server per core 2024-01-18 12:31:26 -05:00
asonix
b53b34c515 Update dependencies (minor & point) 2024-01-14 16:16:56 -05:00
asonix
6dcdf2fc87 clippy 2024-01-14 16:10:32 -05:00
asonix
83e5619eb4 Update flake.lock 2024-01-14 16:10:19 -05:00
asonix
9090bb5c62 Bump version 2024-01-14 15:59:16 -05:00
asonix
d862bf8106 Use tokio rather than actix-rt 2024-01-14 15:56:07 -05:00
asonix
417553e643 Bump version 2024-01-09 18:09:51 -06:00
asonix
a2456c3d5f Update dependencies (minor & point) 2024-01-09 18:08:10 -06:00
asonix
2b3cb8db92 clippy 2024-01-08 17:10:31 -06:00
asonix
18f1096221 Update version 2024-01-08 17:06:02 -06:00
asonix
c640567206 Update to newest background-jobs, implement Job rather than ActixJob 2024-01-08 17:00:15 -06:00
asonix
36aa9120ea Update metrics 2024-01-07 12:43:58 -06:00
asonix
e377f3988b Update minify-html, dependencies (minor & point) 2024-01-07 12:10:43 -06:00
asonix
8c811710ac Bump version 2023-11-25 21:27:05 -06:00
asonix
e4f665d75f use stable async-cpupool 2023-11-25 21:17:59 -06:00
asonix
4383357abe update flake 2023-11-25 20:27:20 -06:00
asonix
f70af22c6a clippy 2023-11-25 20:27:11 -06:00
asonix
8bce3d172f Update streem 2023-11-25 20:20:38 -06:00
asonix
8540e93469 Use async-cpupool 2023-11-25 20:18:11 -06:00
asonix
708e7da301 Update opentelemetry, ring, http-signature-normalization, tracing-log 2023-11-25 20:16:13 -06:00
asonix
a0f9827e18 Bump version 2023-09-09 18:10:31 -04:00
asonix
9ebed87cde Update http-signature-normalization-actix 2023-09-09 18:09:24 -04:00
asonix
ae3d19a774 Bump version 2023-09-09 17:31:42 -04:00
asonix
2a5e769afb Update http-signature-normalization-actix 2023-09-09 17:30:07 -04:00
asonix
f4839d688e Update dependencies (minor & point) 2023-09-09 16:52:53 -04:00
asonix
206db2079f Remove futures-util dependency 2023-09-09 16:46:22 -04:00
asonix
6714fe48ed Bump version, enable tokio_unstable for console 2023-09-08 19:15:19 -06:00
asonix
804d22ee81 Enable different breaker failure cases for different endpoints
Additionally, don't count 4xx towards succeeding a breaker
2023-09-08 19:11:24 -06:00
asonix
5a6fbbcb77 Update tracing-opentelemetry 2023-09-08 18:41:55 -06:00
asonix
ea926f73c4 Update dependencies (minor & point) 2023-09-08 18:39:37 -06:00
asonix
53b14c3329 Bump version 2023-08-29 23:05:41 -05:00
asonix
9b1fad0e2e Update rustls 2023-08-29 22:15:41 -05:00
asonix
a8ba53fe11 Update flake 2023-08-26 12:20:36 -05:00
asonix
927fb91a5e Update flume 2023-08-17 17:11:08 -05:00
asonix
4d4093c15a Bump version 2023-08-17 17:10:24 -05:00
asonix
75df271b58 Switch from awc to reqwest, enable HTTP Proxies 2023-08-17 17:09:35 -05:00
asonix
73b429ab51 Update opentelemetry 2023-08-05 12:47:52 -05:00
asonix
2f57c855a4 Bump version 2023-08-04 19:01:05 -05:00
asonix
cdbde9519e Update dependencies (minor & point) 2023-08-04 18:57:53 -05:00
asonix
2cbe4864c3 Switch to ring for crypto 2023-08-04 18:57:53 -05:00
asonix
731a831070 Bump version 2023-07-28 17:47:51 -05:00
asonix
795d3238ad Hide nodes that failed breakers from index page 2023-07-28 17:46:23 -05:00
asonix
60abec2b96 Bump version 2023-07-27 13:48:01 -05:00
asonix
e63e1f975e Use verify spawner in routes 2023-07-27 13:39:31 -05:00
asonix
5430da58aa Update description in nix file 2023-07-27 13:14:46 -05:00
asonix
927f15c4ca Update dependencies (minor & point) 2023-07-27 13:11:00 -05:00
asonix
ef57576c57 Bump version 2023-07-27 13:10:00 -05:00
asonix
7438b0c5d0 Use verify spawner in all cases in verify path 2023-07-27 13:09:03 -05:00
asonix
f06316c6b2 Bump version 2023-07-27 12:20:41 -05:00
asonix
f86bbc95ae Pass spawner to digest middleware 2023-07-27 12:20:05 -05:00
asonix
a500824a7d Shorten thread names 2023-07-27 11:21:44 -05:00
asonix
433c981a21 Simplify < RATIO, bump version 2023-07-27 11:10:29 -05:00
asonix
f3ff8ae5f7 Split available signature threads between sign & verify 2023-07-27 11:08:20 -05:00
asonix
f24685e700 Allow naming spawner threads 2023-07-27 10:53:01 -05:00
asonix
5de244b848 Add complete to signature thread duration 2023-07-27 10:39:24 -05:00
asonix
769f7451f9 Simplify signature thread 2023-07-27 10:19:20 -05:00
asonix
fff9bf112d Bump version 2023-07-27 09:57:13 -05:00
asonix
05c266c23c Give verify & admin a different queue than deliver 2023-07-27 09:55:13 -05:00
asonix
2a7fed743f Bump version 2023-07-27 09:26:49 -05:00
asonix
240eee730c Add more metrics around spawn-blocking 2023-07-27 09:26:16 -05:00
asonix
8071c6ce3f Make signature threads configurable 2023-07-26 23:04:04 -05:00
asonix
78dcce5a08 Bump version 2023-07-26 22:52:13 -05:00
asonix
11d81683e3 Add logging around parallelism 2023-07-26 22:52:13 -05:00
asonix
5d526c60fe Clippy :( 2023-07-26 19:29:03 -05:00
asonix
73c7150f97 Use spawner for CPU-bound operations 2023-07-26 18:11:44 -05:00
asonix
7cfebd927e Bump version 2023-07-26 18:04:09 -05:00
asonix
d97cc4e5a4 Use custom threadpool for client signatures 2023-07-26 18:03:21 -05:00
asonix
8ff4961ded Bump version 2023-07-25 16:07:18 -05:00
asonix
970672a392 Make client timeout configurable 2023-07-25 16:06:56 -05:00
asonix
dfbd5c9035 Add deliver_concurrency to readme 2023-07-25 14:48:09 -05:00
asonix
d365e34f47 Bump version 2023-07-25 14:46:44 -05:00
asonix
de97adc2d6 Update dependencies (minor & point) 2023-07-25 14:45:46 -05:00
asonix
d1c6f6ff5d Make delivery concurrency configurable 2023-07-25 14:45:15 -05:00
asonix
582f311a20 Bump version 2023-07-24 13:20:09 -05:00
asonix
09436746c8 Update dependencies 2023-07-24 13:19:40 -05:00
asonix
a65ff19f6a Remove unneeded mut 2023-07-21 16:36:07 -05:00
asonix
bcdef5caa1 Don't clippy dependencies 2023-07-21 16:29:58 -05:00
asonix
4651fcc9d2 Update bcrypt, lru 2023-07-19 20:25:24 -05:00
asonix
fb6d8af1ca Update flake, dependencies 2023-07-19 20:23:44 -05:00
asonix
9779518dc1 Allow rel attribute in local & footer blurb
Patch from Jaehong Kang <sinoru@me.com>
2023-07-16 22:33:43 -05:00
asonix
7a00229508 Bump version, update docs 2023-06-23 15:15:27 -05:00
asonix
346664396c Run workers on handler threads 2023-06-23 15:08:59 -05:00
asonix
74f35faa22 Keep client in thread-local storage 2023-06-23 15:01:56 -05:00
asonix
e005adfcf8 Bump version 2023-06-23 14:32:14 -05:00
asonix
d40db33eb5 Don't drop and rebuild clients, share clients better 2023-06-23 14:27:20 -05:00
asonix
246e79b261 Bump version 2023-06-23 13:47:40 -05:00
asonix
8d565a1fbe Add ability to tweak client pool size 2023-06-23 13:46:13 -05:00
asonix
18ff2864a0 Update dependencies (minor & point) 2023-06-23 13:34:39 -05:00
asonix
4b71e56f31 Update nixpkgs 2023-06-23 13:34:21 -05:00
asonix
9b4f6b47a6 cargo update 2023-06-03 13:13:37 -05:00
asonix
5fa1d4983a Update nix 2023-06-03 13:10:19 -05:00
asonix
d69a80ebe8 Update dependencies, not rustls 2023-05-24 10:19:34 -05:00
asonix
a9a47e8ee2 Update flake 2023-04-27 19:54:15 -05:00
asonix
ab2dbfb439 Update metrics, rsa 2023-04-27 19:34:23 -05:00
asonix
73bf4d1597 Remove unneeded .into_iter() 2023-03-23 14:37:33 -05:00
asonix
2cb5ad9917 Replace Double with Fanout 2023-03-23 13:51:32 -05:00
asonix
afd4105d0f Add flake 2023-03-23 13:51:23 -05:00
asonix
d644e83733 Bump version 2023-02-25 15:14:24 -06:00
asonix
ae91aa8fa7 Update bcrypt 2023-02-25 15:06:18 -06:00
asonix
73c016d418 Update deps 2023-02-25 15:04:30 -06:00
asonix
a1ea5d676c Rework misskey fetch to reuse deliver plumbing
Only count server errors towards failed breakers
2023-02-25 15:02:16 -06:00
perillamint
667d586160 Send dummy JSON when trying Misskey API endpoint
From Misskey 13, Misskey expects valid JSON (it does not care about the content,
though) in the POST body. To work around this, send an empty JSON object when
requesting the Misskey API endpoint
2023-02-25 14:34:38 -06:00
perillamint
4a7775b56d Misskey metadata support
This commit implements misskey metadata support and corresponding test
for From<MskyMeta> implementation

Also, it is good to note that Misskey does not return 404 but 200 OK
when poked at a nonexistent endpoint, so the implementation should
handle the invalid JSON case
2023-02-25 14:34:22 -06:00
asonix
9b809913ad Add note about JSON-LD problems 2023-02-11 18:16:06 -06:00
asonix
a952b528df Use transpose in a couple places 2023-02-05 21:09:47 -06:00
asonix
b5138fc16d Bump version 2023-01-29 13:23:11 -06:00
asonix
0e9b88a7ae Bump deps 2023-01-29 13:21:53 -06:00
asonix
f9cad61049 Add healthcheck for db, new clippy lints 2023-01-29 13:21:36 -06:00
Tealk
96547230bc
update Pleroma text
Signed-off-by: Tealk <tealk@rollenspiel.monster>
2023-01-28 23:45:31 +01:00
asonix
c11ff17192 Bump version 2023-01-23 08:58:07 -06:00
asonix
e93dd2da56 Update teloxide 2023-01-23 08:57:16 -06:00
asonix
34dc1a2281 Update rsa 2023-01-23 08:56:18 -06:00
asonix
9cdebeae4c Update base64, ructe 2023-01-23 08:38:55 -06:00
asonix
662620be46 Only show open_registrations: false when restricted mode is enabled 2023-01-23 08:29:32 -06:00
asonix
5488acb59d Fix docker volume mount in readme 2023-01-03 15:17:56 -06:00
60 changed files with 5007 additions and 2990 deletions

View File

@@ -1,2 +0,0 @@
[build]
# rustflags = ["--cfg", "tokio_unstable"]

2
.cargo/config.toml Normal file
View File

@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]

View File

@@ -1,421 +0,0 @@
kind: pipeline
type: docker
name: clippy
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: clippy
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- rustup component add clippy
- cargo clippy -- -D warnings
trigger:
event:
- push
- pull_request
- tag
---
kind: pipeline
type: docker
name: tests
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: tests
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo test
trigger:
event:
- push
- pull_request
- tag
---
kind: pipeline
type: docker
name: check-amd64
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-amd64
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-amd64
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-amd64
build_args:
- REPO_ARCH=amd64
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-amd64
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: check-arm64v8
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-arm64v8
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-arm64v8
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-arm64v8
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-arm64v8
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-arm64v8
build_args:
- REPO_ARCH=arm64v8
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-arm64v8
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: check-arm32v7
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-arm32v7
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-arm32v7
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-arm32v7
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-arm32v7
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-arm32v7
build_args:
- REPO_ARCH=arm32v7
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-arm32v7
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: manifest
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: manifest
image: plugins/manifest:1
settings:
username: asonix
password:
from_secret: dockerhub_token
dump: true
auto_tag: true
ignore_missing: true
spec: docker/drone/manifest.tmpl
depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: publish-crate
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: publish
image: asonix/rust-builder:latest-linux-amd64
pull: always
environment:
CRATES_IO_TOKEN:
from_secret: crates_io_token
commands:
- cargo publish --token $CRATES_IO_TOKEN
depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7
trigger:
event:
- tag

View File

@@ -0,0 +1,61 @@
on:
push:
branches:
- '*'
pull_request:
branches:
- main
jobs:
clippy:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Clippy
run: |
cargo clippy --no-default-features -- -D warnings
tests:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Test
run: cargo test
check:
strategy:
fail-fast: false
matrix:
target:
- x86_64-unknown-linux-musl
- armv7-unknown-linux-musleabihf
- aarch64-unknown-linux-musl
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Debug builds
run: cargo zigbuild --target ${{ matrix.target }}

View File

@@ -0,0 +1,226 @@
on:
push:
tags:
- 'v*.*.*'
env:
REGISTRY_IMAGE: asonix/relay
jobs:
clippy:
runs-on: base-image
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Clippy
run: |
# cargo clippy --no-default-features -- -D warnings
cargo clippy --no-default-features
tests:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Test
run: cargo test
build:
needs:
- clippy
- tests
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
strategy:
fail-fast: false
matrix:
info:
- target: x86_64-unknown-linux-musl
artifact: linux-amd64
platform: linux/amd64
- target: armv7-unknown-linux-musleabihf
artifact: linux-arm32v7
platform: linux/arm/v7
- target: aarch64-unknown-linux-musl
artifact: linux-arm64v8
platform: linux/arm64
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Prepare Platform
run: |
platform=${{ matrix.info.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
shell: bash
-
name: Docker meta
id: meta
uses: https://github.com/docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
flavor: |
latest=auto
suffix=-${{ matrix.info.artifact }}
tags: |
type=raw,value=latest,enable={{ is_default_branch }}
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
-
name: Set up QEMU
uses: https://github.com/docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: https://github.com/docker/setup-buildx-action@v3
-
name: Docker login
uses: https://github.com/docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Compile relay
run: cargo zigbuild --target ${{ matrix.info.target }} --release
-
name: Prepare artifacts
run: |
mkdir artifacts
cp target/${{ matrix.info.target }}/release/relay artifacts/relay-${{ matrix.info.artifact }}
-
uses: https://github.com/actions/upload-artifact@v3
with:
name: binaries
path: artifacts/
-
name: Prepare binary
run: |
cp target/${{ matrix.info.target }}/release/relay docker/forgejo/relay
-
name: Build and push ${{ matrix.info.platform }} docker image
id: build
uses: docker/build-push-action@v5
with:
context: ./docker/forgejo
platforms: ${{ matrix.info.platform }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
-
name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
echo "Created /tmp/digests/${digest#sha256:}"
shell: bash
-
name: Upload ${{ matrix.info.platform }} digest
uses: https://github.com/actions/upload-artifact@v3
with:
name: digests
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
publish-docker:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
needs: [build]
steps:
-
name: Download digests
uses: https://github.com/actions/download-artifact@v3
with:
name: digests
path: /tmp/digests
pattern: digests-*
merge-multiple: true
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Docker login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Docker meta
id: meta
uses: https://github.com/docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
flavor: |
latest=auto
tags: |
type=raw,value=latest,enable={{ is_default_branch }}
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
-
name: Create manifest list and push
working-directory: /tmp/digests
run: |
tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
docker buildx imagetools create ${tags[@]} ${images[@]}
shell: bash
-
name: Inspect Image
run: |
docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
publish-forgejo:
needs: [build]
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
- uses: https://github.com/actions/download-artifact@v3
with:
name: binaries
path: artifacts/
merge-multiple: true
- uses: actions/forgejo-release@v1
with:
direction: upload
token: ${{ secrets.GITHUB_TOKEN }}
release-dir: artifacts/
publish-crate:
needs: [build]
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Publish Crate
run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}

3
.gitignore vendored
View File

@@ -1,3 +1,6 @@
/target /target
/artifacts /artifacts
/sled /sled
/.direnv
/.envrc
/result

3874
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,11 +1,11 @@
[package] [package]
name = "ap-relay" name = "ap-relay"
description = "A simple activitypub relay" description = "A simple activitypub relay"
version = "0.3.79" version = "0.3.116"
authors = ["asonix <asonix@asonix.dog>"] authors = ["asonix <asonix@asonix.dog>"]
license = "AGPL-3.0" license = "AGPL-3.0"
readme = "README.md" readme = "README.md"
repository = "https://git.asonix.dog/asonix/ap-relay" repository = "https://git.asonix.dog/asonix/relay"
keywords = ["activitypub", "relay"] keywords = ["activitypub", "relay"]
edition = "2021" edition = "2021"
build = "src/build.rs" build = "src/build.rs"
@@ -14,94 +14,101 @@ build = "src/build.rs"
name = "relay" name = "relay"
path = "src/main.rs" path = "src/main.rs"
[profile.release]
strip = true
[features] [features]
console = ["console-subscriber"] console = ["dep:console-subscriber"]
default = [] default = []
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
anyhow = "1.0" actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_23"] }
actix-rt = "2.7.0" actix-webfinger = { version = "0.5.0", default-features = false }
actix-web = { version = "4.0.1", default-features = false, features = [ activitystreams = "0.7.0-alpha.25"
"rustls",
"compress-brotli",
"compress-gzip",
] }
actix-webfinger = "0.4.0"
activitystreams = "0.7.0-alpha.21"
activitystreams-ext = "0.1.0-alpha.3" activitystreams-ext = "0.1.0-alpha.3"
ammonia = "3.1.0" ammonia = "4.0.0"
awc = { version = "3.0.0", default-features = false, features = ["rustls"] } async-cpupool = "0.3.0"
bcrypt = "0.13" bcrypt = "0.16"
base64 = "0.13" base64 = "0.22"
clap = { version = "4.0.0", features = ["derive"] } clap = { version = "4.0.0", features = ["derive"] }
config = "0.13.0" color-eyre = "0.6.2"
console-subscriber = { version = "0.1", optional = true } config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] }
dashmap = "5.1.0" console-subscriber = { version = "0.4", optional = true }
dashmap = "6.0.1"
dotenv = "0.15.0" dotenv = "0.15.0"
futures-util = "0.3.17" futures-core = "0.3.30"
lru = "0.9.0" lru = "0.12.0"
metrics = "0.20.1" metrics = "0.23.0"
metrics-exporter-prometheus = { version = "0.11.0", default-features = false, features = [ metrics-exporter-prometheus = { version = "0.15.0", default-features = false, features = [
"http-listener", "http-listener",
] } ] }
metrics-util = "0.14.0" metrics-util = "0.17.0"
mime = "0.3.16" mime = "0.3.16"
minify-html = "0.10.0" minify-html = "0.15.0"
opentelemetry = { version = "0.18", features = ["rt-tokio"] } opentelemetry = "0.27.1"
opentelemetry-otlp = "0.11" opentelemetry_sdk = { version = "0.27", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.27", features = ["grpc-tonic"] }
pin-project-lite = "0.2.9" pin-project-lite = "0.2.9"
quanta = "0.10.1" # pinned to metrics-util
quanta = "0.12.0"
rand = "0.8" rand = "0.8"
rsa = "0.7" reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "stream"]}
rsa-magic-public-key = "0.6.0" reqwest-middleware = { version = "0.4", default-features = false, features = ["json"] }
rustls = "0.20.7" reqwest-tracing = "0.5.0"
rustls-pemfile = "1.0.1" ring = "0.17.5"
rsa = "0.9"
rsa-magic-public-key = "0.8.0"
rustls = { version = "0.23.0", default-features = false, features = ["ring", "logging", "std", "tls12"] }
rustls-channel-resolver = "0.3.0"
rustls-pemfile = "2"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"
sha2 = { version = "0.10", features = ["oid"] }
signature = "1.6.4"
sled = "0.34.7" sled = "0.34.7"
teloxide = { version = "0.11.1", default-features = false, features = [ streem = "0.2.0"
teloxide = { version = "0.13.0", default-features = false, features = [
"ctrlc_handler", "ctrlc_handler",
"macros", "macros",
"rustls", "rustls",
] } ] }
thiserror = "1.0" thiserror = "2.0"
time = { version = "0.3.17", features = ["serde"] } time = { version = "0.3.17", features = ["serde"] }
tracing = "0.1" tracing = "0.1"
tracing-awc = "0.1.6"
tracing-error = "0.2" tracing-error = "0.2"
tracing-futures = "0.2" tracing-log = "0.2"
tracing-log = "0.1" tracing-opentelemetry = "0.28"
tracing-opentelemetry = "0.18"
tracing-subscriber = { version = "0.3", features = [ tracing-subscriber = { version = "0.3", features = [
"ansi", "ansi",
"env-filter", "env-filter",
"fmt", "fmt",
] } ] }
tokio = { version = "1", features = ["macros", "sync"] } tokio = { version = "1", features = ["full", "tracing"] }
uuid = { version = "1", features = ["v4", "serde"] } uuid = { version = "1", features = ["v4", "serde"] }
[dependencies.background-jobs] [dependencies.background-jobs]
version = "0.14.0" version = "0.19.0"
default-features = false default-features = false
features = ["background-jobs-actix", "error-logging"] features = ["error-logging", "metrics", "tokio"]
[dependencies.http-signature-normalization-actix] [dependencies.http-signature-normalization-actix]
version = "0.8.0" version = "0.11.1"
default-features = false default-features = false
features = ["client", "server", "sha-2"] features = ["server", "ring"]
[dependencies.http-signature-normalization-reqwest]
version = "0.13.0"
default-features = false
features = ["middleware", "ring"]
[dependencies.tracing-actix-web] [dependencies.tracing-actix-web]
version = "0.7.0" version = "0.7.9"
[build-dependencies] [build-dependencies]
anyhow = "1.0" color-eyre = "0.6.2"
dotenv = "0.15.0" dotenv = "0.15.0"
ructe = { version = "0.15.0", features = ["sass", "mime03"] } ructe = { version = "0.17.0", features = ["sass", "mime03"] }
toml = "0.5.8" toml = "0.8.0"
[profile.dev.package.rsa] [profile.dev.package.rsa]
opt-level = 3 opt-level = 3

View File

@ -6,11 +6,11 @@ _A simple and efficient activitypub relay_
If running docker, you can start the relay with the following command: If running docker, you can start the relay with the following command:
``` ```
$ sudo docker run --rm -it \ $ sudo docker run --rm -it \
-v "./:/mnt/" \ -v "$(pwd):/mnt/" \
-e ADDR=0.0.0.0 \ -e ADDR=0.0.0.0 \
-e SLED_PATH=/mnt/sled/db-0.34 \ -e SLED_PATH=/mnt/sled/db-0.34 \
-p 8080:8080 \ -p 8080:8080 \
asonix/relay:0.3.78 asonix/relay:0.3.85
``` ```
This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080 This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080
#### Cargo #### Cargo
@ -105,6 +105,9 @@ LOCAL_DOMAINS=masto.asonix.dog
LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>" LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
PROMETHEUS_ADDR=0.0.0.0 PROMETHEUS_ADDR=0.0.0.0
PROMETHEUS_PORT=9000 PROMETHEUS_PORT=9000
CLIENT_TIMEOUT=10
DELIVER_CONCURRENCY=8
SIGNATURE_THREADS=2
``` ```
#### Descriptions #### Descriptions
@ -154,6 +157,22 @@ Optional - description for the relay
Optional - Address to bind to for serving the prometheus scrape endpoint Optional - Address to bind to for serving the prometheus scrape endpoint
##### `PROMETHEUS_PORT` ##### `PROMETHEUS_PORT`
Optional - Port to bind to for serving the prometheus scrape endpoint Optional - Port to bind to for serving the prometheus scrape endpoint
##### `CLIENT_TIMEOUT`
Optional - How long the relay will hold open a connection (in seconds) to a remote server during
fetches and deliveries. This defaults to 10
##### `DELIVER_CONCURRENCY`
Optional - How many deliver requests the relay should allow to be in-flight per thread. The default
is 8.
##### `SIGNATURE_THREADS`
Optional - Override number of threads used for signing and verifying requests. Default is
`std::thread::available_parallelism()` (It tries to detect how many cores you have). If it cannot
detect the correct number of cores, it falls back to 1.
##### `PROXY_URL`
Optional - URL of an HTTP proxy to forward outbound requests through
##### `PROXY_USERNAME`
Optional - username to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
##### `PROXY_PASSWORD`
Optional - password to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
### Subscribing ### Subscribing
Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings. Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings.
@ -182,11 +201,17 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
- Add {anything}, the Add {anything} is relayed verbatim to listening servers. - Add {anything}, the Add {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature signed with a JSON-LD signature
- Remove {anything}, the Remove {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
### Supported Discovery Protocols ### Supported Discovery Protocols
- Webfinger - Webfinger
- NodeInfo - NodeInfo
### Known issues
Pleroma and Akkoma do not support validating JSON-LD signatures, meaning many activities such as Delete, Update, Add, and Remove will be rejected with a message similar to `WARN: Response from https://example.com/inbox, "Invalid HTTP Signature"`. This is normal and not an issue with the relay.
### Contributing ### Contributing
Feel free to open issues for anything you find an issue with. Please note that any contributed code will be licensed under the AGPLv3. Feel free to open issues for anything you find an issue with. Please note that any contributed code will be licensed under the AGPLv3.

View File

@ -1,11 +0,0 @@
ARG REPO_ARCH
FROM asonix/rust-runner:latest-linux-$REPO_ARCH
COPY relay /usr/local/bin/relay
USER app
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]

View File

@ -1,25 +0,0 @@
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
- {{this}}
{{/each}}
{{/if}}
manifests:
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
platform:
architecture: amd64
os: linux
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64v8
platform:
architecture: arm64
os: linux
variant: v8
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm32v7
platform:
architecture: arm
os: linux
variant: v7

24
docker/forgejo/Dockerfile Normal file
View File

@ -0,0 +1,24 @@
# Runtime image for the relay: Alpine base with tini as PID 1 and an
# unprivileged `app` user.
FROM alpine:3.19

# UID/GID of the runtime user; overridable at build time to match host perms.
ARG UID=991
ARG GID=991

ENV \
  UID=${UID} \
  GID=${GID}

USER root
RUN \
  addgroup -g "${GID}" app && \
  adduser -D -G app -u "${UID}" -g "" -h /opt/app app && \
  apk add tini && \
  chown -R app:app /mnt

COPY relay /usr/local/bin/relay

# Drop privileges for the actual service process.
USER app
# 6669 is presumably the tokio-console port, 8080 the HTTP listener —
# TODO(review): confirm against the relay's runtime config.
EXPOSE 6669
EXPOSE 8080
VOLUME /mnt
# tini reaps zombies and forwards signals to the relay process.
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]

View File

@ -2,7 +2,7 @@ version: '3.3'
services: services:
relay: relay:
image: asonix/relay:v0.3.73 image: asonix/relay:0.3.115
ports: ports:
- "8079:8079" - "8079:8079"
restart: always restart: always

61
flake.lock Normal file
View File

@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1733550349,
"narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e2605d0744c2417b09f8bf850dfca42fcf537d34",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

34
flake.nix Normal file
View File

@ -0,0 +1,34 @@
{
  description = "relay";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = import nixpkgs {
          inherit system;
        };
      in
      {
        packages = rec {
          relay = pkgs.callPackage ./relay.nix { };

          default = relay;
        };

        apps = rec {
          # Fix: this previously pointed at `self.packages.${system}.pict-rs-proxy`,
          # a package this flake never defines (copy-paste from another project),
          # so `nix run .#dev` failed to evaluate. Point it at the relay package.
          dev = flake-utils.lib.mkApp { drv = self.packages.${system}.relay; };
          default = dev;
        };

        # Development shell with the Rust toolchain and build helpers.
        devShell = with pkgs; mkShell {
          nativeBuildInputs = [ cargo cargo-outdated cargo-zigbuild clippy gcc protobuf rust-analyzer rustc rustfmt ];

          # Lets rust-analyzer find the standard library sources.
          RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
        };
      });
}

23
relay.nix Normal file
View File

@ -0,0 +1,23 @@
# Package definition for the relay, consumed by flake.nix via `callPackage`.
{ lib
, nixosTests
, rustPlatform
}:

rustPlatform.buildRustPackage {
  pname = "relay";
  version = "0.3.116";
  src = ./.;
  cargoLock.lockFile = ./Cargo.lock;

  # tokio_unstable is needed for tokio-console instrumentation —
  # see RUSTFLAGS usage; confirm against the crate's console-subscriber setup.
  RUSTFLAGS = "--cfg tokio_unstable";

  nativeBuildInputs = [ ];

  passthru.tests = { inherit (nixosTests) relay; };

  meta = with lib; {
    description = "An ActivityPub relay";
    homepage = "https://git.asonix.dog/asonix/relay";
    license = with licenses; [ agpl3Plus ];
  };
}

View File

@ -3,12 +3,13 @@ use crate::{
collector::Snapshot, collector::Snapshot,
config::{AdminUrlKind, Config}, config::{AdminUrlKind, Config},
error::{Error, ErrorKind}, error::{Error, ErrorKind},
extractors::XApiToken,
}; };
use awc::Client; use reqwest_middleware::ClientWithMiddleware;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
pub(crate) async fn allow( pub(crate) async fn allow(
client: &Client, client: &ClientWithMiddleware,
config: &Config, config: &Config,
domains: Vec<String>, domains: Vec<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
@ -16,7 +17,7 @@ pub(crate) async fn allow(
} }
pub(crate) async fn disallow( pub(crate) async fn disallow(
client: &Client, client: &ClientWithMiddleware,
config: &Config, config: &Config,
domains: Vec<String>, domains: Vec<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
@ -24,7 +25,7 @@ pub(crate) async fn disallow(
} }
pub(crate) async fn block( pub(crate) async fn block(
client: &Client, client: &ClientWithMiddleware,
config: &Config, config: &Config,
domains: Vec<String>, domains: Vec<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
@ -32,35 +33,50 @@ pub(crate) async fn block(
} }
pub(crate) async fn unblock( pub(crate) async fn unblock(
client: &Client, client: &ClientWithMiddleware,
config: &Config, config: &Config,
domains: Vec<String>, domains: Vec<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Unblock).await post_domains(client, config, domains, AdminUrlKind::Unblock).await
} }
pub(crate) async fn allowed(client: &Client, config: &Config) -> Result<AllowedDomains, Error> { pub(crate) async fn allowed(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<AllowedDomains, Error> {
get_results(client, config, AdminUrlKind::Allowed).await get_results(client, config, AdminUrlKind::Allowed).await
} }
pub(crate) async fn blocked(client: &Client, config: &Config) -> Result<BlockedDomains, Error> { pub(crate) async fn blocked(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<BlockedDomains, Error> {
get_results(client, config, AdminUrlKind::Blocked).await get_results(client, config, AdminUrlKind::Blocked).await
} }
pub(crate) async fn connected(client: &Client, config: &Config) -> Result<ConnectedActors, Error> { pub(crate) async fn connected(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<ConnectedActors, Error> {
get_results(client, config, AdminUrlKind::Connected).await get_results(client, config, AdminUrlKind::Connected).await
} }
pub(crate) async fn stats(client: &Client, config: &Config) -> Result<Snapshot, Error> { pub(crate) async fn stats(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<Snapshot, Error> {
get_results(client, config, AdminUrlKind::Stats).await get_results(client, config, AdminUrlKind::Stats).await
} }
pub(crate) async fn last_seen(client: &Client, config: &Config) -> Result<LastSeen, Error> { pub(crate) async fn last_seen(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<LastSeen, Error> {
get_results(client, config, AdminUrlKind::LastSeen).await get_results(client, config, AdminUrlKind::LastSeen).await
} }
async fn get_results<T: DeserializeOwned>( async fn get_results<T: DeserializeOwned>(
client: &Client, client: &ClientWithMiddleware,
config: &Config, config: &Config,
url_kind: AdminUrlKind, url_kind: AdminUrlKind,
) -> Result<T, Error> { ) -> Result<T, Error> {
@ -68,15 +84,19 @@ async fn get_results<T: DeserializeOwned>(
let iri = config.generate_admin_url(url_kind); let iri = config.generate_admin_url(url_kind);
let mut res = client let res = client
.get(iri.as_str()) .get(iri.as_str())
.insert_header(x_api_token) .header(XApiToken::http1_name(), x_api_token.to_string())
.send() .send()
.await .await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?; .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
if !res.status().is_success() { if !res.status().is_success() {
return Err(ErrorKind::Status(iri.to_string(), res.status()).into()); return Err(ErrorKind::Status(
iri.to_string(),
crate::http1::status_to_http02(res.status()),
)
.into());
} }
let t = res let t = res
@ -88,7 +108,7 @@ async fn get_results<T: DeserializeOwned>(
} }
async fn post_domains( async fn post_domains(
client: &Client, client: &ClientWithMiddleware,
config: &Config, config: &Config,
domains: Vec<String>, domains: Vec<String>,
url_kind: AdminUrlKind, url_kind: AdminUrlKind,
@ -99,8 +119,9 @@ async fn post_domains(
let res = client let res = client
.post(iri.as_str()) .post(iri.as_str())
.insert_header(x_api_token) .header(XApiToken::http1_name(), x_api_token.to_string())
.send_json(&Domains { domains }) .json(&Domains { domains })
.send()
.await .await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?; .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;

View File

@ -5,7 +5,7 @@ fn git_info() {
if let Ok(output) = Command::new("git").args(["rev-parse", "HEAD"]).output() { if let Ok(output) = Command::new("git").args(["rev-parse", "HEAD"]).output() {
if output.status.success() { if output.status.success() {
let git_hash = String::from_utf8_lossy(&output.stdout); let git_hash = String::from_utf8_lossy(&output.stdout);
println!("cargo:rustc-env=GIT_HASH={}", git_hash); println!("cargo:rustc-env=GIT_HASH={git_hash}");
println!("cargo:rustc-env=GIT_SHORT_HASH={}", &git_hash[..8]) println!("cargo:rustc-env=GIT_SHORT_HASH={}", &git_hash[..8])
} }
} }
@ -16,12 +16,12 @@ fn git_info() {
{ {
if output.status.success() { if output.status.success() {
let git_branch = String::from_utf8_lossy(&output.stdout); let git_branch = String::from_utf8_lossy(&output.stdout);
println!("cargo:rustc-env=GIT_BRANCH={}", git_branch); println!("cargo:rustc-env=GIT_BRANCH={git_branch}");
} }
} }
} }
fn version_info() -> Result<(), anyhow::Error> { fn version_info() -> color_eyre::Result<()> {
let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml"); let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml");
let mut file = File::open(cargo_toml)?; let mut file = File::open(cargo_toml)?;
@ -32,17 +32,17 @@ fn version_info() -> Result<(), anyhow::Error> {
let data: toml::Value = toml::from_str(&cargo_data)?; let data: toml::Value = toml::from_str(&cargo_data)?;
if let Some(version) = data["package"]["version"].as_str() { if let Some(version) = data["package"]["version"].as_str() {
println!("cargo:rustc-env=PKG_VERSION={}", version); println!("cargo:rustc-env=PKG_VERSION={version}");
} }
if let Some(name) = data["package"]["name"].as_str() { if let Some(name) = data["package"]["name"].as_str() {
println!("cargo:rustc-env=PKG_NAME={}", name); println!("cargo:rustc-env=PKG_NAME={name}");
} }
Ok(()) Ok(())
} }
fn main() -> Result<(), anyhow::Error> { fn main() -> color_eyre::Result<()> {
dotenv::dotenv().ok(); dotenv::dotenv().ok();
git_info(); git_info();

View File

@ -1,5 +1,425 @@
mod double; use metrics::{Key, Metadata, Recorder, SetRecorderError};
mod stats; use metrics_util::{
registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
MetricKindMask, Summary,
};
use quanta::Clock;
use std::{
collections::{BTreeMap, HashMap},
sync::{atomic::Ordering, Arc, RwLock},
time::Duration,
};
pub(crate) use double::DoubleRecorder; const SECONDS: u64 = 1;
pub(crate) use stats::{MemoryCollector, Snapshot}; const MINUTES: u64 = 60 * SECONDS;
const HOURS: u64 = 60 * MINUTES;
const DAYS: u64 = 24 * HOURS;
/// Fold a collection length into a `u32` so it can be recorded on a gauge.
///
/// Values are reduced modulo `u32::MAX`, so lengths at or above that bound
/// wrap rather than saturate.
pub(crate) fn recordable(len: usize) -> u32 {
    let wrapped = (len as u64) % u64::from(u32::MAX);
    wrapped as u32
}
type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;
/// In-memory metrics collector storing counters, gauges, and histogram
/// summaries so they can be snapshotted and presented on demand.
#[derive(Clone)]
pub struct MemoryCollector {
    // All state is behind a shared Arc, so Clone is just a refcount bump.
    inner: Arc<Inner>,
}
/// Shared state backing `MemoryCollector`.
struct Inner {
    // Description registered per metric name via the `describe_*` hooks.
    descriptions: RwLock<HashMap<String, metrics::SharedString>>,
    // Histogram summaries keyed by metric name, then by label set.
    distributions: RwLock<HashMap<String, DistributionMap>>,
    // Tracks metric staleness so idle metrics are dropped from snapshots.
    recency: Recency<Key>,
    // Storage for all registered counter/gauge/histogram handles.
    registry: Registry<Key, GenerationalStorage<AtomicStorage>>,
}
/// Snapshot of a single counter: its label set and current value.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Counter {
    // Label key/value pairs identifying this counter series.
    labels: BTreeMap<String, String>,
    value: u64,
}
impl std::fmt::Display for Counter {
    /// Render as `"k1: v1, k2: v2 - value"`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut rendered = Vec::with_capacity(self.labels.len());
        for (key, value) in &self.labels {
            rendered.push(format!("{key}: {value}"));
        }

        write!(f, "{} - {}", rendered.join(", "), self.value)
    }
}
/// Snapshot of a single gauge: its label set and current value.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Gauge {
    // Label key/value pairs identifying this gauge series.
    labels: BTreeMap<String, String>,
    value: f64,
}
impl std::fmt::Display for Gauge {
    /// Render as `"k1: v1, k2: v2 - value"`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut rendered = Vec::with_capacity(self.labels.len());
        for (key, value) in &self.labels {
            rendered.push(format!("{key}: {value}"));
        }

        write!(f, "{} - {}", rendered.join(", "), self.value)
    }
}
/// Snapshot of a single histogram: labels plus (quantile, value) pairs.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Histogram {
    labels: BTreeMap<String, String>,
    // (quantile, estimated value); `None` when the summary had no samples.
    value: Vec<(f64, Option<f64>)>,
}
impl std::fmt::Display for Histogram {
    /// Render as `"labels - q1: v1, q2: v2, ..."`, with six decimal places
    /// per quantile value and `None,` for empty summaries.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut label_parts = Vec::with_capacity(self.labels.len());
        for (k, v) in &self.labels {
            label_parts.push(format!("{k}: {v}"));
        }

        let mut value_parts = Vec::with_capacity(self.value.len());
        for (quantile, estimate) in &self.value {
            match estimate {
                Some(v) => value_parts.push(format!("{quantile}: {v:.6}")),
                None => value_parts.push(format!("{quantile}: None,")),
            }
        }

        write!(f, "{} - {}", label_parts.join(", "), value_parts.join(", "))
    }
}
/// Point-in-time view of every metric currently held by the collector.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Snapshot {
    counters: HashMap<String, Vec<Counter>>,
    gauges: HashMap<String, Vec<Gauge>>,
    histograms: HashMap<String, Vec<Histogram>>,
}
// ((started, finished), merged) counter-name triples: when presenting a
// snapshot, each started/finished pair is collapsed into the single
// synthetic "running" counter named by the third element.
const PAIRS: [((&str, &str), &str); 2] = [
    (
        (
            "background-jobs.worker.started",
            "background-jobs.worker.finished",
        ),
        "background-jobs.worker.running",
    ),
    (
        (
            "background-jobs.job.started",
            "background-jobs.job.finished",
        ),
        "background-jobs.job.running",
    ),
];
/// Accumulator pairing a "started" counter with its "finished" counterpart
/// so the difference (work still running) can be computed in `merge`.
#[derive(Default)]
struct MergeCounter {
    start: Option<Counter>,
    finish: Option<Counter>,
}
impl MergeCounter {
fn merge(self) -> Option<Counter> {
match (self.start, self.finish) {
(Some(start), Some(end)) => Some(Counter {
labels: start.labels,
value: start.value.saturating_sub(end.value),
}),
(Some(only), None) => Some(only),
(None, Some(only)) => Some(Counter {
labels: only.labels,
value: 0,
}),
(None, None) => None,
}
}
}
impl Snapshot {
    /// Pretty-print the snapshot to stdout, grouped by metric kind.
    ///
    /// Counters whose names appear in `PAIRS` are not printed verbatim;
    /// their started/finished values are merged into a synthetic "running"
    /// counter first (per label set).
    pub(crate) fn present(self) {
        if !self.counters.is_empty() {
            println!("Counters");

            let mut merging = HashMap::new();
            for (key, counters) in self.counters {
                // Divert started/finished pairs into `merging` rather than
                // printing them directly.
                if let Some(((start, _), name)) = PAIRS
                    .iter()
                    .find(|((start, finish), _)| *start == key || *finish == key)
                {
                    let entry = merging.entry(name).or_insert_with(HashMap::new);

                    for counter in counters {
                        // Group by label set so each labelled series merges
                        // with its own counterpart.
                        let merge_counter = entry
                            .entry(counter.labels.clone())
                            .or_insert_with(MergeCounter::default);
                        if key == *start {
                            merge_counter.start = Some(counter);
                        } else {
                            merge_counter.finish = Some(counter);
                        }
                    }

                    continue;
                }

                println!("\t{key}");
                for counter in counters {
                    println!("\t\t{counter}");
                }
            }

            // Now print the merged "running" counters collected above.
            for (key, counters) in merging {
                println!("\t{key}");
                for (_, counter) in counters {
                    if let Some(counter) = counter.merge() {
                        println!("\t\t{counter}");
                    }
                }
            }
        }

        if !self.gauges.is_empty() {
            println!("Gauges");
            for (key, gauges) in self.gauges {
                println!("\t{key}");

                for gauge in gauges {
                    println!("\t\t{gauge}");
                }
            }
        }

        if !self.histograms.is_empty() {
            println!("Histograms");
            for (key, histograms) in self.histograms {
                println!("\t{key}");

                for histogram in histograms {
                    println!("\t\t{histogram}");
                }
            }
        }
    }
}
/// Split a metrics `Key` into its name and a list of (label, value) pairs.
fn key_to_parts(key: &Key) -> (String, Vec<(String, String)>) {
    let name = key.name().to_string();

    let mut labels = Vec::new();
    for label in key.labels() {
        labels.push((label.key().to_string(), label.value().to_string()));
    }

    (name, labels)
}
impl Inner {
    /// Collect current values for all live counters, keyed by metric name.
    fn snapshot_counters(&self) -> HashMap<String, Vec<Counter>> {
        let mut counters = HashMap::new();

        for (key, counter) in self.registry.get_counter_handles() {
            let gen = counter.get_generation();
            // Skip counters idle past the recency window.
            if !self.recency.should_store_counter(&key, gen, &self.registry) {
                continue;
            }

            let (name, labels) = key_to_parts(&key);
            let value = counter.get_inner().load(Ordering::Acquire);
            counters.entry(name).or_insert_with(Vec::new).push(Counter {
                labels: labels.into_iter().collect(),
                value,
            });
        }

        counters
    }

    /// Collect current values for all live gauges, keyed by metric name.
    fn snapshot_gauges(&self) -> HashMap<String, Vec<Gauge>> {
        let mut gauges = HashMap::new();

        for (key, gauge) in self.registry.get_gauge_handles() {
            let gen = gauge.get_generation();
            // Skip gauges idle past the recency window.
            if !self.recency.should_store_gauge(&key, gen, &self.registry) {
                continue;
            }

            let (name, labels) = key_to_parts(&key);
            // Gauges are stored as raw f64 bits in an atomic u64.
            let value = f64::from_bits(gauge.get_inner().load(Ordering::Acquire));
            gauges.entry(name).or_insert_with(Vec::new).push(Gauge {
                labels: labels.into_iter().collect(),
                value,
            })
        }

        gauges
    }

    /// Drain pending histogram samples into the stored summaries, prune
    /// stale series, and return quantile snapshots keyed by metric name.
    fn snapshot_histograms(&self) -> HashMap<String, Vec<Histogram>> {
        for (key, histogram) in self.registry.get_histogram_handles() {
            let gen = histogram.get_generation();
            let (name, labels) = key_to_parts(&key);

            // Stale series: remove its summary, and drop the metric's
            // description once no labelled series remain.
            if !self
                .recency
                .should_store_histogram(&key, gen, &self.registry)
            {
                let mut d = self.distributions.write().unwrap();
                let delete_by_name = if let Some(by_name) = d.get_mut(&name) {
                    by_name.remove(&labels);
                    by_name.is_empty()
                } else {
                    false
                };
                // Release the distributions lock before taking descriptions.
                drop(d);

                if delete_by_name {
                    self.descriptions.write().unwrap().remove(&name);
                }

                continue;
            }

            let mut d = self.distributions.write().unwrap();
            let outer_entry = d.entry(name.clone()).or_default();

            let entry = outer_entry
                .entry(labels)
                .or_insert_with(Summary::with_defaults);

            // Drain the handle's buffered samples into the summary.
            histogram.get_inner().clear_with(|samples| {
                for sample in samples {
                    entry.add(*sample);
                }
            });

            // Track total stored series count as a self-metric.
            let mut total_len = 0;
            for dist_map in d.values() {
                total_len += dist_map.len();
            }

            metrics::gauge!("relay.collector.distributions.size").set(recordable(total_len));
        }

        let d = self.distributions.read().unwrap().clone();
        d.into_iter()
            .map(|(key, value)| {
                (
                    key,
                    value
                        .into_iter()
                        .map(|(labels, summary)| Histogram {
                            labels: labels.into_iter().collect(),
                            // Fixed set of quantiles reported per series.
                            value: [0.001, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99, 1.0]
                                .into_iter()
                                .map(|q| (q, summary.quantile(q)))
                                .collect(),
                        })
                        .collect(),
                )
            })
            .collect()
    }

    /// Capture all three metric kinds at once.
    fn snapshot(&self) -> Snapshot {
        Snapshot {
            counters: self.snapshot_counters(),
            gauges: self.snapshot_gauges(),
            histograms: self.snapshot_histograms(),
        }
    }
}
impl MemoryCollector {
    /// Build an empty collector; metrics idle for 5 days are expired from
    /// snapshots by the recency tracker.
    pub(crate) fn new() -> Self {
        MemoryCollector {
            inner: Arc::new(Inner {
                descriptions: Default::default(),
                distributions: Default::default(),
                recency: Recency::new(
                    Clock::new(),
                    MetricKindMask::ALL,
                    Some(Duration::from_secs(5 * DAYS)),
                ),
                registry: Registry::new(GenerationalStorage::atomic()),
            }),
        }
    }

    /// Capture a point-in-time view of all stored metrics.
    pub(crate) fn snapshot(&self) -> Snapshot {
        self.inner.snapshot()
    }

    /// Record `description` for `key` unless one is already stored, then
    /// update the descriptions-size self-metric.
    fn add_description_if_missing(
        &self,
        key: &metrics::KeyName,
        description: metrics::SharedString,
    ) {
        let mut d = self.inner.descriptions.write().unwrap();
        d.entry(key.as_str().to_owned()).or_insert(description);

        metrics::gauge!("relay.collector.descriptions.size").set(recordable(d.len()));
    }

    /// Install this collector as the process-wide metrics recorder.
    pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {
        metrics::set_global_recorder(self.clone())
    }
}
impl Recorder for MemoryCollector {
    /// Store the counter's description for later presentation.
    fn describe_counter(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    /// Store the gauge's description for later presentation.
    fn describe_gauge(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    /// Store the histogram's description for later presentation.
    fn describe_histogram(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    /// Fetch or create the counter handle stored in the registry.
    fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter {
        self.inner
            .registry
            .get_or_create_counter(key, |c| c.clone().into())
    }

    /// Fetch or create the gauge handle stored in the registry.
    fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge {
        self.inner
            .registry
            .get_or_create_gauge(key, |c| c.clone().into())
    }

    /// Fetch or create the histogram handle stored in the registry.
    fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram {
        self.inner
            .registry
            .get_or_create_histogram(key, |c| c.clone().into())
    }
}

View File

@ -1,133 +0,0 @@
use metrics::{CounterFn, GaugeFn, HistogramFn, Key, Recorder, SetRecorderError};
use std::sync::Arc;
#[derive(Clone)]
pub(crate) struct DoubleRecorder<R, S> {
first: R,
second: S,
}
struct DoubleCounter {
first: metrics::Counter,
second: metrics::Counter,
}
struct DoubleGauge {
first: metrics::Gauge,
second: metrics::Gauge,
}
struct DoubleHistogram {
first: metrics::Histogram,
second: metrics::Histogram,
}
impl<R, S> DoubleRecorder<R, S> {
pub(crate) fn new(first: R, second: S) -> Self {
DoubleRecorder { first, second }
}
pub(crate) fn install(self) -> Result<(), SetRecorderError>
where
R: Recorder + 'static,
S: Recorder + 'static,
{
metrics::set_boxed_recorder(Box::new(self))
}
}
impl<R, S> Recorder for DoubleRecorder<R, S>
where
R: Recorder,
S: Recorder,
{
fn describe_counter(
&self,
key: metrics::KeyName,
unit: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.first
.describe_counter(key.clone(), unit, description.clone());
self.second.describe_counter(key, unit, description);
}
fn describe_gauge(
&self,
key: metrics::KeyName,
unit: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.first
.describe_gauge(key.clone(), unit, description.clone());
self.second.describe_gauge(key, unit, description);
}
fn describe_histogram(
&self,
key: metrics::KeyName,
unit: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.first
.describe_histogram(key.clone(), unit, description.clone());
self.second.describe_histogram(key, unit, description);
}
fn register_counter(&self, key: &Key) -> metrics::Counter {
let first = self.first.register_counter(key);
let second = self.second.register_counter(key);
metrics::Counter::from_arc(Arc::new(DoubleCounter { first, second }))
}
fn register_gauge(&self, key: &Key) -> metrics::Gauge {
let first = self.first.register_gauge(key);
let second = self.second.register_gauge(key);
metrics::Gauge::from_arc(Arc::new(DoubleGauge { first, second }))
}
fn register_histogram(&self, key: &Key) -> metrics::Histogram {
let first = self.first.register_histogram(key);
let second = self.second.register_histogram(key);
metrics::Histogram::from_arc(Arc::new(DoubleHistogram { first, second }))
}
}
/// Forward each counter operation to both wrapped counters, first then second.
impl CounterFn for DoubleCounter {
    fn increment(&self, value: u64) {
        self.first.increment(value);
        self.second.increment(value);
    }

    fn absolute(&self, value: u64) {
        self.first.absolute(value);
        self.second.absolute(value);
    }
}
/// Forward each gauge operation to both wrapped gauges, first then second.
impl GaugeFn for DoubleGauge {
    fn increment(&self, value: f64) {
        self.first.increment(value);
        self.second.increment(value);
    }

    fn decrement(&self, value: f64) {
        self.first.decrement(value);
        self.second.decrement(value);
    }

    fn set(&self, value: f64) {
        self.first.set(value);
        self.second.set(value);
    }
}
/// Forward each recorded sample to both wrapped histograms, first then second.
impl HistogramFn for DoubleHistogram {
    fn record(&self, value: f64) {
        self.first.record(value);
        self.second.record(value);
    }
}

View File

@ -1,414 +0,0 @@
use metrics::{Key, Recorder, SetRecorderError};
use metrics_util::{
registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
MetricKindMask, Summary,
};
use quanta::Clock;
use std::{
collections::{BTreeMap, HashMap},
sync::{atomic::Ordering, Arc, RwLock},
time::Duration,
};
// Time-unit multipliers (in seconds) used to express the recency window.
const SECONDS: u64 = 1;
const MINUTES: u64 = 60 * SECONDS;
const HOURS: u64 = 60 * MINUTES;
const DAYS: u64 = 24 * HOURS;
type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;
#[derive(Clone)]
pub struct MemoryCollector {
inner: Arc<Inner>,
}
struct Inner {
descriptions: RwLock<HashMap<String, metrics::SharedString>>,
distributions: RwLock<HashMap<String, DistributionMap>>,
recency: Recency<Key>,
registry: Registry<Key, GenerationalStorage<AtomicStorage>>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Counter {
labels: BTreeMap<String, String>,
value: u64,
}
impl std::fmt::Display for Counter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{}: {}", k, v))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{} - {}", labels, self.value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Gauge {
labels: BTreeMap<String, String>,
value: f64,
}
impl std::fmt::Display for Gauge {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{}: {}", k, v))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{} - {}", labels, self.value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Histogram {
labels: BTreeMap<String, String>,
value: Vec<(f64, Option<f64>)>,
}
impl std::fmt::Display for Histogram {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{}: {}", k, v))
.collect::<Vec<_>>()
.join(", ");
let value = self
.value
.iter()
.map(|(k, v)| {
if let Some(v) = v {
format!("{}: {:.6}", k, v)
} else {
format!("{}: None,", k)
}
})
.collect::<Vec<_>>()
.join(", ");
write!(f, "{} - {}", labels, value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Snapshot {
counters: HashMap<String, Vec<Counter>>,
gauges: HashMap<String, Vec<Gauge>>,
histograms: HashMap<String, Vec<Histogram>>,
}
const PAIRS: [((&str, &str), &str); 2] = [
(
(
"background-jobs.worker.started",
"background-jobs.worker.finished",
),
"background-jobs.worker.running",
),
(
(
"background-jobs.job.started",
"background-jobs.job.finished",
),
"background-jobs.job.running",
),
];
#[derive(Default)]
struct MergeCounter {
start: Option<Counter>,
finish: Option<Counter>,
}
impl MergeCounter {
fn merge(self) -> Option<Counter> {
match (self.start, self.finish) {
(Some(start), Some(end)) => Some(Counter {
labels: start.labels,
value: start.value.saturating_sub(end.value),
}),
(Some(only), None) => Some(only),
(None, Some(only)) => Some(Counter {
labels: only.labels,
value: 0,
}),
(None, None) => None,
}
}
}
impl Snapshot {
pub(crate) fn present(self) {
if !self.counters.is_empty() {
println!("Counters");
let mut merging = HashMap::new();
for (key, counters) in self.counters {
if let Some(((start, _), name)) = PAIRS
.iter()
.find(|((start, finish), _)| *start == key || *finish == key)
{
let entry = merging.entry(name).or_insert_with(HashMap::new);
for counter in counters {
let mut merge_counter = entry
.entry(counter.labels.clone())
.or_insert_with(MergeCounter::default);
if key == *start {
merge_counter.start = Some(counter);
} else {
merge_counter.finish = Some(counter);
}
}
continue;
}
println!("\t{}", key);
for counter in counters {
println!("\t\t{}", counter);
}
}
for (key, counters) in merging {
println!("\t{}", key);
for (_, counter) in counters {
if let Some(counter) = counter.merge() {
println!("\t\t{}", counter);
}
}
}
}
if !self.gauges.is_empty() {
println!("Gauges");
for (key, gauges) in self.gauges {
println!("\t{}", key);
for gauge in gauges {
println!("\t\t{}", gauge);
}
}
}
if !self.histograms.is_empty() {
println!("Histograms");
for (key, histograms) in self.histograms {
println!("\t{}", key);
for histogram in histograms {
println!("\t\t{}", histogram);
}
}
}
}
}
/// Split a metrics `Key` into its name and a list of (label, value) pairs.
fn key_to_parts(key: &Key) -> (String, Vec<(String, String)>) {
    // `Key::labels()` already returns an iterator, so the previous
    // `.into_iter()` call was a no-op (clippy: useless_conversion).
    let labels = key
        .labels()
        .map(|label| (label.key().to_string(), label.value().to_string()))
        .collect();
    let name = key.name().to_string();

    (name, labels)
}
impl Inner {
    /// Snapshots every live counter, grouped by metric name.
    ///
    /// Counters that the recency tracker reports as idle are skipped
    /// (and `should_store_counter` handles dropping them from tracking).
    fn snapshot_counters(&self) -> HashMap<String, Vec<Counter>> {
        let mut counters = HashMap::new();
        for (key, counter) in self.registry.get_counter_handles() {
            let gen = counter.get_generation();
            // Skip counters that haven't been updated within the recency window.
            if !self.recency.should_store_counter(&key, gen, &self.registry) {
                continue;
            }
            let (name, labels) = key_to_parts(&key);
            let value = counter.get_inner().load(Ordering::Acquire);
            counters.entry(name).or_insert_with(Vec::new).push(Counter {
                labels: labels.into_iter().collect(),
                value,
            });
        }
        counters
    }

    /// Snapshots every live gauge, grouped by metric name.
    ///
    /// Gauge values are stored as raw `u64` bit patterns in the registry,
    /// so each is converted back to `f64` via `from_bits`.
    fn snapshot_gauges(&self) -> HashMap<String, Vec<Gauge>> {
        let mut gauges = HashMap::new();
        for (key, gauge) in self.registry.get_gauge_handles() {
            let gen = gauge.get_generation();
            // Skip gauges that haven't been updated within the recency window.
            if !self.recency.should_store_gauge(&key, gen, &self.registry) {
                continue;
            }
            let (name, labels) = key_to_parts(&key);
            let value = f64::from_bits(gauge.get_inner().load(Ordering::Acquire));
            gauges.entry(name).or_insert_with(Vec::new).push(Gauge {
                labels: labels.into_iter().collect(),
                value,
            })
        }
        gauges
    }

    /// Drains pending histogram samples into the persistent `distributions`
    /// summaries, evicts idle series, then renders each summary as a fixed
    /// set of quantiles (0.1% .. 100%).
    fn snapshot_histograms(&self) -> HashMap<String, Vec<Histogram>> {
        for (key, histogram) in self.registry.get_histogram_handles() {
            let gen = histogram.get_generation();
            let (name, labels) = key_to_parts(&key);
            if !self
                .recency
                .should_store_histogram(&key, gen, &self.registry)
            {
                // Idle series: remove its accumulated summary, and note
                // whether that left the metric name with no series at all.
                let mut d = self.distributions.write().unwrap();
                let delete_by_name = if let Some(by_name) = d.get_mut(&name) {
                    by_name.remove(&labels);
                    by_name.is_empty()
                } else {
                    false
                };
                // Release the distributions lock before taking the
                // descriptions lock to keep lock scopes disjoint.
                drop(d);
                if delete_by_name {
                    self.descriptions.write().unwrap().remove(&name);
                }
                continue;
            }

            // Live series: fold any buffered samples into its summary.
            let mut d = self.distributions.write().unwrap();
            let outer_entry = d.entry(name.clone()).or_insert_with(BTreeMap::new);
            let entry = outer_entry
                .entry(labels)
                .or_insert_with(Summary::with_defaults);
            // clear_with drains the histogram's sample buffer while handing
            // us the samples to accumulate.
            histogram.get_inner().clear_with(|samples| {
                for sample in samples {
                    entry.add(*sample);
                }
            })
        }

        // Clone under the read lock, then build the quantile view outside it.
        let d = self.distributions.read().unwrap().clone();
        d.into_iter()
            .map(|(key, value)| {
                (
                    key,
                    value
                        .into_iter()
                        .map(|(labels, summary)| Histogram {
                            labels: labels.into_iter().collect(),
                            value: [0.001, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99, 1.0]
                                .into_iter()
                                .map(|q| (q, summary.quantile(q)))
                                .collect(),
                        })
                        .collect(),
                )
            })
            .collect()
    }

    /// Builds a complete point-in-time snapshot of all metric kinds.
    fn snapshot(&self) -> Snapshot {
        Snapshot {
            counters: self.snapshot_counters(),
            gauges: self.snapshot_gauges(),
            histograms: self.snapshot_histograms(),
        }
    }
}
impl MemoryCollector {
    /// Builds a collector with empty description/distribution maps and a
    /// recency window of five days applied to all metric kinds.
    pub(crate) fn new() -> Self {
        let recency = Recency::new(
            Clock::new(),
            MetricKindMask::ALL,
            Some(Duration::from_secs(5 * DAYS)),
        );

        let inner = Inner {
            descriptions: Default::default(),
            distributions: Default::default(),
            recency,
            registry: Registry::new(GenerationalStorage::atomic()),
        };

        MemoryCollector {
            inner: Arc::new(inner),
        }
    }

    /// Produces a point-in-time view of all recorded metrics.
    pub(crate) fn snapshot(&self) -> Snapshot {
        self.inner.snapshot()
    }

    /// Stores `description` for `key` unless one is already present;
    /// the first description recorded for a metric wins.
    fn add_description_if_missing(
        &self,
        key: &metrics::KeyName,
        description: metrics::SharedString,
    ) {
        let mut descriptions = self.inner.descriptions.write().unwrap();
        descriptions
            .entry(key.as_str().to_owned())
            .or_insert(description);
    }

    /// Installs a clone of this collector as the global metrics recorder.
    pub(crate) fn install(&self) -> Result<(), SetRecorderError> {
        metrics::set_boxed_recorder(Box::new(self.clone()))
    }
}
impl Recorder for MemoryCollector {
    /// Remembers the counter's description; the unit is not tracked.
    fn describe_counter(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    /// Remembers the gauge's description; the unit is not tracked.
    fn describe_gauge(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    /// Remembers the histogram's description; the unit is not tracked.
    fn describe_histogram(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    /// Fetches (or lazily creates) the counter handle for `key`.
    fn register_counter(&self, key: &Key) -> metrics::Counter {
        self.inner
            .registry
            .get_or_create_counter(key, |counter| counter.clone().into())
    }

    /// Fetches (or lazily creates) the gauge handle for `key`.
    fn register_gauge(&self, key: &Key) -> metrics::Gauge {
        self.inner
            .registry
            .get_or_create_gauge(key, |gauge| gauge.clone().into())
    }

    /// Fetches (or lazily creates) the histogram handle for `key`.
    fn register_histogram(&self, key: &Key) -> metrics::Histogram {
        self.inner
            .registry
            .get_or_create_histogram(key, |histogram| histogram.clone().into())
    }
}

View File

@ -11,11 +11,9 @@ use activitystreams::{
}, },
}; };
use config::Environment; use config::Environment;
use http_signature_normalization_actix::prelude::VerifyDigest; use http_signature_normalization_actix::{digest::ring::Sha256, prelude::VerifyDigest};
use rustls::{Certificate, PrivateKey}; use rustls::sign::CertifiedKey;
use sha2::{Digest, Sha256};
use std::{ use std::{
io::BufReader,
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr},
path::PathBuf, path::PathBuf,
}; };
@ -45,6 +43,12 @@ pub(crate) struct ParsedConfig {
local_blurb: Option<String>, local_blurb: Option<String>,
prometheus_addr: Option<IpAddr>, prometheus_addr: Option<IpAddr>,
prometheus_port: Option<u16>, prometheus_port: Option<u16>,
deliver_concurrency: u64,
client_timeout: u64,
proxy_url: Option<IriString>,
proxy_username: Option<String>,
proxy_password: Option<String>,
signature_threads: Option<usize>,
} }
#[derive(Clone)] #[derive(Clone)]
@ -68,6 +72,10 @@ pub struct Config {
local_domains: Vec<String>, local_domains: Vec<String>,
local_blurb: Option<String>, local_blurb: Option<String>,
prometheus_config: Option<PrometheusConfig>, prometheus_config: Option<PrometheusConfig>,
deliver_concurrency: u64,
client_timeout: u64,
proxy_config: Option<ProxyConfig>,
signature_threads: Option<usize>,
} }
#[derive(Clone)] #[derive(Clone)]
@ -82,6 +90,12 @@ struct PrometheusConfig {
port: u16, port: u16,
} }
#[derive(Clone, Debug)]
struct ProxyConfig {
url: IriString,
auth: Option<(String, String)>,
}
#[derive(Debug)] #[derive(Debug)]
pub enum UrlKind { pub enum UrlKind {
Activity, Activity,
@ -135,6 +149,10 @@ impl std::fmt::Debug for Config {
.field("local_domains", &self.local_domains) .field("local_domains", &self.local_domains)
.field("local_blurb", &self.local_blurb) .field("local_blurb", &self.local_blurb)
.field("prometheus_config", &self.prometheus_config) .field("prometheus_config", &self.prometheus_config)
.field("deliver_concurrency", &self.deliver_concurrency)
.field("client_timeout", &self.client_timeout)
.field("proxy_config", &self.proxy_config)
.field("signature_threads", &self.signature_threads)
.finish() .finish()
} }
} }
@ -164,13 +182,19 @@ impl Config {
.set_default("local_blurb", None as Option<&str>)? .set_default("local_blurb", None as Option<&str>)?
.set_default("prometheus_addr", None as Option<&str>)? .set_default("prometheus_addr", None as Option<&str>)?
.set_default("prometheus_port", None as Option<u16>)? .set_default("prometheus_port", None as Option<u16>)?
.set_default("deliver_concurrency", 8u64)?
.set_default("client_timeout", 10u64)?
.set_default("proxy_url", None as Option<&str>)?
.set_default("proxy_username", None as Option<&str>)?
.set_default("proxy_password", None as Option<&str>)?
.set_default("signature_threads", None as Option<u64>)?
.add_source(Environment::default()) .add_source(Environment::default())
.build()?; .build()?;
let config: ParsedConfig = config.try_deserialize()?; let config: ParsedConfig = config.try_deserialize()?;
let scheme = if config.https { "https" } else { "http" }; let scheme = if config.https { "https" } else { "http" };
let base_uri = iri!(format!("{}://{}", scheme, config.hostname)).into_absolute(); let base_uri = iri!(format!("{scheme}://{}", config.hostname)).into_absolute();
let tls = match (config.tls_key, config.tls_cert) { let tls = match (config.tls_key, config.tls_cert) {
(Some(key), Some(cert)) => Some(TlsConfig { key, cert }), (Some(key), Some(cert)) => Some(TlsConfig { key, cert }),
@ -205,10 +229,30 @@ impl Config {
(None, None) => None, (None, None) => None,
}; };
let proxy_config = match (config.proxy_username, config.proxy_password) {
(Some(username), Some(password)) => config.proxy_url.map(|url| ProxyConfig {
url,
auth: Some((username, password)),
}),
(Some(_), None) => {
tracing::warn!(
"PROXY_USERNAME is set but PROXY_PASSWORD is not set, not setting Proxy Auth"
);
config.proxy_url.map(|url| ProxyConfig { url, auth: None })
}
(None, Some(_)) => {
tracing::warn!(
"PROXY_PASSWORD is set but PROXY_USERNAME is not set, not setting Proxy Auth"
);
config.proxy_url.map(|url| ProxyConfig { url, auth: None })
}
(None, None) => config.proxy_url.map(|url| ProxyConfig { url, auth: None }),
};
let source_url = match Self::git_hash() { let source_url = match Self::git_hash() {
Some(hash) => format!( Some(hash) => format!(
"{}{}{}", "{}{}{hash}",
config.source_repo, config.repository_commit_base, hash config.source_repo, config.repository_commit_base
) )
.parse() .parse()
.expect("constructed source URL is valid"), .expect("constructed source URL is valid"),
@ -235,58 +279,80 @@ impl Config {
local_domains, local_domains,
local_blurb: config.local_blurb, local_blurb: config.local_blurb,
prometheus_config, prometheus_config,
deliver_concurrency: config.deliver_concurrency,
client_timeout: config.client_timeout,
proxy_config,
signature_threads: config.signature_threads,
}) })
} }
pub(crate) fn signature_threads(&self) -> usize {
self.signature_threads
.unwrap_or_else(|| {
std::thread::available_parallelism()
.map(usize::from)
.map_err(|e| tracing::warn!("Failed to get parallelism, {e}"))
.unwrap_or(1)
})
.max(1)
}
pub(crate) fn client_timeout(&self) -> u64 {
self.client_timeout
}
pub(crate) fn deliver_concurrency(&self) -> u64 {
self.deliver_concurrency
}
pub(crate) fn prometheus_bind_address(&self) -> Option<SocketAddr> { pub(crate) fn prometheus_bind_address(&self) -> Option<SocketAddr> {
let config = self.prometheus_config.as_ref()?; let config = self.prometheus_config.as_ref()?;
Some((config.addr, config.port).into()) Some((config.addr, config.port).into())
} }
pub(crate) fn open_keys(&self) -> Result<Option<(Vec<Certificate>, PrivateKey)>, Error> { pub(crate) async fn open_keys(&self) -> Result<Option<CertifiedKey>, Error> {
let tls = if let Some(tls) = &self.tls { let tls = if let Some(tls) = &self.tls {
tls tls
} else { } else {
tracing::warn!("No TLS config present"); tracing::info!("No TLS config present");
return Ok(None); return Ok(None);
}; };
let mut certs_reader = BufReader::new(std::fs::File::open(&tls.cert)?); let certs_bytes = tokio::fs::read(&tls.cert).await?;
let certs = rustls_pemfile::certs(&mut certs_reader)?; let certs =
rustls_pemfile::certs(&mut certs_bytes.as_slice()).collect::<Result<Vec<_>, _>>()?;
if certs.is_empty() { if certs.is_empty() {
tracing::warn!("No certs read from certificate file"); tracing::warn!("No certs read from certificate file");
return Ok(None); return Ok(None);
} }
let mut key_reader = BufReader::new(std::fs::File::open(&tls.key)?); let key_bytes = tokio::fs::read(&tls.key).await?;
let key = rustls_pemfile::read_one(&mut key_reader)?; let key = if let Some(key) = rustls_pemfile::private_key(&mut key_bytes.as_slice())? {
key
let certs = certs.into_iter().map(Certificate).collect();
let key = if let Some(key) = key {
match key {
rustls_pemfile::Item::RSAKey(der) => PrivateKey(der),
rustls_pemfile::Item::PKCS8Key(der) => PrivateKey(der),
rustls_pemfile::Item::ECKey(der) => PrivateKey(der),
_ => {
tracing::warn!("Unknown key format: {:?}", key);
return Ok(None);
}
}
} else { } else {
tracing::warn!("Failed to read private key"); tracing::warn!("Failed to read private key");
return Ok(None); return Ok(None);
}; };
Ok(Some((certs, key))) let key = rustls::crypto::ring::sign::any_supported_type(&key)?;
Ok(Some(CertifiedKey::new(certs, key)))
} }
pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> { pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> {
if let Some(blurb) = &self.footer_blurb { if let Some(blurb) = &self.footer_blurb {
if !blurb.is_empty() { if !blurb.is_empty() {
return Some(crate::templates::Html(ammonia::clean(blurb))); return Some(crate::templates::Html(
ammonia::Builder::new()
.add_tag_attributes("a", &["rel"])
.add_tag_attributes("area", &["rel"])
.add_tag_attributes("link", &["rel"])
.link_rel(None)
.clean(blurb)
.to_string(),
));
} }
} }
@ -296,7 +362,15 @@ impl Config {
pub(crate) fn local_blurb(&self) -> Option<crate::templates::Html<String>> { pub(crate) fn local_blurb(&self) -> Option<crate::templates::Html<String>> {
if let Some(blurb) = &self.local_blurb { if let Some(blurb) = &self.local_blurb {
if !blurb.is_empty() { if !blurb.is_empty() {
return Some(crate::templates::Html(ammonia::clean(blurb))); return Some(crate::templates::Html(
ammonia::Builder::new()
.add_tag_attributes("a", &["rel"])
.add_tag_attributes("area", &["rel"])
.add_tag_attributes("link", &["rel"])
.link_rel(None)
.clean(blurb)
.to_string(),
));
} }
} }
@ -332,7 +406,7 @@ impl Config {
match AdminConfig::build(api_token) { match AdminConfig::build(api_token) {
Ok(conf) => Some(actix_web::web::Data::new(conf)), Ok(conf) => Some(actix_web::web::Data::new(conf)),
Err(e) => { Err(e) => {
tracing::error!("Error creating admin config: {}", e); tracing::error!("Error creating admin config: {e}");
None None
} }
} }
@ -371,7 +445,7 @@ impl Config {
pub(crate) fn software_version() -> String { pub(crate) fn software_version() -> String {
if let Some(git) = Self::git_version() { if let Some(git) = Self::git_version() {
return format!("v{}-{}", Self::version(), git); return format!("v{}-{git}", Self::version());
} }
format!("v{}", Self::version()) format!("v{}", Self::version())
@ -381,7 +455,7 @@ impl Config {
let branch = Self::git_branch()?; let branch = Self::git_branch()?;
let hash = Self::git_short_hash()?; let hash = Self::git_short_hash()?;
Some(format!("{}-{}", branch, hash)) Some(format!("{branch}-{hash}"))
} }
fn name() -> &'static str { fn name() -> &'static str {
@ -414,6 +488,12 @@ impl Config {
) )
} }
pub(crate) fn proxy_config(&self) -> Option<(&IriString, Option<(&str, &str)>)> {
self.proxy_config.as_ref().map(|ProxyConfig { url, auth }| {
(url, auth.as_ref().map(|(u, p)| (u.as_str(), p.as_str())))
})
}
pub(crate) fn source_code(&self) -> &IriString { pub(crate) fn source_code(&self) -> &IriString {
&self.source_repo &self.source_repo
} }
@ -463,7 +543,7 @@ impl Config {
resolved resolved
} }
UrlKind::Media(uuid) => FixedBaseResolver::new(self.base_uri.as_ref()) UrlKind::Media(uuid) => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(&format!("media/{}", uuid))?.as_ref()) .resolve(IriRelativeStr::new(&format!("media/{uuid}"))?.as_ref())
.try_to_dedicated_string()?, .try_to_dedicated_string()?,
UrlKind::NodeInfo => FixedBaseResolver::new(self.base_uri.as_ref()) UrlKind::NodeInfo => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref()) .resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())

View File

@ -2,7 +2,7 @@ use crate::{
apub::AcceptedActors, apub::AcceptedActors,
db::{Actor, Db}, db::{Actor, Db},
error::{Error, ErrorKind}, error::{Error, ErrorKind},
requests::Requests, requests::{BreakerStrategy, Requests},
}; };
use activitystreams::{iri_string::types::IriString, prelude::*}; use activitystreams::{iri_string::types::IriString, prelude::*};
use std::time::{Duration, SystemTime}; use std::time::{Duration, SystemTime};
@ -71,7 +71,9 @@ impl ActorCache {
id: &IriString, id: &IriString,
requests: &Requests, requests: &Requests,
) -> Result<Actor, Error> { ) -> Result<Actor, Error> {
let accepted_actor = requests.fetch::<AcceptedActors>(id.as_str()).await?; let accepted_actor = requests
.fetch::<AcceptedActors>(id, BreakerStrategy::Require2XX)
.await?;
let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?; let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?;
let accepted_actor_id = accepted_actor let accepted_actor_id = accepted_actor
@ -97,6 +99,6 @@ impl ActorCache {
fn get_inbox(actor: &AcceptedActors) -> Result<&IriString, Error> { fn get_inbox(actor: &AcceptedActors) -> Result<&IriString, Error> {
Ok(actor Ok(actor
.endpoints()? .endpoints()?
.and_then(|e| e.shared_inbox) .and_then(|e| e.shared_inbox.as_ref())
.unwrap_or(actor.inbox()?)) .unwrap_or(actor.inbox()?))
} }

View File

@ -9,10 +9,10 @@ pub(crate) struct LastOnline {
impl LastOnline { impl LastOnline {
pub(crate) fn mark_seen(&self, iri: &IriStr) { pub(crate) fn mark_seen(&self, iri: &IriStr) {
if let Some(authority) = iri.authority_str() { if let Some(authority) = iri.authority_str() {
self.domains let mut guard = self.domains.lock().unwrap();
.lock() guard.insert(authority.to_string(), OffsetDateTime::now_utc());
.unwrap() metrics::gauge!("relay.last-online.size",)
.insert(authority.to_string(), OffsetDateTime::now_utc()); .set(crate::collector::recordable(guard.len()));
} }
} }

View File

@ -182,7 +182,7 @@ impl Node {
let authority = url.authority_str().ok_or(ErrorKind::MissingDomain)?; let authority = url.authority_str().ok_or(ErrorKind::MissingDomain)?;
let scheme = url.scheme_str(); let scheme = url.scheme_str();
let base = iri!(format!("{}://{}", scheme, authority)); let base = iri!(format!("{scheme}://{authority}"));
Ok(Node { Ok(Node {
base, base,

View File

@ -1,14 +1,15 @@
use crate::{ use crate::{
config::{Config, UrlKind},
data::NodeCache, data::NodeCache,
db::Db, db::Db,
error::Error, error::Error,
requests::{Breakers, Requests}, requests::{Breakers, Requests},
spawner::Spawner,
}; };
use activitystreams::iri_string::types::IriString; use activitystreams::iri_string::types::IriString;
use actix_web::web; use actix_web::web;
use lru::LruCache; use lru::LruCache;
use rand::thread_rng; use rand::thread_rng;
use reqwest_middleware::ClientWithMiddleware;
use rsa::{RsaPrivateKey, RsaPublicKey}; use rsa::{RsaPrivateKey, RsaPublicKey};
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
@ -16,10 +17,10 @@ use super::LastOnline;
#[derive(Clone)] #[derive(Clone)]
pub struct State { pub struct State {
pub(crate) requests: Requests,
pub(crate) public_key: RsaPublicKey, pub(crate) public_key: RsaPublicKey,
private_key: RsaPrivateKey,
object_cache: Arc<RwLock<LruCache<IriString, IriString>>>, object_cache: Arc<RwLock<LruCache<IriString, IriString>>>,
node_cache: NodeCache, pub(crate) node_cache: NodeCache,
breakers: Breakers, breakers: Breakers,
pub(crate) last_online: Arc<LastOnline>, pub(crate) last_online: Arc<LastOnline>,
pub(crate) db: Db, pub(crate) db: Db,
@ -36,20 +37,6 @@ impl std::fmt::Debug for State {
} }
impl State { impl State {
pub(crate) fn node_cache(&self) -> NodeCache {
self.node_cache.clone()
}
pub(crate) fn requests(&self, config: &Config) -> Requests {
Requests::new(
config.generate_url(UrlKind::MainKey).to_string(),
self.private_key.clone(),
config.user_agent(),
self.breakers.clone(),
self.last_online.clone(),
)
}
#[tracing::instrument( #[tracing::instrument(
level = "debug", level = "debug",
name = "Get inboxes for other domains", name = "Get inboxes for other domains",
@ -86,11 +73,22 @@ impl State {
} }
pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) { pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
self.object_cache.write().unwrap().put(object_id, actor_id); let mut guard = self.object_cache.write().unwrap();
guard.put(object_id, actor_id);
metrics::gauge!("relay.object-cache.size").set(crate::collector::recordable(guard.len()));
}
pub(crate) fn is_connected(&self, iri: &IriString) -> bool {
self.breakers.should_try(iri)
} }
#[tracing::instrument(level = "debug", name = "Building state", skip_all)] #[tracing::instrument(level = "debug", name = "Building state", skip_all)]
pub(crate) async fn build(db: Db) -> Result<Self, Error> { pub(crate) async fn build(
db: Db,
key_id: String,
spawner: Spawner,
client: ClientWithMiddleware,
) -> Result<Self, Error> {
let private_key = if let Ok(Some(key)) = db.private_key().await { let private_key = if let Ok(Some(key)) = db.private_key().await {
tracing::debug!("Using existing key"); tracing::debug!("Using existing key");
key key
@ -109,16 +107,28 @@ impl State {
let public_key = private_key.to_public_key(); let public_key = private_key.to_public_key();
let state = State { let breakers = Breakers::default();
public_key, let last_online = Arc::new(LastOnline::empty());
let requests = Requests::new(
key_id,
private_key, private_key,
breakers.clone(),
last_online.clone(),
spawner,
client,
);
let state = State {
requests,
public_key,
object_cache: Arc::new(RwLock::new(LruCache::new( object_cache: Arc::new(RwLock::new(LruCache::new(
(1024 * 8).try_into().expect("nonzero"), (1024 * 8).try_into().expect("nonzero"),
))), ))),
node_cache: NodeCache::new(db.clone()), node_cache: NodeCache::new(db.clone()),
breakers: Breakers::default(), breakers,
db, db,
last_online: Arc::new(LastOnline::empty()), last_online,
}; };
Ok(state) Ok(state)

245
src/db.rs
View File

@ -7,10 +7,13 @@ use rsa::{
pkcs8::{DecodePrivateKey, EncodePrivateKey}, pkcs8::{DecodePrivateKey, EncodePrivateKey},
RsaPrivateKey, RsaPrivateKey,
}; };
use sled::{Batch, Tree}; use sled::{transaction::TransactionError, Batch, Transactional, Tree};
use std::{ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
sync::Arc, sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::SystemTime, time::SystemTime,
}; };
use time::OffsetDateTime; use time::OffsetDateTime;
@ -22,6 +25,8 @@ pub(crate) struct Db {
} }
struct Inner { struct Inner {
healthz: Tree,
healthz_counter: Arc<AtomicU64>,
actor_id_actor: Tree, actor_id_actor: Tree,
public_key_id_actor_id: Tree, public_key_id_actor_id: Tree,
connected_actor_ids: Tree, connected_actor_ids: Tree,
@ -242,6 +247,8 @@ impl Db {
fn build_inner(restricted_mode: bool, db: sled::Db) -> Result<Self, Error> { fn build_inner(restricted_mode: bool, db: sled::Db) -> Result<Self, Error> {
Ok(Db { Ok(Db {
inner: Arc::new(Inner { inner: Arc::new(Inner {
healthz: db.open_tree("healthz")?,
healthz_counter: Arc::new(AtomicU64::new(0)),
actor_id_actor: db.open_tree("actor-id-actor")?, actor_id_actor: db.open_tree("actor-id-actor")?,
public_key_id_actor_id: db.open_tree("public-key-id-actor-id")?, public_key_id_actor_id: db.open_tree("public-key-id-actor-id")?,
connected_actor_ids: db.open_tree("connected-actor-ids")?, connected_actor_ids: db.open_tree("connected-actor-ids")?,
@ -273,6 +280,26 @@ impl Db {
Ok(t) Ok(t)
} }
pub(crate) async fn check_health(&self) -> Result<(), Error> {
let next = self.inner.healthz_counter.fetch_add(1, Ordering::Relaxed);
self.unblock(move |inner| {
let res = inner
.healthz
.insert("healthz", &next.to_be_bytes()[..])
.map_err(Error::from);
metrics::gauge!("relay.db.healthz.size")
.set(crate::collector::recordable(inner.healthz.len()));
res
})
.await?;
self.inner.healthz.flush_async().await?;
self.unblock(move |inner| inner.healthz.get("healthz").map_err(Error::from))
.await?;
Ok(())
}
pub(crate) async fn mark_last_seen( pub(crate) async fn mark_last_seen(
&self, &self,
nodes: HashMap<String, OffsetDateTime>, nodes: HashMap<String, OffsetDateTime>,
@ -327,6 +354,9 @@ impl Db {
.actor_id_info .actor_id_info
.insert(actor_id.as_str().as_bytes(), vec)?; .insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-info.size")
.set(crate::collector::recordable(inner.actor_id_info.len()));
Ok(()) Ok(())
}) })
.await .await
@ -334,12 +364,12 @@ impl Db {
pub(crate) async fn info(&self, actor_id: IriString) -> Result<Option<Info>, Error> { pub(crate) async fn info(&self, actor_id: IriString) -> Result<Option<Info>, Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
if let Some(ivec) = inner.actor_id_info.get(actor_id.as_str().as_bytes())? { inner
let info = serde_json::from_slice(&ivec)?; .actor_id_info
Ok(Some(info)) .get(actor_id.as_str().as_bytes())?
} else { .map(|ivec| serde_json::from_slice(&ivec))
Ok(None) .transpose()
} .map_err(Error::from)
}) })
.await .await
} }
@ -361,6 +391,9 @@ impl Db {
.actor_id_instance .actor_id_instance
.insert(actor_id.as_str().as_bytes(), vec)?; .insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-instance.size")
.set(crate::collector::recordable(inner.actor_id_instance.len()));
Ok(()) Ok(())
}) })
.await .await
@ -368,12 +401,12 @@ impl Db {
pub(crate) async fn instance(&self, actor_id: IriString) -> Result<Option<Instance>, Error> { pub(crate) async fn instance(&self, actor_id: IriString) -> Result<Option<Instance>, Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
if let Some(ivec) = inner.actor_id_instance.get(actor_id.as_str().as_bytes())? { inner
let instance = serde_json::from_slice(&ivec)?; .actor_id_instance
Ok(Some(instance)) .get(actor_id.as_str().as_bytes())?
} else { .map(|ivec| serde_json::from_slice(&ivec))
Ok(None) .transpose()
} .map_err(Error::from)
}) })
.await .await
} }
@ -395,6 +428,9 @@ impl Db {
.actor_id_contact .actor_id_contact
.insert(actor_id.as_str().as_bytes(), vec)?; .insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-contact.size")
.set(crate::collector::recordable(inner.actor_id_contact.len()));
Ok(()) Ok(())
}) })
.await .await
@ -402,12 +438,12 @@ impl Db {
pub(crate) async fn contact(&self, actor_id: IriString) -> Result<Option<Contact>, Error> { pub(crate) async fn contact(&self, actor_id: IriString) -> Result<Option<Contact>, Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
if let Some(ivec) = inner.actor_id_contact.get(actor_id.as_str().as_bytes())? { inner
let contact = serde_json::from_slice(&ivec)?; .actor_id_contact
Ok(Some(contact)) .get(actor_id.as_str().as_bytes())?
} else { .map(|ivec| serde_json::from_slice(&ivec))
Ok(None) .transpose()
} .map_err(Error::from)
}) })
.await .await
} }
@ -425,6 +461,12 @@ impl Db {
inner inner
.media_url_media_id .media_url_media_id
.insert(url.as_str().as_bytes(), id.as_bytes())?; .insert(url.as_str().as_bytes(), id.as_bytes())?;
metrics::gauge!("relay.db.media-id-media-url.size")
.set(crate::collector::recordable(inner.media_id_media_url.len()));
metrics::gauge!("relay.db.media-url-media-id.size")
.set(crate::collector::recordable(inner.media_url_media_id.len()));
Ok(()) Ok(())
}) })
.await .await
@ -432,22 +474,20 @@ impl Db {
pub(crate) async fn media_id(&self, url: IriString) -> Result<Option<Uuid>, Error> { pub(crate) async fn media_id(&self, url: IriString) -> Result<Option<Uuid>, Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
if let Some(ivec) = inner.media_url_media_id.get(url.as_str().as_bytes())? { Ok(inner
Ok(uuid_from_ivec(ivec)) .media_url_media_id
} else { .get(url.as_str().as_bytes())?
Ok(None) .and_then(uuid_from_ivec))
}
}) })
.await .await
} }
pub(crate) async fn media_url(&self, id: Uuid) -> Result<Option<IriString>, Error> { pub(crate) async fn media_url(&self, id: Uuid) -> Result<Option<IriString>, Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
if let Some(ivec) = inner.media_id_media_url.get(id.as_bytes())? { Ok(inner
Ok(url_from_ivec(ivec)) .media_id_media_url
} else { .get(id.as_bytes())?
Ok(None) .and_then(url_from_ivec))
}
}) })
.await .await
} }
@ -468,7 +508,7 @@ impl Db {
pub(crate) async fn is_connected(&self, base_id: IriString) -> Result<bool, Error> { pub(crate) async fn is_connected(&self, base_id: IriString) -> Result<bool, Error> {
let scheme = base_id.scheme_str(); let scheme = base_id.scheme_str();
let authority = base_id.authority_str().ok_or(ErrorKind::MissingDomain)?; let authority = base_id.authority_str().ok_or(ErrorKind::MissingDomain)?;
let prefix = format!("{}://{}", scheme, authority); let prefix = format!("{scheme}://{authority}");
self.unblock(move |inner| { self.unblock(move |inner| {
let connected = inner let connected = inner
@ -487,26 +527,22 @@ impl Db {
public_key_id: IriString, public_key_id: IriString,
) -> Result<Option<IriString>, Error> { ) -> Result<Option<IriString>, Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
if let Some(ivec) = inner Ok(inner
.public_key_id_actor_id .public_key_id_actor_id
.get(public_key_id.as_str().as_bytes())? .get(public_key_id.as_str().as_bytes())?
{ .and_then(url_from_ivec))
Ok(url_from_ivec(ivec))
} else {
Ok(None)
}
}) })
.await .await
} }
pub(crate) async fn actor(&self, actor_id: IriString) -> Result<Option<Actor>, Error> { pub(crate) async fn actor(&self, actor_id: IriString) -> Result<Option<Actor>, Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
if let Some(ivec) = inner.actor_id_actor.get(actor_id.as_str().as_bytes())? { inner
let actor = serde_json::from_slice(&ivec)?; .actor_id_actor
Ok(Some(actor)) .get(actor_id.as_str().as_bytes())?
} else { .map(|ivec| serde_json::from_slice(&ivec))
Ok(None) .transpose()
} .map_err(Error::from)
}) })
.await .await
} }
@ -522,30 +558,46 @@ impl Db {
inner inner
.actor_id_actor .actor_id_actor
.insert(actor.id.as_str().as_bytes(), vec)?; .insert(actor.id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.public-key-actor-id.size").set(crate::collector::recordable(
inner.public_key_id_actor_id.len(),
));
metrics::gauge!("relay.db.actor-id-actor.size").set(crate::collector::recordable(
inner.public_key_id_actor_id.len(),
));
Ok(()) Ok(())
}) })
.await .await
} }
pub(crate) async fn remove_connection(&self, actor_id: IriString) -> Result<(), Error> { pub(crate) async fn remove_connection(&self, actor_id: IriString) -> Result<(), Error> {
tracing::debug!("Removing Connection: {}", actor_id); tracing::debug!("Removing Connection: {actor_id}");
self.unblock(move |inner| { self.unblock(move |inner| {
inner inner
.connected_actor_ids .connected_actor_ids
.remove(actor_id.as_str().as_bytes())?; .remove(actor_id.as_str().as_bytes())?;
metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
inner.connected_actor_ids.len(),
));
Ok(()) Ok(())
}) })
.await .await
} }
pub(crate) async fn add_connection(&self, actor_id: IriString) -> Result<(), Error> { pub(crate) async fn add_connection(&self, actor_id: IriString) -> Result<(), Error> {
tracing::debug!("Adding Connection: {}", actor_id); tracing::debug!("Adding Connection: {actor_id}");
self.unblock(move |inner| { self.unblock(move |inner| {
inner inner
.connected_actor_ids .connected_actor_ids
.insert(actor_id.as_str().as_bytes(), actor_id.as_str().as_bytes())?; .insert(actor_id.as_str().as_bytes(), actor_id.as_str().as_bytes())?;
metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
inner.connected_actor_ids.len(),
));
Ok(()) Ok(())
}) })
.await .await
@ -553,30 +605,64 @@ impl Db {
pub(crate) async fn add_blocks(&self, domains: Vec<String>) -> Result<(), Error> { pub(crate) async fn add_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
for connected in inner.connected_by_domain(&domains) { let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
inner
.connected_actor_ids let res = (
.remove(connected.as_str().as_bytes())?; &inner.connected_actor_ids,
&inner.blocked_domains,
&inner.allowed_domains,
)
.transaction(|(connected, blocked, allowed)| {
let mut connected_batch = Batch::default();
let mut blocked_batch = Batch::default();
let mut allowed_batch = Batch::default();
for connected in &connected_by_domain {
connected_batch.remove(connected.as_str().as_bytes());
} }
for authority in &domains { for authority in &domains {
inner blocked_batch
.blocked_domains .insert(domain_key(authority).as_bytes(), authority.as_bytes());
.insert(domain_key(authority), authority.as_bytes())?; allowed_batch.remove(domain_key(authority).as_bytes());
inner.allowed_domains.remove(domain_key(authority))?;
} }
connected.apply_batch(&connected_batch)?;
blocked.apply_batch(&blocked_batch)?;
allowed.apply_batch(&allowed_batch)?;
Ok(()) Ok(())
});
metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
inner.connected_actor_ids.len(),
));
metrics::gauge!("relay.db.blocked-domains.size")
.set(crate::collector::recordable(inner.blocked_domains.len()));
metrics::gauge!("relay.db.allowed-domains.size")
.set(crate::collector::recordable(inner.allowed_domains.len()));
match res {
Ok(()) => Ok(()),
Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e.into()),
}
}) })
.await .await
} }
pub(crate) async fn remove_blocks(&self, domains: Vec<String>) -> Result<(), Error> { pub(crate) async fn remove_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
let mut blocked_batch = Batch::default();
for authority in &domains { for authority in &domains {
inner.blocked_domains.remove(domain_key(authority))?; blocked_batch.remove(domain_key(authority).as_bytes());
} }
inner.blocked_domains.apply_batch(blocked_batch)?;
metrics::gauge!("relay.db.blocked-domains.size")
.set(crate::collector::recordable(inner.blocked_domains.len()));
Ok(()) Ok(())
}) })
.await .await
@ -584,12 +670,17 @@ impl Db {
pub(crate) async fn add_allows(&self, domains: Vec<String>) -> Result<(), Error> { pub(crate) async fn add_allows(&self, domains: Vec<String>) -> Result<(), Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
let mut allowed_batch = Batch::default();
for authority in &domains { for authority in &domains {
inner allowed_batch.insert(domain_key(authority).as_bytes(), authority.as_bytes());
.allowed_domains
.insert(domain_key(authority), authority.as_bytes())?;
} }
inner.allowed_domains.apply_batch(allowed_batch)?;
metrics::gauge!("relay.db.allowed-domains.size")
.set(crate::collector::recordable(inner.allowed_domains.len()));
Ok(()) Ok(())
}) })
.await .await
@ -598,17 +689,32 @@ impl Db {
pub(crate) async fn remove_allows(&self, domains: Vec<String>) -> Result<(), Error> { pub(crate) async fn remove_allows(&self, domains: Vec<String>) -> Result<(), Error> {
self.unblock(move |inner| { self.unblock(move |inner| {
if inner.restricted_mode { if inner.restricted_mode {
for connected in inner.connected_by_domain(&domains) { let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
inner
.connected_actor_ids let mut connected_batch = Batch::default();
.remove(connected.as_str().as_bytes())?;
} for connected in &connected_by_domain {
connected_batch.remove(connected.as_str().as_bytes());
} }
for authority in &domains { inner.connected_actor_ids.apply_batch(connected_batch)?;
inner.allowed_domains.remove(domain_key(authority))?;
metrics::gauge!("relay.db.connected-actor-ids.size").set(
crate::collector::recordable(inner.connected_actor_ids.len()),
);
} }
let mut allowed_batch = Batch::default();
for authority in &domains {
allowed_batch.remove(domain_key(authority).as_bytes());
}
inner.allowed_domains.apply_batch(allowed_batch)?;
metrics::gauge!("relay.db.allowed-domains.size")
.set(crate::collector::recordable(inner.allowed_domains.len()));
Ok(()) Ok(())
}) })
.await .await
@ -649,6 +755,10 @@ impl Db {
inner inner
.settings .settings
.insert("private-key".as_bytes(), pem_pkcs8.as_bytes())?; .insert("private-key".as_bytes(), pem_pkcs8.as_bytes())?;
metrics::gauge!("relay.db.settings.size")
.set(crate::collector::recordable(inner.settings.len()));
Ok(()) Ok(())
}) })
.await .await
@ -734,6 +844,11 @@ mod tests {
{ {
let db = let db =
Db::build_inner(true, sled::Config::new().temporary(true).open().unwrap()).unwrap(); Db::build_inner(true, sled::Config::new().temporary(true).open().unwrap()).unwrap();
actix_rt::System::new().block_on((f)(db));
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on((f)(db));
} }
} }

View File

@ -1,53 +1,85 @@
use activitystreams::checked::CheckError; use activitystreams::checked::CheckError;
use actix_rt::task::JoinError;
use actix_web::{ use actix_web::{
error::{BlockingError, ResponseError}, error::{BlockingError, ResponseError},
http::StatusCode, http::StatusCode,
HttpResponse, HttpResponse,
}; };
use http_signature_normalization_actix::PrepareSignError; use background_jobs::BoxError;
use std::{convert::Infallible, fmt::Debug, io}; use color_eyre::eyre::Error as Report;
use tracing_error::SpanTrace; use http_signature_normalization_reqwest::SignError;
use std::{convert::Infallible, io, sync::Arc};
use tokio::task::JoinError;
#[derive(Clone)]
struct ArcKind {
kind: Arc<ErrorKind>,
}
impl std::fmt::Debug for ArcKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.kind.fmt(f)
}
}
impl std::fmt::Display for ArcKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.kind.fmt(f)
}
}
impl std::error::Error for ArcKind {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.kind.source()
}
}
pub(crate) struct Error { pub(crate) struct Error {
context: String, kind: ArcKind,
kind: ErrorKind, display: Box<str>,
debug: Box<str>,
} }
impl Error { impl Error {
fn kind(&self) -> &ErrorKind {
&self.kind.kind
}
pub(crate) fn is_breaker(&self) -> bool { pub(crate) fn is_breaker(&self) -> bool {
matches!(self.kind, ErrorKind::Breaker) matches!(self.kind(), ErrorKind::Breaker)
} }
pub(crate) fn is_not_found(&self) -> bool { pub(crate) fn is_not_found(&self) -> bool {
matches!(self.kind, ErrorKind::Status(_, StatusCode::NOT_FOUND)) matches!(self.kind(), ErrorKind::Status(_, StatusCode::NOT_FOUND))
} }
pub(crate) fn is_bad_request(&self) -> bool { pub(crate) fn is_bad_request(&self) -> bool {
matches!(self.kind, ErrorKind::Status(_, StatusCode::BAD_REQUEST)) matches!(self.kind(), ErrorKind::Status(_, StatusCode::BAD_REQUEST))
} }
pub(crate) fn is_gone(&self) -> bool { pub(crate) fn is_gone(&self) -> bool {
matches!(self.kind, ErrorKind::Status(_, StatusCode::GONE)) matches!(self.kind(), ErrorKind::Status(_, StatusCode::GONE))
}
pub(crate) fn is_malformed_json(&self) -> bool {
matches!(self.kind(), ErrorKind::Json(_))
} }
} }
impl std::fmt::Debug for Error { impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "{:?}", self.kind) f.write_str(&self.debug)
} }
} }
impl std::fmt::Display for Error { impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "{}", self.kind)?; f.write_str(&self.display)
std::fmt::Display::fmt(&self.context, f)
} }
} }
impl std::error::Error for Error { impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.kind.source() self.kind().source()
} }
} }
@ -56,56 +88,82 @@ where
ErrorKind: From<T>, ErrorKind: From<T>,
{ {
fn from(error: T) -> Self { fn from(error: T) -> Self {
let kind = ArcKind {
kind: Arc::new(ErrorKind::from(error)),
};
let report = Report::new(kind.clone());
let display = format!("{report}");
let debug = format!("{report:?}");
Error { Error {
context: SpanTrace::capture().to_string(), kind,
kind: error.into(), display: Box::from(display),
debug: Box::from(debug),
} }
} }
} }
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorKind { pub(crate) enum ErrorKind {
#[error("Error queueing job, {0}")] #[error("Error in extractor")]
Queue(anyhow::Error), Extractor(#[from] crate::extractors::ErrorKind),
#[error("Error in configuration, {0}")] #[error("Error queueing job")]
Queue(#[from] BoxError),
#[error("Error in configuration")]
Config(#[from] config::ConfigError), Config(#[from] config::ConfigError),
#[error("Couldn't parse key, {0}")] #[error("Couldn't parse key")]
Pkcs8(#[from] rsa::pkcs8::Error), Pkcs8(#[from] rsa::pkcs8::Error),
#[error("Couldn't encode public key, {0}")] #[error("Couldn't encode public key")]
Spki(#[from] rsa::pkcs8::spki::Error), Spki(#[from] rsa::pkcs8::spki::Error),
#[error("Couldn't parse IRI, {0}")] #[error("Couldn't sign request")]
SignRequest,
#[error("Response body from server exceeded limits")]
BodyTooLarge,
#[error("Couldn't make request")]
Reqwest(#[from] reqwest::Error),
#[error("Couldn't make request")]
ReqwestMiddleware(#[from] reqwest_middleware::Error),
#[error("Couldn't parse IRI")]
ParseIri(#[from] activitystreams::iri_string::validate::Error), ParseIri(#[from] activitystreams::iri_string::validate::Error),
#[error("Couldn't normalize IRI, {0}")] #[error("Couldn't normalize IRI")]
NormalizeIri(#[from] std::collections::TryReserveError), NormalizeIri(#[from] std::collections::TryReserveError),
#[error("Couldn't perform IO, {0}")] #[error("Couldn't perform IO")]
Io(#[from] io::Error), Io(#[from] io::Error),
#[error("Couldn't sign string, {0}")] #[error("Couldn't sign string, {0}")]
Rsa(rsa::errors::Error), Rsa(rsa::errors::Error),
#[error("Couldn't use db, {0}")] #[error("Couldn't use db")]
Sled(#[from] sled::Error), Sled(#[from] sled::Error),
#[error("Couldn't do the json thing, {0}")] #[error("Couldn't do the json thing")]
Json(#[from] serde_json::Error), Json(#[from] serde_json::Error),
#[error("Couldn't build signing string, {0}")] #[error("Couldn't sign request")]
PrepareSign(#[from] PrepareSignError), Sign(#[from] SignError),
#[error("Couldn't sign digest")] #[error("Couldn't sign digest")]
Signature(#[from] signature::Error), Signature(#[from] rsa::signature::Error),
#[error("Couldn't read signature")] #[error("Couldn't prepare TLS private key")]
ReadSignature(signature::Error), PrepareKey(#[from] rustls::Error),
#[error("Couldn't verify signature")] #[error("Couldn't verify signature")]
VerifySignature(signature::Error), VerifySignature,
#[error("Failed to encode key der")]
DerEncode,
#[error("Couldn't parse the signature header")] #[error("Couldn't parse the signature header")]
HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue), HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue),
@ -131,10 +189,10 @@ pub(crate) enum ErrorKind {
#[error("Wrong ActivityPub kind, {0}")] #[error("Wrong ActivityPub kind, {0}")]
Kind(String), Kind(String),
#[error("Too many CPUs, {0}")] #[error("Too many CPUs")]
CpuCount(#[from] std::num::TryFromIntError), CpuCount(#[from] std::num::TryFromIntError),
#[error("{0}")] #[error("Host mismatch")]
HostMismatch(#[from] CheckError), HostMismatch(#[from] CheckError),
#[error("Couldn't flush buffer")] #[error("Couldn't flush buffer")]
@ -188,7 +246,7 @@ pub(crate) enum ErrorKind {
impl ResponseError for Error { impl ResponseError for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self.kind { match self.kind() {
ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => { ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
StatusCode::FORBIDDEN StatusCode::FORBIDDEN
} }
@ -208,7 +266,7 @@ impl ResponseError for Error {
.insert_header(("Content-Type", "application/activity+json")) .insert_header(("Content-Type", "application/activity+json"))
.body( .body(
serde_json::to_string(&serde_json::json!({ serde_json::to_string(&serde_json::json!({
"error": self.kind.to_string(), "error": self.kind().to_string(),
})) }))
.unwrap_or_else(|_| "{}".to_string()), .unwrap_or_else(|_| "{}".to_string()),
) )
@ -238,3 +296,15 @@ impl From<rsa::errors::Error> for ErrorKind {
ErrorKind::Rsa(e) ErrorKind::Rsa(e)
} }
} }
impl From<http_signature_normalization_actix::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_actix::Canceled) -> Self {
Self::Canceled
}
}
impl From<http_signature_normalization_reqwest::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_reqwest::Canceled) -> Self {
Self::Canceled
}
}

View File

@ -1,20 +1,15 @@
use actix_web::{ use actix_web::{
dev::Payload, dev::Payload,
error::{BlockingError, ParseError}, error::ParseError,
http::{ http::header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
StatusCode,
},
web::Data, web::Data,
FromRequest, HttpMessage, HttpRequest, HttpResponse, ResponseError, FromRequest, HttpMessage, HttpRequest,
}; };
use bcrypt::{BcryptError, DEFAULT_COST}; use bcrypt::{BcryptError, DEFAULT_COST};
use futures_util::future::LocalBoxFuture; use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn};
use http_signature_normalization_actix::prelude::InvalidHeaderValue;
use std::{convert::Infallible, str::FromStr, time::Instant}; use std::{convert::Infallible, str::FromStr, time::Instant};
use tracing_error::SpanTrace;
use crate::db::Db; use crate::{db::Db, error::Error, future::LocalBoxFuture, spawner::Spawner};
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct AdminConfig { pub(crate) struct AdminConfig {
@ -29,7 +24,7 @@ impl AdminConfig {
} }
fn verify(&self, token: XApiToken) -> Result<bool, Error> { fn verify(&self, token: XApiToken) -> Result<bool, Error> {
bcrypt::verify(&token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify) bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
} }
} }
@ -37,10 +32,10 @@ pub(crate) struct Admin {
db: Data<Db>, db: Data<Db>,
} }
type PrepareTuple = (Data<Db>, Data<AdminConfig>, Data<Spawner>, XApiToken);
impl Admin { impl Admin {
fn prepare_verify( fn prepare_verify(req: &HttpRequest) -> Result<PrepareTuple, Error> {
req: &HttpRequest,
) -> Result<(Data<Db>, Data<AdminConfig>, XApiToken), Error> {
let hashed_api_token = req let hashed_api_token = req
.app_data::<Data<AdminConfig>>() .app_data::<Data<AdminConfig>>()
.ok_or_else(Error::missing_config)? .ok_or_else(Error::missing_config)?
@ -53,16 +48,23 @@ impl Admin {
.ok_or_else(Error::missing_db)? .ok_or_else(Error::missing_db)?
.clone(); .clone();
Ok((db, hashed_api_token, x_api_token)) let spawner = req
.app_data::<Data<Spawner>>()
.ok_or_else(Error::missing_spawner)?
.clone();
Ok((db, hashed_api_token, spawner, x_api_token))
} }
#[tracing::instrument(level = "debug", skip_all)] #[tracing::instrument(level = "debug", skip_all)]
async fn verify( async fn verify(
hashed_api_token: Data<AdminConfig>, hashed_api_token: Data<AdminConfig>,
spawner: Data<Spawner>,
x_api_token: XApiToken, x_api_token: XApiToken,
) -> Result<(), Error> { ) -> Result<(), Error> {
let span = tracing::Span::current(); let span = tracing::Span::current();
if actix_web::web::block(move || span.in_scope(|| hashed_api_token.verify(x_api_token))) if spawner
.spawn_blocking(move || span.in_scope(|| hashed_api_token.verify(x_api_token)))
.await .await
.map_err(Error::canceled)?? .map_err(Error::canceled)??
{ {
@ -77,67 +79,42 @@ impl Admin {
} }
} }
#[derive(Debug, thiserror::Error)]
#[error("Failed authentication")]
pub(crate) struct Error {
context: String,
#[source]
kind: ErrorKind,
}
impl Error { impl Error {
fn invalid() -> Self { fn invalid() -> Self {
Error { Error::from(ErrorKind::Invalid)
context: SpanTrace::capture().to_string(),
kind: ErrorKind::Invalid,
}
} }
fn missing_config() -> Self { fn missing_config() -> Self {
Error { Error::from(ErrorKind::MissingConfig)
context: SpanTrace::capture().to_string(),
kind: ErrorKind::MissingConfig,
}
} }
fn missing_db() -> Self { fn missing_db() -> Self {
Error { Error::from(ErrorKind::MissingDb)
context: SpanTrace::capture().to_string(),
kind: ErrorKind::MissingDb,
} }
fn missing_spawner() -> Self {
Error::from(ErrorKind::MissingSpawner)
} }
fn bcrypt_verify(e: BcryptError) -> Self { fn bcrypt_verify(e: BcryptError) -> Self {
Error { Error::from(ErrorKind::BCryptVerify(e))
context: SpanTrace::capture().to_string(),
kind: ErrorKind::BCryptVerify(e),
}
} }
fn bcrypt_hash(e: BcryptError) -> Self { fn bcrypt_hash(e: BcryptError) -> Self {
Error { Error::from(ErrorKind::BCryptHash(e))
context: SpanTrace::capture().to_string(),
kind: ErrorKind::BCryptHash(e),
}
} }
fn parse_header(e: ParseError) -> Self { fn parse_header(e: ParseError) -> Self {
Error { Error::from(ErrorKind::ParseHeader(e))
context: SpanTrace::capture().to_string(),
kind: ErrorKind::ParseHeader(e),
}
} }
fn canceled(_: BlockingError) -> Self { fn canceled(_: Canceled) -> Self {
Error { Error::from(ErrorKind::Canceled)
context: SpanTrace::capture().to_string(),
kind: ErrorKind::Canceled,
}
} }
} }
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
enum ErrorKind { pub(crate) enum ErrorKind {
#[error("Invalid API Token")] #[error("Invalid API Token")]
Invalid, Invalid,
@ -147,6 +124,9 @@ enum ErrorKind {
#[error("Missing Db")] #[error("Missing Db")]
MissingDb, MissingDb,
#[error("Missing Spawner")]
MissingSpawner,
#[error("Panic in verify")] #[error("Panic in verify")]
Canceled, Canceled,
@ -160,20 +140,6 @@ enum ErrorKind {
ParseHeader(#[source] ParseError), ParseHeader(#[source] ParseError),
} }
impl ResponseError for Error {
fn status_code(&self) -> StatusCode {
match self.kind {
ErrorKind::Invalid | ErrorKind::ParseHeader(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
fn error_response(&self) -> HttpResponse {
HttpResponse::build(self.status_code())
.json(serde_json::json!({ "msg": self.kind.to_string() }))
}
}
impl FromRequest for Admin { impl FromRequest for Admin {
type Error = Error; type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>; type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
@ -182,12 +148,10 @@ impl FromRequest for Admin {
let now = Instant::now(); let now = Instant::now();
let res = Self::prepare_verify(req); let res = Self::prepare_verify(req);
Box::pin(async move { Box::pin(async move {
let (db, c, t) = res?; let (db, c, s, t) = res?;
Self::verify(c, t).await?; Self::verify(c, s, t).await?;
metrics::histogram!( metrics::histogram!("relay.admin.verify")
"relay.admin.verify", .record(now.elapsed().as_micros() as f64 / 1_000_000_f64);
now.elapsed().as_micros() as f64 / 1_000_000_f64
);
Ok(Admin { db }) Ok(Admin { db })
}) })
} }
@ -199,6 +163,10 @@ impl XApiToken {
pub(crate) fn new(token: String) -> Self { pub(crate) fn new(token: String) -> Self {
Self(token) Self(token)
} }
pub(crate) const fn http1_name() -> reqwest::header::HeaderName {
reqwest::header::HeaderName::from_static("x-api-token")
}
} }
impl Header for XApiToken { impl Header for XApiToken {
@ -226,3 +194,9 @@ impl FromStr for XApiToken {
Ok(XApiToken(s.to_string())) Ok(XApiToken(s.to_string()))
} }
} }
impl std::fmt::Display for XApiToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}

4
src/future.rs Normal file
View File

@ -0,0 +1,4 @@
use std::{future::Future, pin::Pin};
pub(crate) type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
pub(crate) type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

18
src/http1.rs Normal file
View File

@ -0,0 +1,18 @@
pub(crate) fn name_to_http02(
name: &reqwest::header::HeaderName,
) -> actix_web::http::header::HeaderName {
actix_web::http::header::HeaderName::from_bytes(name.as_ref())
.expect("headername conversions always work")
}
pub(crate) fn value_to_http02(
value: &reqwest::header::HeaderValue,
) -> actix_web::http::header::HeaderValue {
actix_web::http::header::HeaderValue::from_bytes(value.as_bytes())
.expect("headervalue conversions always work")
}
pub(crate) fn status_to_http02(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
actix_web::http::StatusCode::from_u16(status.as_u16())
.expect("statuscode conversions always work")
}

View File

@ -14,16 +14,17 @@ pub(crate) use self::{
use crate::{ use crate::{
config::Config, config::Config,
data::{ActorCache, MediaCache, NodeCache, State}, data::{ActorCache, MediaCache, State},
error::{Error, ErrorKind}, error::{Error, ErrorKind},
jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline}, jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline},
requests::Requests,
}; };
use background_jobs::{ use background_jobs::{
memory_storage::{ActixTimer, Storage}, memory_storage::{Storage, TokioTimer},
Job, Manager, QueueHandle, WorkerConfig, metrics::MetricsStorage,
tokio::{QueueHandle, WorkerConfig},
Job,
}; };
use std::{convert::TryFrom, num::NonZeroUsize, time::Duration}; use std::time::Duration;
fn debug_object(activity: &serde_json::Value) -> &serde_json::Value { fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
let mut object = &activity["object"]["type"]; let mut object = &activity["object"]["type"];
@ -39,16 +40,20 @@ fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
object object
} }
pub(crate) fn build_storage() -> MetricsStorage<Storage<TokioTimer>> {
MetricsStorage::wrap(Storage::new(TokioTimer))
}
pub(crate) fn create_workers( pub(crate) fn create_workers(
storage: MetricsStorage<Storage<TokioTimer>>,
state: State, state: State,
actors: ActorCache, actors: ActorCache,
media: MediaCache, media: MediaCache,
config: Config, config: Config,
) -> (Manager, JobServer) { ) -> std::io::Result<JobServer> {
let parallelism = std::thread::available_parallelism() let deliver_concurrency = config.deliver_concurrency();
.unwrap_or_else(|_| NonZeroUsize::try_from(1).expect("nonzero"));
let shared = WorkerConfig::new_managed(Storage::new(ActixTimer), move |queue_handle| { let queue_handle = WorkerConfig::new(storage, move |queue_handle| {
JobState::new( JobState::new(
state.clone(), state.clone(),
actors.clone(), actors.clone(),
@ -71,25 +76,21 @@ pub(crate) fn create_workers(
.register::<apub::Undo>() .register::<apub::Undo>()
.set_worker_count("maintenance", 2) .set_worker_count("maintenance", 2)
.set_worker_count("apub", 2) .set_worker_count("apub", 2)
.set_worker_count("deliver", 8) .set_worker_count("deliver", deliver_concurrency)
.start_with_threads(parallelism); .start()?;
shared.every(Duration::from_secs(60 * 5), Listeners); queue_handle.every(Duration::from_secs(60 * 5), Listeners)?;
shared.every(Duration::from_secs(60 * 10), RecordLastOnline); queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?;
let job_server = JobServer::new(shared.queue_handle().clone()); Ok(JobServer::new(queue_handle))
(shared, job_server)
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub(crate) struct JobState { pub(crate) struct JobState {
requests: Requests,
state: State, state: State,
actors: ActorCache, actors: ActorCache,
config: Config, config: Config,
media: MediaCache, media: MediaCache,
node_cache: NodeCache,
job_server: JobServer, job_server: JobServer,
} }
@ -115,12 +116,10 @@ impl JobState {
config: Config, config: Config,
) -> Self { ) -> Self {
JobState { JobState {
requests: state.requests(&config), state,
node_cache: state.node_cache(),
actors, actors,
config, config,
media, media,
state,
job_server, job_server,
} }
} }

View File

@ -2,14 +2,14 @@ use crate::{
config::{Config, UrlKind}, config::{Config, UrlKind},
db::Actor, db::Actor,
error::Error, error::Error,
future::BoxFuture,
jobs::{ jobs::{
apub::{get_inboxes, prepare_activity}, apub::{get_inboxes, prepare_activity},
DeliverMany, JobState, DeliverMany, JobState,
}, },
}; };
use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString}; use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString};
use background_jobs::ActixJob; use background_jobs::Job;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Announce { pub(crate) struct Announce {
@ -62,14 +62,15 @@ fn generate_announce(
) )
} }
impl ActixJob for Announce { impl Job for Announce {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Announce"; const NAME: &'static str = "relay::jobs::apub::Announce";
const QUEUE: &'static str = "apub"; const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -3,6 +3,7 @@ use crate::{
config::{Config, UrlKind}, config::{Config, UrlKind},
db::Actor, db::Actor,
error::{Error, ErrorKind}, error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo}, jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo},
}; };
use activitystreams::{ use activitystreams::{
@ -10,8 +11,7 @@ use activitystreams::{
iri_string::types::IriString, iri_string::types::IriString,
prelude::*, prelude::*,
}; };
use background_jobs::ActixJob; use background_jobs::Job;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Follow { pub(crate) struct Follow {
@ -111,14 +111,15 @@ fn generate_accept_follow(
) )
} }
impl ActixJob for Follow { impl Job for Follow {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Follow"; const NAME: &'static str = "relay::jobs::apub::Follow";
const QUEUE: &'static str = "apub"; const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -2,11 +2,11 @@ use crate::{
apub::AcceptedActivities, apub::AcceptedActivities,
db::Actor, db::Actor,
error::{Error, ErrorKind}, error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::get_inboxes, DeliverMany, JobState}, jobs::{apub::get_inboxes, DeliverMany, JobState},
}; };
use activitystreams::prelude::*; use activitystreams::prelude::*;
use background_jobs::ActixJob; use background_jobs::Job;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Forward { pub(crate) struct Forward {
@ -47,14 +47,15 @@ impl Forward {
} }
} }
impl ActixJob for Forward { impl Job for Forward {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Forward"; const NAME: &'static str = "relay::jobs::apub::Forward";
const QUEUE: &'static str = "apub"; const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -2,10 +2,10 @@ use crate::{
config::UrlKind, config::UrlKind,
db::Actor, db::Actor,
error::Error, error::Error,
future::BoxFuture,
jobs::{apub::generate_undo_follow, Deliver, JobState}, jobs::{apub::generate_undo_follow, Deliver, JobState},
}; };
use background_jobs::ActixJob; use background_jobs::Job;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Reject(pub(crate) Actor); pub(crate) struct Reject(pub(crate) Actor);
@ -33,14 +33,15 @@ impl Reject {
} }
} }
impl ActixJob for Reject { impl Job for Reject {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Reject"; const NAME: &'static str = "relay::jobs::apub::Reject";
const QUEUE: &'static str = "apub"; const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -3,11 +3,11 @@ use crate::{
config::UrlKind, config::UrlKind,
db::Actor, db::Actor,
error::Error, error::Error,
future::BoxFuture,
jobs::{apub::generate_undo_follow, Deliver, JobState}, jobs::{apub::generate_undo_follow, Deliver, JobState},
}; };
use activitystreams::prelude::BaseExt; use activitystreams::prelude::BaseExt;
use background_jobs::ActixJob; use background_jobs::Job;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Undo { pub(crate) struct Undo {
@ -48,14 +48,15 @@ impl Undo {
} }
} }
impl ActixJob for Undo { impl Job for Undo {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Undo"; const NAME: &'static str = "relay::jobs::apub::Undo";
const QUEUE: &'static str = "apub"; const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -1,11 +1,12 @@
use crate::{ use crate::{
apub::AcceptedActors, apub::AcceptedActors,
error::{Error, ErrorKind}, error::{Error, ErrorKind},
future::BoxFuture,
jobs::JobState, jobs::JobState,
requests::BreakerStrategy,
}; };
use activitystreams::{iri_string::types::IriString, object::Image, prelude::*}; use activitystreams::{iri_string::types::IriString, object::Image, prelude::*};
use background_jobs::ActixJob; use background_jobs::Job;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct QueryContact { pub(crate) struct QueryContact {
@ -32,6 +33,7 @@ impl QueryContact {
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
let contact_outdated = state let contact_outdated = state
.state
.node_cache .node_cache
.is_contact_outdated(self.actor_id.clone()) .is_contact_outdated(self.actor_id.clone())
.await; .await;
@ -41,8 +43,9 @@ impl QueryContact {
} }
let contact = match state let contact = match state
.state
.requests .requests
.fetch::<AcceptedActors>(self.contact_id.as_str()) .fetch::<AcceptedActors>(&self.contact_id, BreakerStrategy::Allow404AndBelow)
.await .await
{ {
Ok(contact) => contact, Ok(contact) => contact,
@ -57,6 +60,7 @@ impl QueryContact {
to_contact(contact).ok_or(ErrorKind::Extract("contact"))?; to_contact(contact).ok_or(ErrorKind::Extract("contact"))?;
state state
.state
.node_cache .node_cache
.set_contact(self.actor_id, username, display_name, url, avatar) .set_contact(self.actor_id, username, display_name, url, avatar)
.await?; .await?;
@ -81,15 +85,16 @@ fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, Iri
Some((username, display_name, url, avatar)) Some((username, display_name, url, avatar))
} }
impl ActixJob for QueryContact { impl Job for QueryContact {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::QueryContact"; const NAME: &'static str = "relay::jobs::QueryContact";
const QUEUE: &'static str = "maintenance"; const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -1,10 +1,11 @@
use crate::{ use crate::{
error::Error, error::Error,
future::BoxFuture,
jobs::{debug_object, JobState}, jobs::{debug_object, JobState},
requests::BreakerStrategy,
}; };
use activitystreams::iri_string::types::IriString; use activitystreams::iri_string::types::IriString;
use background_jobs::{ActixJob, Backoff}; use background_jobs::{Backoff, Job};
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Deliver { pub(crate) struct Deliver {
@ -34,8 +35,13 @@ impl Deliver {
} }
#[tracing::instrument(name = "Deliver", skip(state))] #[tracing::instrument(name = "Deliver", skip(state))]
async fn permform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
if let Err(e) = state.requests.deliver(self.to, &self.data).await { if let Err(e) = state
.state
.requests
.deliver(&self.to, &self.data, BreakerStrategy::Allow401AndBelow)
.await
{
if e.is_breaker() { if e.is_breaker() {
tracing::debug!("Not trying due to failed breaker"); tracing::debug!("Not trying due to failed breaker");
return Ok(()); return Ok(());
@ -50,15 +56,16 @@ impl Deliver {
} }
} }
impl ActixJob for Deliver { impl Job for Deliver {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::Deliver"; const NAME: &'static str = "relay::jobs::Deliver";
const QUEUE: &'static str = "deliver"; const QUEUE: &'static str = "deliver";
const BACKOFF: Backoff = Backoff::Exponential(8); const BACKOFF: Backoff = Backoff::Exponential(8);
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.permform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -1,10 +1,10 @@
use crate::{ use crate::{
error::Error, error::Error,
future::BoxFuture,
jobs::{debug_object, Deliver, JobState}, jobs::{debug_object, Deliver, JobState},
}; };
use activitystreams::iri_string::types::IriString; use activitystreams::iri_string::types::IriString;
use background_jobs::ActixJob; use background_jobs::Job;
use futures_util::future::LocalBoxFuture;
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct DeliverMany { pub(crate) struct DeliverMany {
@ -45,14 +45,15 @@ impl DeliverMany {
} }
} }
impl ActixJob for DeliverMany { impl Job for DeliverMany {
type State = JobState; type State = JobState;
type Future = LocalBoxFuture<'static, Result<(), anyhow::Error>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::DeliverMany"; const NAME: &'static str = "relay::jobs::DeliverMany";
const QUEUE: &'static str = "deliver"; const QUEUE: &'static str = "deliver";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

File diff suppressed because one or more lines are too long

View File

@ -1,17 +1,18 @@
use crate::{ use crate::{
error::{Error, ErrorKind}, error::{Error, ErrorKind},
future::BoxFuture,
jobs::{Boolish, JobState, QueryContact}, jobs::{Boolish, JobState, QueryContact},
requests::BreakerStrategy,
}; };
use activitystreams::{iri, iri_string::types::IriString, primitives::OneOrMany}; use activitystreams::{iri, iri_string::types::IriString, primitives::OneOrMany};
use background_jobs::ActixJob; use background_jobs::Job;
use std::{fmt::Debug, future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct QueryNodeinfo { pub(crate) struct QueryNodeinfo {
actor_id: IriString, actor_id: IriString,
} }
impl Debug for QueryNodeinfo { impl std::fmt::Debug for QueryNodeinfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("QueryNodeinfo") f.debug_struct("QueryNodeinfo")
.field("actor_id", &self.actor_id.to_string()) .field("actor_id", &self.actor_id.to_string())
@ -27,6 +28,7 @@ impl QueryNodeinfo {
#[tracing::instrument(name = "Query node info", skip(state))] #[tracing::instrument(name = "Query node info", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
if !state if !state
.state
.node_cache .node_cache
.is_nodeinfo_outdated(self.actor_id.clone()) .is_nodeinfo_outdated(self.actor_id.clone())
.await .await
@ -39,11 +41,12 @@ impl QueryNodeinfo {
.authority_str() .authority_str()
.ok_or(ErrorKind::MissingDomain)?; .ok_or(ErrorKind::MissingDomain)?;
let scheme = self.actor_id.scheme_str(); let scheme = self.actor_id.scheme_str();
let well_known_uri = iri!(format!("{}://{}/.well-known/nodeinfo", scheme, authority)); let well_known_uri = iri!(format!("{scheme}://{authority}/.well-known/nodeinfo"));
let well_known = match state let well_known = match state
.state
.requests .requests
.fetch_json::<WellKnown>(well_known_uri.as_str()) .fetch_json::<WellKnown>(&well_known_uri, BreakerStrategy::Allow404AndBelow)
.await .await
{ {
Ok(well_known) => well_known, Ok(well_known) => well_known,
@ -55,12 +58,17 @@ impl QueryNodeinfo {
}; };
let href = if let Some(link) = well_known.links.into_iter().find(|l| l.rel.is_supported()) { let href = if let Some(link) = well_known.links.into_iter().find(|l| l.rel.is_supported()) {
link.href iri!(&link.href)
} else { } else {
return Ok(()); return Ok(());
}; };
let nodeinfo = match state.requests.fetch_json::<Nodeinfo>(&href).await { let nodeinfo = match state
.state
.requests
.fetch_json::<Nodeinfo>(&href, BreakerStrategy::Require2XX)
.await
{
Ok(nodeinfo) => nodeinfo, Ok(nodeinfo) => nodeinfo,
Err(e) if e.is_breaker() => { Err(e) if e.is_breaker() => {
tracing::debug!("Not retrying due to failed breaker"); tracing::debug!("Not retrying due to failed breaker");
@ -70,6 +78,7 @@ impl QueryNodeinfo {
}; };
state state
.state
.node_cache .node_cache
.set_info( .set_info(
self.actor_id.clone(), self.actor_id.clone(),
@ -83,7 +92,7 @@ impl QueryNodeinfo {
.metadata .metadata
.and_then(|meta| meta.into_iter().next().and_then(|meta| meta.staff_accounts)) .and_then(|meta| meta.into_iter().next().and_then(|meta| meta.staff_accounts))
{ {
if let Some(contact_id) = accounts.get(0) { if let Some(contact_id) = accounts.first() {
state state
.job_server .job_server
.queue(QueryContact::new(self.actor_id, contact_id.clone())) .queue(QueryContact::new(self.actor_id, contact_id.clone()))
@ -95,15 +104,16 @@ impl QueryNodeinfo {
} }
} }
impl ActixJob for QueryNodeinfo { impl Job for QueryNodeinfo {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::QueryNodeinfo"; const NAME: &'static str = "relay::jobs::QueryNodeinfo";
const QUEUE: &'static str = "maintenance"; const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }
@ -146,7 +156,7 @@ struct Link {
#[serde(untagged)] #[serde(untagged)]
enum MaybeSupported<T> { enum MaybeSupported<T> {
Supported(T), Supported(T),
Unsupported(String), Unsupported(#[allow(unused)] String),
} }
impl<T> MaybeSupported<T> { impl<T> MaybeSupported<T> {
@ -155,8 +165,8 @@ impl<T> MaybeSupported<T> {
} }
} }
struct SupportedVersion(String); struct SupportedVersion(#[allow(unused)] String);
struct SupportedNodeinfo(String); struct SupportedNodeinfo(#[allow(unused)] String);
static SUPPORTED_VERSIONS: &str = "2."; static SUPPORTED_VERSIONS: &str = "2.";
static SUPPORTED_NODEINFO: &str = "http://nodeinfo.diaspora.software/ns/schema/2."; static SUPPORTED_NODEINFO: &str = "http://nodeinfo.diaspora.software/ns/schema/2.";
@ -168,7 +178,7 @@ impl<'de> serde::de::Visitor<'de> for SupportedVersionVisitor {
type Value = SupportedVersion; type Value = SupportedVersion;
fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "a string starting with '{}'", SUPPORTED_VERSIONS) write!(f, "a string starting with '{SUPPORTED_VERSIONS}'")
} }
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E> fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
@ -187,7 +197,7 @@ impl<'de> serde::de::Visitor<'de> for SupportedNodeinfoVisitor {
type Value = SupportedNodeinfo; type Value = SupportedNodeinfo;
fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "a string starting with '{}'", SUPPORTED_NODEINFO) write!(f, "a string starting with '{SUPPORTED_NODEINFO}'")
} }
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E> fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>

View File

@ -1,9 +1,9 @@
use crate::{ use crate::{
error::Error, error::Error,
future::BoxFuture,
jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState}, jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState},
}; };
use background_jobs::ActixJob; use background_jobs::Job;
use std::{future::Future, pin::Pin};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Listeners; pub(crate) struct Listeners;
@ -23,14 +23,15 @@ impl Listeners {
} }
} }
impl ActixJob for Listeners { impl Job for Listeners {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::Listeners"; const NAME: &'static str = "relay::jobs::Listeners";
const QUEUE: &'static str = "maintenance"; const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -1,6 +1,5 @@
use crate::{error::Error, jobs::JobState}; use crate::{error::Error, future::BoxFuture, jobs::JobState};
use background_jobs::{ActixJob, Backoff}; use background_jobs::{Backoff, Job};
use std::{future::Future, pin::Pin};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct RecordLastOnline; pub(crate) struct RecordLastOnline;
@ -14,15 +13,16 @@ impl RecordLastOnline {
} }
} }
impl ActixJob for RecordLastOnline { impl Job for RecordLastOnline {
type State = JobState; type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>; type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::RecordLastOnline"; const NAME: &'static str = "relay::jobs::RecordLastOnline";
const QUEUE: &'static str = "maintenance"; const QUEUE: &'static str = "maintenance";
const BACKOFF: Backoff = Backoff::Linear(1); const BACKOFF: Backoff = Backoff::Linear(1);
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) }) Box::pin(self.perform(state))
} }
} }

View File

@ -1,21 +1,27 @@
// need this for ructe // need this for ructe
#![allow(clippy::needless_borrow)] #![allow(clippy::needless_borrow)]
use std::time::Duration;
use activitystreams::iri_string::types::IriString; use activitystreams::iri_string::types::IriString;
use actix_rt::task::JoinHandle;
use actix_web::{middleware::Compress, web, App, HttpServer}; use actix_web::{middleware::Compress, web, App, HttpServer};
use collector::{DoubleRecorder, MemoryCollector}; use collector::MemoryCollector;
#[cfg(feature = "console")] #[cfg(feature = "console")]
use console_subscriber::ConsoleLayer; use console_subscriber::ConsoleLayer;
use error::Error;
use http_signature_normalization_actix::middleware::VerifySignature; use http_signature_normalization_actix::middleware::VerifySignature;
use metrics_exporter_prometheus::PrometheusBuilder; use metrics_exporter_prometheus::PrometheusBuilder;
use opentelemetry::{sdk::Resource, KeyValue}; use metrics_util::layers::FanoutBuilder;
use opentelemetry::{trace::TracerProvider, KeyValue};
use opentelemetry_otlp::WithExportConfig; use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::Resource;
use reqwest_middleware::ClientWithMiddleware;
use rustls::ServerConfig; use rustls::ServerConfig;
use tokio::task::JoinHandle;
use tracing_actix_web::TracingLogger; use tracing_actix_web::TracingLogger;
use tracing_error::ErrorLayer; use tracing_error::ErrorLayer;
use tracing_log::LogTracer; use tracing_log::LogTracer;
use tracing_subscriber::{filter::Targets, fmt::format::FmtSpan, layer::SubscriberExt, Layer}; use tracing_subscriber::{filter::Targets, layer::SubscriberExt, Layer};
mod admin; mod admin;
mod apub; mod apub;
@ -26,12 +32,18 @@ mod data;
mod db; mod db;
mod error; mod error;
mod extractors; mod extractors;
mod future;
mod http1;
mod jobs; mod jobs;
mod middleware; mod middleware;
mod requests; mod requests;
mod routes; mod routes;
mod spawner;
mod stream;
mod telegram; mod telegram;
use crate::config::UrlKind;
use self::{ use self::{
args::Args, args::Args,
config::Config, config::Config,
@ -39,22 +51,22 @@ use self::{
db::Db, db::Db,
jobs::create_workers, jobs::create_workers,
middleware::{DebugPayload, MyVerify, RelayResolver, Timings}, middleware::{DebugPayload, MyVerify, RelayResolver, Timings},
routes::{actor, inbox, index, nodeinfo, nodeinfo_meta, statics}, routes::{actor, healthz, inbox, index, nodeinfo, nodeinfo_meta, statics},
spawner::Spawner,
}; };
fn init_subscriber( fn init_subscriber(
software_name: &'static str, software_name: &'static str,
opentelemetry_url: Option<&IriString>, opentelemetry_url: Option<&IriString>,
) -> Result<(), anyhow::Error> { ) -> color_eyre::Result<()> {
LogTracer::init()?; LogTracer::init()?;
color_eyre::install()?;
let targets: Targets = std::env::var("RUST_LOG") let targets: Targets = std::env::var("RUST_LOG")
.unwrap_or_else(|_| "warn,actix_web=debug,actix_server=debug,tracing_actix_web=info".into()) .unwrap_or_else(|_| "info".into())
.parse()?; .parse()?;
let format_layer = tracing_subscriber::fmt::layer() let format_layer = tracing_subscriber::fmt::layer().with_filter(targets.clone());
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(targets.clone());
#[cfg(feature = "console")] #[cfg(feature = "console")]
let console_layer = ConsoleLayer::builder() let console_layer = ConsoleLayer::builder()
@ -71,21 +83,21 @@ fn init_subscriber(
let subscriber = subscriber.with(console_layer); let subscriber = subscriber.with(console_layer);
if let Some(url) = opentelemetry_url { if let Some(url) = opentelemetry_url {
let tracer = let exporter = opentelemetry_otlp::SpanExporter::builder()
opentelemetry_otlp::new_pipeline() .with_tonic()
.tracing() .with_endpoint(url.as_str())
.with_trace_config(opentelemetry::sdk::trace::config().with_resource( .build()?;
Resource::new(vec![KeyValue::new("service.name", software_name)]),
)) let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
.with_exporter( .with_resource(Resource::new(vec![KeyValue::new(
opentelemetry_otlp::new_exporter() "service.name",
.tonic() software_name,
.with_endpoint(url.as_str()), )]))
) .with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
.install_batch(opentelemetry::runtime::Tokio)?; .build();
let otel_layer = tracing_opentelemetry::layer() let otel_layer = tracing_opentelemetry::layer()
.with_tracer(tracer) .with_tracer(tracer_provider.tracer(software_name))
.with_filter(targets); .with_filter(targets);
let subscriber = subscriber.with(otel_layer); let subscriber = subscriber.with(otel_layer);
@ -97,8 +109,40 @@ fn init_subscriber(
Ok(()) Ok(())
} }
#[actix_rt::main] fn build_client(
async fn main() -> Result<(), anyhow::Error> { user_agent: &str,
timeout_seconds: u64,
proxy: Option<(&IriString, Option<(&str, &str)>)>,
) -> Result<ClientWithMiddleware, Error> {
let builder = reqwest::Client::builder().user_agent(user_agent.to_string());
let builder = if let Some((url, auth)) = proxy {
let proxy = reqwest::Proxy::all(url.as_str())?;
let proxy = if let Some((username, password)) = auth {
proxy.basic_auth(username, password)
} else {
proxy
};
builder.proxy(proxy)
} else {
builder
};
let client = builder
.timeout(Duration::from_secs(timeout_seconds))
.build()?;
let client_with_middleware = reqwest_middleware::ClientBuilder::new(client)
.with(reqwest_tracing::TracingMiddleware::default())
.build();
Ok(client_with_middleware)
}
#[tokio::main]
async fn main() -> color_eyre::Result<()> {
dotenv::dotenv().ok(); dotenv::dotenv().ok();
let config = Config::build()?; let config = Config::build()?;
@ -108,7 +152,8 @@ async fn main() -> Result<(), anyhow::Error> {
let args = Args::new(); let args = Args::new();
if args.any() { if args.any() {
return client_main(config, args).await?; client_main(config, args).await??;
return Ok(());
} }
let collector = MemoryCollector::new(); let collector = MemoryCollector::new();
@ -118,32 +163,40 @@ async fn main() -> Result<(), anyhow::Error> {
.with_http_listener(bind_addr) .with_http_listener(bind_addr)
.build()?; .build()?;
actix_rt::spawn(exporter); tokio::spawn(exporter);
DoubleRecorder::new(recorder, collector.clone()).install()?; let recorder = FanoutBuilder::default()
.add_recorder(recorder)
.add_recorder(collector.clone())
.build();
metrics::set_global_recorder(recorder).map_err(|e| color_eyre::eyre::eyre!("{e}"))?;
} else { } else {
collector.install()?; collector.install()?;
} }
tracing::warn!("Opening DB"); tracing::info!("Opening DB");
let db = Db::build(&config)?; let db = Db::build(&config)?;
tracing::warn!("Building caches"); tracing::info!("Building caches");
let actors = ActorCache::new(db.clone()); let actors = ActorCache::new(db.clone());
let media = MediaCache::new(db.clone()); let media = MediaCache::new(db.clone());
server_main(db, actors, media, collector, config).await??; server_main(db, actors, media, collector, config).await?;
tracing::warn!("Application exit"); tracing::info!("Application exit");
Ok(()) Ok(())
} }
fn client_main(config: Config, args: Args) -> JoinHandle<Result<(), anyhow::Error>> { fn client_main(config: Config, args: Args) -> JoinHandle<color_eyre::Result<()>> {
actix_rt::spawn(do_client_main(config, args)) tokio::spawn(do_client_main(config, args))
} }
async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error> { async fn do_client_main(config: Config, args: Args) -> color_eyre::Result<()> {
let client = requests::build_client(&config.user_agent()); let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;
if !args.blocks().is_empty() || !args.allowed().is_empty() { if !args.blocks().is_empty() || !args.allowed().is_empty() {
if args.undo() { if args.undo() {
@ -221,48 +274,77 @@ async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error>
Ok(()) Ok(())
} }
fn server_main( const VERIFY_RATIO: usize = 7;
async fn server_main(
db: Db, db: Db,
actors: ActorCache, actors: ActorCache,
media: MediaCache, media: MediaCache,
collector: MemoryCollector, collector: MemoryCollector,
config: Config, config: Config,
) -> JoinHandle<Result<(), anyhow::Error>> { ) -> color_eyre::Result<()> {
actix_rt::spawn(do_server_main(db, actors, media, collector, config)) let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;
tracing::info!("Creating state");
let (signature_threads, verify_threads) = match config.signature_threads() {
0 | 1 => (1, 1),
n if n <= VERIFY_RATIO => (n, 1),
n => {
let verify_threads = (n / VERIFY_RATIO).max(1);
let signature_threads = n.saturating_sub(verify_threads).max(VERIFY_RATIO);
(signature_threads, verify_threads)
} }
};
async fn do_server_main( let verify_spawner = Spawner::build("verify-cpu", verify_threads.try_into()?)?;
db: Db, let sign_spawner = Spawner::build("sign-cpu", signature_threads.try_into()?)?;
actors: ActorCache,
media: MediaCache,
collector: MemoryCollector,
config: Config,
) -> Result<(), anyhow::Error> {
tracing::warn!("Creating state");
let state = State::build(db.clone()).await?;
tracing::warn!("Creating workers"); let key_id = config.generate_url(UrlKind::MainKey).to_string();
let (manager, job_server) = let state = State::build(db.clone(), key_id, sign_spawner.clone(), client).await?;
create_workers(state.clone(), actors.clone(), media.clone(), config.clone());
if let Some((token, admin_handle)) = config.telegram_info() { if let Some((token, admin_handle)) = config.telegram_info() {
tracing::warn!("Creating telegram handler"); tracing::info!("Creating telegram handler");
telegram::start(admin_handle.to_owned(), db.clone(), token); telegram::start(admin_handle.to_owned(), db.clone(), token);
} }
let keys = config.open_keys()?; let cert_resolver = config
.open_keys()
.await?
.map(rustls_channel_resolver::channel::<32>);
let bind_address = config.bind_address(); let bind_address = config.bind_address();
let sign_spawner2 = sign_spawner.clone();
let verify_spawner2 = verify_spawner.clone();
let config2 = config.clone();
let job_store = jobs::build_storage();
let server = HttpServer::new(move || { let server = HttpServer::new(move || {
let job_server = create_workers(
job_store.clone(),
state.clone(),
actors.clone(),
media.clone(),
config.clone(),
)
.expect("Failed to create job server");
let app = App::new() let app = App::new()
.app_data(web::Data::new(db.clone())) .app_data(web::Data::new(db.clone()))
.app_data(web::Data::new(state.clone())) .app_data(web::Data::new(state.clone()))
.app_data(web::Data::new(state.requests(&config))) .app_data(web::Data::new(
state.requests.clone().spawner(verify_spawner.clone()),
))
.app_data(web::Data::new(actors.clone())) .app_data(web::Data::new(actors.clone()))
.app_data(web::Data::new(config.clone())) .app_data(web::Data::new(config.clone()))
.app_data(web::Data::new(job_server.clone())) .app_data(web::Data::new(job_server))
.app_data(web::Data::new(media.clone())) .app_data(web::Data::new(media.clone()))
.app_data(web::Data::new(collector.clone())); .app_data(web::Data::new(collector.clone()))
.app_data(web::Data::new(verify_spawner.clone()));
let app = if let Some(data) = config.admin_config() { let app = if let Some(data) = config.admin_config() {
app.app_data(data) app.app_data(data)
@ -273,14 +355,20 @@ async fn do_server_main(
app.wrap(Compress::default()) app.wrap(Compress::default())
.wrap(TracingLogger::default()) .wrap(TracingLogger::default())
.wrap(Timings) .wrap(Timings)
.route("/healthz", web::get().to(healthz))
.service(web::resource("/").route(web::get().to(index))) .service(web::resource("/").route(web::get().to(index)))
.service(web::resource("/media/{path}").route(web::get().to(routes::media))) .service(web::resource("/media/{path}").route(web::get().to(routes::media)))
.service( .service(
web::resource("/inbox") web::resource("/inbox")
.wrap(config.digest_middleware()) .wrap(config.digest_middleware().spawner(verify_spawner.clone()))
.wrap(VerifySignature::new( .wrap(VerifySignature::new(
MyVerify(state.requests(&config), actors.clone(), state.clone()), MyVerify(
Default::default(), state.requests.clone().spawner(verify_spawner.clone()),
actors.clone(),
state.clone(),
verify_spawner.clone(),
),
http_signature_normalization_actix::Config::new(),
)) ))
.wrap(DebugPayload(config.debug())) .wrap(DebugPayload(config.debug()))
.route(web::post().to(inbox)), .route(web::post().to(inbox)),
@ -309,28 +397,42 @@ async fn do_server_main(
) )
}); });
if let Some((certs, key)) = keys { if let Some((cert_tx, cert_rx)) = cert_resolver {
tracing::warn!("Binding to {}:{} with TLS", bind_address.0, bind_address.1); let handle = tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(30));
interval.tick().await;
loop {
interval.tick().await;
match config2.open_keys().await {
Ok(Some(key)) => cert_tx.update(key),
Ok(None) => tracing::warn!("Missing TLS keys"),
Err(e) => tracing::error!("Failed to read TLS keys {e}"),
}
}
});
tracing::info!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
let server_config = ServerConfig::builder() let server_config = ServerConfig::builder()
.with_safe_default_cipher_suites()
.with_safe_default_kx_groups()
.with_safe_default_protocol_versions()?
.with_no_client_auth() .with_no_client_auth()
.with_single_cert(certs, key)?; .with_cert_resolver(cert_rx);
server server
.bind_rustls(bind_address, server_config)? .bind_rustls_0_23(bind_address, server_config)?
.run() .run()
.await?; .await?;
handle.abort();
let _ = handle.await;
} else { } else {
tracing::warn!("Binding to {}:{}", bind_address.0, bind_address.1); tracing::info!("Binding to {}:{}", bind_address.0, bind_address.1);
server.bind(bind_address)?.run().await?; server.bind(bind_address)?.run().await?;
} }
tracing::warn!("Server closed"); sign_spawner2.close().await;
verify_spawner2.close().await;
drop(manager); tracing::info!("Server closed");
tracing::warn!("Main complete");
Ok(()) Ok(())
} }

View File

@ -4,14 +4,11 @@ use actix_web::{
web::BytesMut, web::BytesMut,
HttpMessage, HttpMessage,
}; };
use futures_util::{
future::TryFutureExt,
stream::{once, TryStreamExt},
};
use std::{ use std::{
future::{ready, Ready}, future::{ready, Ready},
task::{Context, Poll}, task::{Context, Poll},
}; };
use streem::IntoStreamer;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub(crate) struct DebugPayload(pub bool); pub(crate) struct DebugPayload(pub bool);
@ -53,19 +50,23 @@ where
fn call(&self, mut req: ServiceRequest) -> Self::Future { fn call(&self, mut req: ServiceRequest) -> Self::Future {
if self.0 && req.method() == Method::POST { if self.0 && req.method() == Method::POST {
let pl = req.take_payload(); let mut pl = req.take_payload().into_streamer();
req.set_payload(Payload::Stream { req.set_payload(Payload::Stream {
payload: Box::pin(once( payload: Box::pin(streem::try_from_fn(|yielder| async move {
pl.try_fold(BytesMut::new(), |mut acc, bytes| async { let mut buf = BytesMut::new();
acc.extend(bytes);
Ok(acc) while let Some(bytes) = pl.try_next().await? {
}) buf.extend(bytes);
.map_ok(|bytes| { }
let bytes = bytes.freeze();
let bytes = buf.freeze();
tracing::info!("{}", String::from_utf8_lossy(&bytes)); tracing::info!("{}", String::from_utf8_lossy(&bytes));
bytes
}), yielder.yield_ok(bytes).await;
)),
Ok(())
})),
}); });
self.1.call(req) self.1.call(req)

View File

@ -40,7 +40,7 @@ impl Drop for LogOnDrop {
fn drop(&mut self) { fn drop(&mut self) {
if self.arm { if self.arm {
let duration = self.begin.elapsed(); let duration = self.begin.elapsed();
metrics::histogram!("relay.request.complete", duration, "path" => self.path.clone(), "method" => self.method.clone()); metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration);
} }
} }
} }
@ -80,7 +80,7 @@ where
fn call(&self, req: ServiceRequest) -> Self::Future { fn call(&self, req: ServiceRequest) -> Self::Future {
let log_on_drop = LogOnDrop { let log_on_drop = LogOnDrop {
begin: Instant::now(), begin: Instant::now(),
path: req.path().to_string(), path: format!("{:?}", req.match_pattern()),
method: req.method().to_string(), method: req.method().to_string(),
arm: false, arm: false,
}; };

View File

@ -2,18 +2,17 @@ use crate::{
apub::AcceptedActors, apub::AcceptedActors,
data::{ActorCache, State}, data::{ActorCache, State},
error::{Error, ErrorKind}, error::{Error, ErrorKind},
requests::Requests, requests::{BreakerStrategy, Requests},
spawner::Spawner,
}; };
use activitystreams::{base::BaseExt, iri, iri_string::types::IriString}; use activitystreams::{base::BaseExt, iri, iri_string::types::IriString};
use actix_web::web; use base64::{engine::general_purpose::STANDARD, Engine};
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm}; use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm, Spawn};
use rsa::{pkcs1v15::VerifyingKey, pkcs8::DecodePublicKey, RsaPublicKey}; use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};
use sha2::{Digest, Sha256};
use signature::{DigestVerifier, Signature};
use std::{future::Future, pin::Pin}; use std::{future::Future, pin::Pin};
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State); pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State, pub Spawner);
impl MyVerify { impl MyVerify {
#[tracing::instrument("Verify request", skip(self, signature, signing_string))] #[tracing::instrument("Verify request", skip(self, signature, signing_string))]
@ -53,7 +52,13 @@ impl MyVerify {
None => (), None => (),
}; };
let res = do_verify(&actor.public_key, signature.clone(), signing_string.clone()).await; let res = do_verify(
&self.3,
&actor.public_key,
signature.clone(),
signing_string.clone(),
)
.await;
if let Err(e) = res { if let Err(e) = res {
if !was_cached { if !was_cached {
@ -67,13 +72,13 @@ impl MyVerify {
} else { } else {
match self match self
.0 .0
.fetch::<PublicKeyResponse>(public_key_id.as_str()) .fetch::<PublicKeyResponse>(&public_key_id, BreakerStrategy::Require2XX)
.await .await
{ {
Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId), Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId),
Err(e) => { Err(e) => {
if e.is_gone() { if e.is_gone() {
tracing::warn!("Actor gone: {}", public_key_id); tracing::warn!("Actor gone: {public_key_id}");
return Ok(false); return Ok(false);
} else { } else {
return Err(e); return Err(e);
@ -87,7 +92,7 @@ impl MyVerify {
// Now we make sure we fetch an updated actor // Now we make sure we fetch an updated actor
let actor = self.1.get_no_cache(&actor_id, &self.0).await?; let actor = self.1.get_no_cache(&actor_id, &self.0).await?;
do_verify(&actor.public_key, signature, signing_string).await?; do_verify(&self.3, &actor.public_key, signature, signing_string).await?;
Ok(true) Ok(true)
} }
@ -118,23 +123,29 @@ impl PublicKeyResponse {
#[tracing::instrument("Verify signature")] #[tracing::instrument("Verify signature")]
async fn do_verify( async fn do_verify(
spawner: &Spawner,
public_key: &str, public_key: &str,
signature: String, signature: String,
signing_string: String, signing_string: String,
) -> Result<(), Error> { ) -> Result<(), Error> {
let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?; let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?;
let public_key_der = public_key
.to_pkcs1_der()
.map_err(|_| ErrorKind::DerEncode)?;
let public_key = ring::signature::UnparsedPublicKey::new(
&ring::signature::RSA_PKCS1_2048_8192_SHA256,
public_key_der,
);
let span = tracing::Span::current(); let span = tracing::Span::current();
web::block(move || { spawner
.spawn_blocking(move || {
span.in_scope(|| { span.in_scope(|| {
let decoded = base64::decode(signature)?; let decoded = STANDARD.decode(signature)?;
let signature = Signature::from_bytes(&decoded).map_err(ErrorKind::ReadSignature)?;
let hashed = Sha256::new_with_prefix(signing_string.as_bytes());
let verifying_key = VerifyingKey::new_with_prefix(public_key); public_key
verifying_key .verify(signing_string.as_bytes(), decoded.as_slice())
.verify_digest(hashed, &signature) .map_err(|_| ErrorKind::VerifySignature)?;
.map_err(ErrorKind::VerifySignature)?;
Ok(()) as Result<(), Error> Ok(()) as Result<(), Error>
}) })
@ -176,13 +187,13 @@ mod tests {
#[test] #[test]
fn handles_masto_keys() { fn handles_masto_keys() {
println!("{}", ASONIX_DOG_KEY); println!("{ASONIX_DOG_KEY}");
let _ = RsaPublicKey::from_public_key_pem(ASONIX_DOG_KEY.trim()).unwrap(); let _ = RsaPublicKey::from_public_key_pem(ASONIX_DOG_KEY.trim()).unwrap();
} }
#[test] #[test]
fn handles_pleromo_keys() { fn handles_pleromo_keys() {
println!("{}", KARJALAZET_KEY); println!("{KARJALAZET_KEY}");
let _ = RsaPublicKey::from_public_key_pem(KARJALAZET_KEY.trim()).unwrap(); let _ = RsaPublicKey::from_public_key_pem(KARJALAZET_KEY.trim()).unwrap();
} }

View File

@ -1,10 +1,10 @@
use crate::{ use crate::{
config::{Config, UrlKind}, config::{Config, UrlKind},
data::State, data::State,
future::LocalBoxFuture,
}; };
use actix_web::web::Data; use actix_web::web::Data;
use actix_webfinger::{Resolver, Webfinger}; use actix_webfinger::{Resolver, Webfinger};
use futures_util::future::LocalBoxFuture;
use rsa_magic_public_key::AsMagicPublicKey; use rsa_magic_public_key::AsMagicPublicKey;
pub(crate) struct RelayResolver; pub(crate) struct RelayResolver;

View File

@ -1,32 +1,43 @@
use crate::{ use crate::{
data::LastOnline, data::LastOnline,
error::{Error, ErrorKind}, error::{Error, ErrorKind},
spawner::Spawner,
stream::{aggregate, limit_stream},
}; };
use activitystreams::iri_string::types::IriString; use activitystreams::iri_string::types::IriString;
use actix_web::http::header::Date; use actix_web::http::header::Date;
use awc::{error::SendRequestError, Client, ClientResponse}; use base64::{engine::general_purpose::STANDARD, Engine};
use dashmap::DashMap; use dashmap::DashMap;
use http_signature_normalization_actix::prelude::*; use http_signature_normalization_reqwest::{digest::ring::Sha256, prelude::*};
use rand::thread_rng; use reqwest_middleware::ClientWithMiddleware;
use rsa::{pkcs1v15::SigningKey, RsaPrivateKey}; use ring::{
use sha2::{Digest, Sha256}; rand::SystemRandom,
use signature::RandomizedSigner; signature::{RsaKeyPair, RSA_PKCS1_SHA256},
};
use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey};
use std::{ use std::{
cell::RefCell, sync::Arc,
rc::Rc,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::{Duration, SystemTime}, time::{Duration, SystemTime},
}; };
use tracing_awc::Tracing;
const ONE_SECOND: u64 = 1; const ONE_SECOND: u64 = 1;
const ONE_MINUTE: u64 = 60 * ONE_SECOND; const ONE_MINUTE: u64 = 60 * ONE_SECOND;
const ONE_HOUR: u64 = 60 * ONE_MINUTE; const ONE_HOUR: u64 = 60 * ONE_MINUTE;
const ONE_DAY: u64 = 24 * ONE_HOUR; const ONE_DAY: u64 = 24 * ONE_HOUR;
// 20 KB
const JSON_SIZE_LIMIT: usize = 20 * 1024;
#[derive(Debug)]
pub(crate) enum BreakerStrategy {
// Requires a successful response
Require2XX,
// Allows HTTP 2xx-401
Allow401AndBelow,
// Allows HTTP 2xx-404
Allow404AndBelow,
}
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct Breakers { pub(crate) struct Breakers {
inner: Arc<DashMap<String, Breaker>>, inner: Arc<DashMap<String, Breaker>>,
@ -39,7 +50,7 @@ impl std::fmt::Debug for Breakers {
} }
impl Breakers { impl Breakers {
fn should_try(&self, url: &IriString) -> bool { pub(crate) fn should_try(&self, url: &IriString) -> bool {
if let Some(authority) = url.authority_str() { if let Some(authority) = url.authority_str() {
if let Some(breaker) = self.inner.get(authority) { if let Some(breaker) = self.inner.get(authority) {
breaker.should_try() breaker.should_try()
@ -57,7 +68,7 @@ impl Breakers {
if let Some(mut breaker) = self.inner.get_mut(authority) { if let Some(mut breaker) = self.inner.get_mut(authority) {
breaker.fail(); breaker.fail();
if !breaker.should_try() { if !breaker.should_try() {
tracing::warn!("Failed breaker for {}", authority); tracing::warn!("Failed breaker for {authority}");
} }
false false
} else { } else {
@ -141,13 +152,11 @@ impl Default for Breaker {
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct Requests { pub(crate) struct Requests {
client: Rc<RefCell<Client>>, client: ClientWithMiddleware,
consecutive_errors: Rc<AtomicUsize>,
error_limit: usize,
key_id: String, key_id: String,
user_agent: String, private_key: Arc<RsaKeyPair>,
private_key: RsaPrivateKey, rng: SystemRandom,
config: Config, config: Config<Spawner>,
breakers: Breakers, breakers: Breakers,
last_online: Arc<LastOnline>, last_online: Arc<LastOnline>,
} }
@ -155,180 +164,196 @@ pub(crate) struct Requests {
impl std::fmt::Debug for Requests { impl std::fmt::Debug for Requests {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Requests") f.debug_struct("Requests")
.field("error_limit", &self.error_limit)
.field("key_id", &self.key_id) .field("key_id", &self.key_id)
.field("user_agent", &self.user_agent)
.field("config", &self.config) .field("config", &self.config)
.field("breakers", &self.breakers) .field("breakers", &self.breakers)
.finish() .finish()
} }
} }
pub(crate) fn build_client(user_agent: &str) -> Client {
Client::builder()
.wrap(Tracing)
.add_default_header(("User-Agent", user_agent.to_string()))
.timeout(Duration::from_secs(15))
.finish()
}
impl Requests { impl Requests {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new( pub(crate) fn new(
key_id: String, key_id: String,
private_key: RsaPrivateKey, private_key: RsaPrivateKey,
user_agent: String,
breakers: Breakers, breakers: Breakers,
last_online: Arc<LastOnline>, last_online: Arc<LastOnline>,
spawner: Spawner,
client: ClientWithMiddleware,
) -> Self { ) -> Self {
let private_key_der = private_key.to_pkcs1_der().expect("Can encode der");
let private_key = ring::signature::RsaKeyPair::from_der(private_key_der.as_bytes())
.expect("Key is valid");
Requests { Requests {
client: Rc::new(RefCell::new(build_client(&user_agent))), client,
consecutive_errors: Rc::new(AtomicUsize::new(0)),
error_limit: 3,
key_id, key_id,
user_agent, private_key: Arc::new(private_key),
private_key, rng: SystemRandom::new(),
config: Config::default().mastodon_compat(), config: Config::new_with_spawner(spawner).mastodon_compat(),
breakers, breakers,
last_online, last_online,
} }
} }
pub(crate) fn spawner(mut self, spawner: Spawner) -> Self {
self.config = self.config.set_spawner(spawner);
self
}
pub(crate) fn reset_breaker(&self, iri: &IriString) { pub(crate) fn reset_breaker(&self, iri: &IriString) {
self.breakers.succeed(iri); self.breakers.succeed(iri);
} }
fn count_err(&self) {
let count = self.consecutive_errors.fetch_add(1, Ordering::Relaxed);
if count + 1 >= self.error_limit {
tracing::warn!("{} consecutive errors, rebuilding http client", count + 1);
*self.client.borrow_mut() = build_client(&self.user_agent);
self.reset_err();
}
}
fn reset_err(&self) {
self.consecutive_errors.swap(0, Ordering::Relaxed);
}
async fn check_response( async fn check_response(
&self, &self,
parsed_url: &IriString, parsed_url: &IriString,
res: Result<ClientResponse, SendRequestError>, strategy: BreakerStrategy,
) -> Result<ClientResponse, Error> { res: Result<reqwest::Response, reqwest_middleware::Error>,
) -> Result<reqwest::Response, Error> {
if res.is_err() { if res.is_err() {
self.count_err();
self.breakers.fail(&parsed_url); self.breakers.fail(&parsed_url);
} }
let mut res = let res = res?;
res.map_err(|e| ErrorKind::SendRequest(parsed_url.to_string(), e.to_string()))?;
self.reset_err(); let status = res.status();
if !res.status().is_success() { let success = match strategy {
BreakerStrategy::Require2XX => status.is_success(),
BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status.as_u16()),
BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status.as_u16()),
};
if !success {
self.breakers.fail(&parsed_url); self.breakers.fail(&parsed_url);
if let Ok(bytes) = res.body().await { if let Ok(s) = res.text().await {
if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
if !s.is_empty() { if !s.is_empty() {
tracing::warn!("Response from {}, {}", parsed_url, s); tracing::debug!("Response from {parsed_url}, {s}");
}
} }
} }
return Err(ErrorKind::Status(parsed_url.to_string(), res.status()).into()); return Err(ErrorKind::Status(
parsed_url.to_string(),
crate::http1::status_to_http02(status),
)
.into());
} }
// only actually succeed a breaker on 2xx response
if status.is_success() {
self.last_online.mark_seen(&parsed_url); self.last_online.mark_seen(&parsed_url);
self.breakers.succeed(&parsed_url); self.breakers.succeed(&parsed_url);
}
Ok(res) Ok(res)
} }
#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))] #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json<T>(&self, url: &str) -> Result<T, Error> pub(crate) async fn fetch_json<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where where
T: serde::de::DeserializeOwned, T: serde::de::DeserializeOwned,
{ {
self.do_fetch(url, "application/json").await self.do_fetch(url, "application/json", strategy).await
}
#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json_msky<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let stream = self
.do_deliver(
url,
&serde_json::json!({}),
"application/json",
"application/json",
strategy,
)
.await?
.bytes_stream();
let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
Ok(serde_json::from_slice(&body)?)
} }
#[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))] #[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch<T>(&self, url: &str) -> Result<T, Error> pub(crate) async fn fetch<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where where
T: serde::de::DeserializeOwned, T: serde::de::DeserializeOwned,
{ {
self.do_fetch(url, "application/activity+json").await self.do_fetch(url, "application/activity+json", strategy)
}
async fn do_fetch<T>(&self, url: &str, accept: &str) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let parsed_url = url.parse::<IriString>()?;
if !self.breakers.should_try(&parsed_url) {
return Err(ErrorKind::Breaker.into());
}
let signer = self.signer();
let span = tracing::Span::current();
let client: Client = self.client.borrow().clone();
let res = client
.get(url)
.insert_header(("Accept", accept))
.insert_header(Date(SystemTime::now().into()))
.signature(
self.config.clone(),
self.key_id.clone(),
move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
},
)
.await?
.send()
.await;
let mut res = self.check_response(&parsed_url, res).await?;
let body = res
.body()
.await .await
.map_err(|e| ErrorKind::ReceiveResponse(url.to_string(), e.to_string()))?; }
Ok(serde_json::from_slice(body.as_ref())?) async fn do_fetch<T>(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let stream = self
.do_fetch_response(url, accept, strategy)
.await?
.bytes_stream();
let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
Ok(serde_json::from_slice(&body)?)
} }
#[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))] #[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))]
pub(crate) async fn fetch_response(&self, url: IriString) -> Result<ClientResponse, Error> { pub(crate) async fn fetch_response(
if !self.breakers.should_try(&url) { &self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
self.do_fetch_response(url, "*/*", strategy).await
}
pub(crate) async fn do_fetch_response(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
if !self.breakers.should_try(url) {
return Err(ErrorKind::Breaker.into()); return Err(ErrorKind::Breaker.into());
} }
let signer = self.signer(); let signer = self.signer();
let span = tracing::Span::current(); let span = tracing::Span::current();
let client: Client = self.client.borrow().clone(); let request = self
let res = client .client
.get(url.as_str()) .get(url.as_str())
.insert_header(("Accept", "*/*")) .header("Accept", accept)
.insert_header(Date(SystemTime::now().into())) .header("Date", Date(SystemTime::now().into()).to_string())
.no_decompress() .signature(&self.config, self.key_id.clone(), move |signing_string| {
.signature(
self.config.clone(),
self.key_id.clone(),
move |signing_string| {
span.record("signing_string", signing_string); span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string)) span.in_scope(|| signer.sign(signing_string))
}, })
) .await?;
.await?
.send()
.await;
let res = self.check_response(&url, res).await?; let res = self.client.execute(request).await;
let res = self.check_response(url, strategy, res).await?;
Ok(res) Ok(res)
} }
@ -338,7 +363,34 @@ impl Requests {
skip_all, skip_all,
fields(inbox = inbox.to_string().as_str(), signing_string) fields(inbox = inbox.to_string().as_str(), signing_string)
)] )]
pub(crate) async fn deliver<T>(&self, inbox: IriString, item: &T) -> Result<(), Error> pub(crate) async fn deliver<T>(
&self,
inbox: &IriString,
item: &T,
strategy: BreakerStrategy,
) -> Result<(), Error>
where
T: serde::ser::Serialize + std::fmt::Debug,
{
self.do_deliver(
inbox,
item,
"application/activity+json",
"application/activity+json",
strategy,
)
.await?;
Ok(())
}
async fn do_deliver<T>(
&self,
inbox: &IriString,
item: &T,
content_type: &str,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error>
where where
T: serde::ser::Serialize + std::fmt::Debug, T: serde::ser::Serialize + std::fmt::Debug,
{ {
@ -350,12 +402,12 @@ impl Requests {
let span = tracing::Span::current(); let span = tracing::Span::current();
let item_string = serde_json::to_string(item)?; let item_string = serde_json::to_string(item)?;
let client: Client = self.client.borrow().clone(); let request = self
let (req, body) = client .client
.post(inbox.as_str()) .post(inbox.as_str())
.insert_header(("Accept", "application/activity+json")) .header("Accept", accept)
.insert_header(("Content-Type", "application/activity+json")) .header("Content-Type", content_type)
.insert_header(Date(SystemTime::now().into())) .header("Date", Date(SystemTime::now().into()).to_string())
.signature_with_digest( .signature_with_digest(
self.config.clone(), self.config.clone(),
self.key_id.clone(), self.key_id.clone(),
@ -366,31 +418,41 @@ impl Requests {
span.in_scope(|| signer.sign(signing_string)) span.in_scope(|| signer.sign(signing_string))
}, },
) )
.await? .await?;
.split();
let res = req.send_body(body).await; let res = self.client.execute(request).await;
self.check_response(&inbox, res).await?; let res = self.check_response(inbox, strategy, res).await?;
Ok(()) Ok(res)
} }
fn signer(&self) -> Signer { fn signer(&self) -> Signer {
Signer { Signer {
private_key: self.private_key.clone(), private_key: self.private_key.clone(),
rng: self.rng.clone(),
} }
} }
} }
struct Signer { struct Signer {
private_key: RsaPrivateKey, private_key: Arc<RsaKeyPair>,
rng: SystemRandom,
} }
impl Signer { impl Signer {
fn sign(&self, signing_string: &str) -> Result<String, Error> { fn sign(&self, signing_string: &str) -> Result<String, Error> {
let signing_key = SigningKey::<Sha256>::new_with_prefix(self.private_key.clone()); let mut signature = vec![0; self.private_key.public().modulus_len()];
let signature = signing_key.try_sign_with_rng(thread_rng(), signing_string.as_bytes())?;
Ok(base64::encode(signature.as_ref())) self.private_key
.sign(
&RSA_PKCS1_SHA256,
&self.rng,
signing_string.as_bytes(),
&mut signature,
)
.map_err(|_| ErrorKind::SignRequest)?;
Ok(STANDARD.encode(&signature))
} }
} }

View File

@ -1,4 +1,5 @@
mod actor; mod actor;
mod healthz;
mod inbox; mod inbox;
mod index; mod index;
mod media; mod media;
@ -7,6 +8,7 @@ mod statics;
pub(crate) use self::{ pub(crate) use self::{
actor::route as actor, actor::route as actor,
healthz::route as healthz,
inbox::route as inbox, inbox::route as inbox,
index::route as index, index::route as index,
media::route as media, media::route as media,

7
src/routes/healthz.rs Normal file
View File

@ -0,0 +1,7 @@
use crate::{data::State, error::Error};
use actix_web::{web, HttpResponse};
// Health-check endpoint: succeeds only when the database answers a
// health probe.
//
// Returns `200 OK` with an empty body on success; a failing check
// propagates the database error via `?` and is rendered by the
// crate's `Error` response handling.
pub(crate) async fn route(state: web::Data<State>) -> Result<HttpResponse, Error> {
    // Short-circuits with an error response if the database is unreachable.
    state.db.check_health().await?;
    Ok(HttpResponse::Ok().finish())
}

View File

@ -14,8 +14,12 @@ const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg {
keep_html_and_head_opening_tags: false, keep_html_and_head_opening_tags: false,
keep_spaces_between_attributes: true, keep_spaces_between_attributes: true,
keep_comments: false, keep_comments: false,
minify_js: true, keep_input_type_text_attr: true,
keep_ssi_comments: false,
preserve_brace_template_syntax: false,
preserve_chevron_percent_template_syntax: false,
minify_css: true, minify_css: true,
minify_js: true,
remove_bangs: true, remove_bangs: true,
remove_processing_instructions: true, remove_processing_instructions: true,
}; };
@ -33,12 +37,16 @@ pub(crate) async fn route(
state: web::Data<State>, state: web::Data<State>,
config: web::Data<Config>, config: web::Data<Config>,
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let all_nodes = state.node_cache().nodes().await?; let all_nodes = state.node_cache.nodes().await?;
let mut nodes = Vec::new(); let mut nodes = Vec::new();
let mut local = Vec::new(); let mut local = Vec::new();
for node in all_nodes { for node in all_nodes {
if !state.is_connected(&node.base) {
continue;
}
if node if node
.base .base
.authority_str() .authority_str()
@ -71,7 +79,7 @@ pub(crate) async fn route(
let mut buf = BufWriter::new(Vec::new()); let mut buf = BufWriter::new(Vec::new());
crate::templates::index(&mut buf, &local, &nodes, &config)?; crate::templates::index_html(&mut buf, &local, &nodes, &config)?;
let html = buf.into_inner().map_err(|e| { let html = buf.into_inner().map_err(|e| {
tracing::error!("Error rendering template, {}", e.error()); tracing::error!("Error rendering template, {}", e.error());
ErrorKind::FlushBuffer ErrorKind::FlushBuffer

View File

@ -1,7 +1,15 @@
use crate::{data::MediaCache, error::Error, requests::Requests}; use crate::{
data::MediaCache,
error::Error,
requests::{BreakerStrategy, Requests},
stream::limit_stream,
};
use actix_web::{body::BodyStream, web, HttpResponse}; use actix_web::{body::BodyStream, web, HttpResponse};
use uuid::Uuid; use uuid::Uuid;
// 16 MB
const IMAGE_SIZE_LIMIT: usize = 16 * 1024 * 1024;
#[tracing::instrument(name = "Media", skip(media, requests))] #[tracing::instrument(name = "Media", skip(media, requests))]
pub(crate) async fn route( pub(crate) async fn route(
media: web::Data<MediaCache>, media: web::Data<MediaCache>,
@ -11,15 +19,23 @@ pub(crate) async fn route(
let uuid = uuid.into_inner(); let uuid = uuid.into_inner();
if let Some(url) = media.get_url(uuid).await? { if let Some(url) = media.get_url(uuid).await? {
let res = requests.fetch_response(url).await?; let res = requests
.fetch_response(&url, BreakerStrategy::Allow404AndBelow)
.await?;
let mut response = HttpResponse::build(res.status()); let mut response = HttpResponse::build(crate::http1::status_to_http02(res.status()));
for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") { for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
response.insert_header((name.clone(), value.clone())); response.insert_header((
crate::http1::name_to_http02(name),
crate::http1::value_to_http02(value),
));
} }
return Ok(response.body(BodyStream::new(res))); return Ok(response.body(BodyStream::new(limit_stream(
res.bytes_stream(),
IMAGE_SIZE_LIMIT,
))));
} }
Ok(HttpResponse::NotFound().finish()) Ok(HttpResponse::NotFound().finish())

View File

@ -44,6 +44,8 @@ pub(crate) async fn route(
.map(|s| s.to_owned()) .map(|s| s.to_owned())
.collect(); .collect();
let open_registrations = !config.restricted_mode();
web::Json(NodeInfo { web::Json(NodeInfo {
version: NodeInfoVersion, version: NodeInfoVersion,
software: Software { software: Software {
@ -55,7 +57,7 @@ pub(crate) async fn route(
inbound: vec![], inbound: vec![],
outbound: vec![], outbound: vec![],
}, },
open_registrations: false, open_registrations,
usage: Usage { usage: Usage {
users: Users { users: Users {
total: 1, total: 1,

92
src/spawner.rs Normal file
View File

@ -0,0 +1,92 @@
use async_cpupool::CpuPool;
use http_signature_normalization_actix::{Canceled, Spawn};
use std::time::Duration;
/// Cloneable handle to a named CPU thread pool used for offloading
/// blocking work (signature generation/verification) off the async runtime.
#[derive(Clone)]
pub(crate) struct Spawner {
    pool: CpuPool,
}

impl Spawner {
    /// Build a spawner backed by a pool with the given `name` and an upper
    /// bound of `threads` worker threads.
    ///
    /// Returns an error if the underlying pool fails to construct.
    pub(crate) fn build(name: &'static str, threads: u16) -> color_eyre::Result<Self> {
        let configured = CpuPool::configure().name(name).max_threads(threads);

        Ok(Spawner {
            pool: configured.build()?,
        })
    }

    /// Consume the spawner and shut the underlying pool down.
    pub(crate) async fn close(self) {
        self.pool.close().await;
    }
}
impl std::fmt::Debug for Spawner {
    /// Render just the type name; the inner pool is intentionally omitted
    /// so it does not need to implement `Debug`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Spawner").finish()
    }
}
/// Drive `fut` to completion while emitting wait-time metrics.
///
/// Increments a `start` counter up front and an `end` counter on
/// completion. Every 5 seconds the future is still pending, a `pending`
/// counter is incremented and a warning is logged carrying a random id
/// (so concurrent operations can be told apart) and the elapsed time.
async fn timer<Fut>(fut: Fut) -> Fut::Output
where
    Fut: std::future::Future,
{
    // Random id correlates the repeated warnings for one operation.
    let id = uuid::Uuid::new_v4();

    metrics::counter!("relay.spawner.wait-timer.start").increment(1);

    let mut interval = tokio::time::interval(Duration::from_secs(5));

    // pass the first tick (instant)
    interval.tick().await;

    // Stack-pin so the future can be polled repeatedly across select! arms.
    let mut fut = std::pin::pin!(fut);

    let mut counter = 0;
    loop {
        tokio::select! {
            out = &mut fut => {
                metrics::counter!("relay.spawner.wait-timer.end").increment(1);
                return out;
            }
            _ = interval.tick() => {
                // Another 5-second interval elapsed without completion.
                counter += 1;
                metrics::counter!("relay.spawner.wait-timer.pending").increment(1);
                tracing::warn!("Blocking operation {id} is taking a long time, {} seconds", counter * 5);
            }
        }
    }
}
// Spawn implementation for http-signature-normalization (actix flavor):
// blocking closures are run on the CPU pool instead of the async runtime.
impl Spawn for Spawner {
    // Boxed (non-Send) future resolving to the closure's output, or
    // `Canceled` if the pool could not complete the job.
    type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, Canceled>>>>;

    fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
    where
        Func: FnOnce() -> Out + Send + 'static,
        Out: Send + 'static,
    {
        let pool = self.pool.clone();

        // Wrap the pooled job in `timer` for wait metrics; any pool error
        // is collapsed into `Canceled`.
        Box::pin(async move { timer(pool.spawn(func)).await.map_err(|_| Canceled) })
    }
}
// Same as the actix-flavor `Spawn` impl above, but for the reqwest-based
// signature crate, whose future type must additionally be `Send`.
impl http_signature_normalization_reqwest::Spawn for Spawner {
    type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, http_signature_normalization_reqwest::Canceled>> + Send>> where T: Send;

    fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
    where
        Func: FnOnce() -> Out + Send + 'static,
        Out: Send + 'static,
    {
        let pool = self.pool.clone();

        Box::pin(async move {
            // Run on the CPU pool with wait metrics; pool errors become
            // the reqwest crate's `Canceled`.
            timer(pool.spawn(func))
                .await
                .map_err(|_| http_signature_normalization_reqwest::Canceled)
        })
    }
}

59
src/stream.rs Normal file
View File

@ -0,0 +1,59 @@
use crate::error::{Error, ErrorKind};
use actix_web::web::{Bytes, BytesMut};
use futures_core::Stream;
use streem::IntoStreamer;
/// Wrap `input`, forwarding its chunks until more than `limit` cumulative
/// bytes have been seen, at which point the stream yields a
/// `BodyTooLarge` error and ends.
///
/// Item errors from the underlying `reqwest` stream are converted into
/// this crate's `Error` by the `?` inside the generator.
pub(crate) fn limit_stream<'a, S>(
    input: S,
    limit: usize,
) -> impl Stream<Item = Result<Bytes, Error>> + Send + 'a
where
    S: Stream<Item = reqwest::Result<Bytes>> + Send + 'a,
{
    streem::try_from_fn(move |yielder| async move {
        let stream = std::pin::pin!(input);
        let mut stream = stream.into_streamer();

        // Running total of bytes accepted so far.
        let mut count = 0;

        while let Some(bytes) = stream.try_next().await? {
            count += bytes.len();

            // Fail as soon as the total crosses the limit; the chunk that
            // crossed the line is not forwarded downstream.
            if count > limit {
                return Err(ErrorKind::BodyTooLarge.into());
            }

            yielder.yield_ok(bytes).await;
        }

        Ok(())
    })
}
pub(crate) async fn aggregate<S>(input: S) -> Result<Bytes, Error>
where
S: Stream<Item = Result<Bytes, Error>>,
{
let stream = std::pin::pin!(input);
let mut streamer = stream.into_streamer();
let mut buf = Vec::new();
while let Some(bytes) = streamer.try_next().await? {
buf.push(bytes);
}
if buf.len() == 1 {
return Ok(buf.pop().expect("buf has exactly one element"));
}
let total_size: usize = buf.iter().map(|b| b.len()).sum();
let mut bytes_mut = BytesMut::with_capacity(total_size);
for bytes in &buf {
bytes_mut.extend_from_slice(&bytes);
}
Ok(bytes_mut.freeze())
}

View File

@ -46,7 +46,7 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
let bot = Bot::new(token); let bot = Bot::new(token);
let admin_handle = Arc::new(admin_handle); let admin_handle = Arc::new(admin_handle);
actix_rt::spawn(async move { tokio::spawn(async move {
let command_handler = teloxide::filter_command::<Command, _>().endpoint( let command_handler = teloxide::filter_command::<Command, _>().endpoint(
move |bot: Bot, msg: Message, cmd: Command| { move |bot: Bot, msg: Message, cmd: Command| {
let admin_handle = admin_handle.clone(); let admin_handle = admin_handle.clone();
@ -75,7 +75,8 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
fn is_admin(admin_handle: &str, message: &Message) -> bool { fn is_admin(admin_handle: &str, message: &Message) -> bool {
message message
.from() .from
.as_ref()
.and_then(|user| user.username.as_deref()) .and_then(|user| user.username.as_deref())
.map(|username| username == admin_handle) .map(|username| username == admin_handle)
.unwrap_or(false) .unwrap_or(false)
@ -89,19 +90,19 @@ async fn answer(bot: Bot, msg: Message, cmd: Command, db: Db) -> ResponseResult<
.await?; .await?;
} }
Command::Block { domain } if db.add_blocks(vec![domain.clone()]).await.is_ok() => { Command::Block { domain } if db.add_blocks(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{} has been blocked", domain)) bot.send_message(msg.chat.id, format!("{domain} has been blocked"))
.await?; .await?;
} }
Command::Unblock { domain } if db.remove_blocks(vec![domain.clone()]).await.is_ok() => { Command::Unblock { domain } if db.remove_blocks(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{} has been unblocked", domain)) bot.send_message(msg.chat.id, format!("{domain} has been unblocked"))
.await?; .await?;
} }
Command::Allow { domain } if db.add_allows(vec![domain.clone()]).await.is_ok() => { Command::Allow { domain } if db.add_allows(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{} has been allowed", domain)) bot.send_message(msg.chat.id, format!("{domain} has been allowed"))
.await?; .await?;
} }
Command::Disallow { domain } if db.remove_allows(vec![domain.clone()]).await.is_ok() => { Command::Disallow { domain } if db.remove_allows(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{} has been disallowed", domain)) bot.send_message(msg.chat.id, format!("{domain} has been disallowed"))
.await?; .await?;
} }
Command::ListAllowed => { Command::ListAllowed => {

View File

@ -1,7 +1,7 @@
@use crate::{ @use crate::{
config::{Config, UrlKind}, config::{Config, UrlKind},
data::Node, data::Node,
templates::{info, instance, statics::index_css}, templates::{info_html, instance_html, statics::index_css},
}; };
@(local: &[Node], nodes: &[Node], config: &Config) @(local: &[Node], nodes: &[Node], config: &Config)
@ -39,13 +39,13 @@ templates::{info, instance, statics::index_css},
@for node in local { @for node in local {
@if let Some(inst) = node.instance.as_ref() { @if let Some(inst) = node.instance.as_ref() {
<li> <li>
@:instance(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(), @:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
&node.base) &node.base)
</li> </li>
} else { } else {
@if let Some(inf) = node.info.as_ref() { @if let Some(inf) = node.info.as_ref() {
<li> <li>
@:info(inf, &node.base) @:info_html(inf, &node.base)
</li> </li>
} }
} }
@ -79,9 +79,7 @@ templates::{info, instance, statics::index_css},
<h4>Pleroma</h4> <h4>Pleroma</h4>
<p> <p>
Pleroma admins can add this relay by adding Pleroma admins can add this relay by adding
<pre>@config.generate_url(UrlKind::Actor)</pre> <pre>@config.generate_url(UrlKind::Actor)</pre> to their relay settings.
to their relay settings (I don't actually know how pleroma handles adding
relays, is it still a mix command?).
</p> </p>
<h4>Others</h4> <h4>Others</h4>
<p> <p>
@ -97,13 +95,13 @@ templates::{info, instance, statics::index_css},
@for node in nodes { @for node in nodes {
@if let Some(inst) = node.instance.as_ref() { @if let Some(inst) = node.instance.as_ref() {
<li> <li>
@:instance(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(), @:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
&node.base) &node.base)
</li> </li>
} else { } else {
@if let Some(inf) = node.info.as_ref() { @if let Some(inf) = node.info.as_ref() {
<li> <li>
@:info(inf, &node.base) @:info_html(inf, &node.base)
</li> </li>
} }
} }

View File

@ -1,4 +1,4 @@
@use crate::{db::{Contact, Instance}, templates::admin}; @use crate::{db::{Contact, Instance}, templates::admin_html};
@use activitystreams::iri_string::types::IriString; @use activitystreams::iri_string::types::IriString;
@(instance: &Instance, software: Option<&str>, contact: Option<&Contact>, base: &IriString) @(instance: &Instance, software: Option<&str>, contact: Option<&Contact>, base: &IriString)
@ -30,7 +30,7 @@
} }
@if let Some(contact) = contact { @if let Some(contact) = contact {
<h5 class="instance-admin">@instance.title's admin:</h5> <h5 class="instance-admin">@instance.title's admin:</h5>
@:admin(contact, base) @:admin_html(contact, base)
} }
</div> </div>
} }