Compare commits

...

14 Commits

Author SHA1 Message Date
Timshel 6ec67b230b
Merge 9eefb7427f into 0fe93edea6 2024-04-29 15:07:59 +00:00
Timshel 9eefb7427f Stop rolling device token 2024-04-29 17:04:50 +02:00
Timshel 1f9c9e2239 Toggle SSO button depending on SSO_ENABLED in docker start 2024-04-29 17:04:50 +02:00
Timshel 927a9e3e4c Allow to cache discovery endpoint 2024-04-29 17:04:50 +02:00
Timshel 7117e78fc7 Improvements and error handling 2024-04-29 17:04:50 +02:00
Felix Eckhofer 819624a4a3 Add SSO functionality using OpenID Connect
Co-authored-by: Pablo Ovelleiro Corral <mail@pablo.tools>
Co-authored-by: Stuart Heap <sheap13@gmail.com>
Co-authored-by: Alex Moore <skiepp@my-dockerfarm.cloud>
Co-authored-by: Brian Munro <brian.alexander.munro@gmail.com>
Co-authored-by: Jacques B. <timshel@github.com>
2024-04-29 16:15:10 +02:00
Timshel 0b49f26b57 Add playwright create/login test for all db 2024-04-29 16:13:00 +02:00
Daniel García 0fe93edea6
Some fixes for the new mobile apps (#4526) 2024-04-27 23:24:04 +02:00
Stefan Melmuk e9aa5a545e
fix emergency access invites (#4337)
* fix emergency access invites with no mail

when mail is disabled, instead of accepting emergency access for all
invited users automatically, we only accept if the user already exists

on registration of a new account, any open emergency access invitations
will be accepted if mail is disabled

also prevent invited emergency access contacts from registering if
emergency access is disabled (this is only relevant when mail is
enabled; if mail is disabled they should have an Invitation entry)

* delete emergency access invitations

if an invited user is deleted in the /admin panel, their emergency
access invitation will remain in the database, which causes
the to_json_grantee_details fn to panic

* improve missing emergency access grantees

instead of returning an empty emergency access contact, the entry should
not be added to the list. also, the error handling can be improved a bit.
2024-04-27 22:16:05 +02:00
Stefan Melmuk 9dcc738f85
improve access to collections via groups (#4441)
* refactor get_org_collections_details

* improve access to collection check

* fix get_org_collection_detail too
2024-04-27 22:09:00 +02:00
Kristof Mattei 84a7c7da5d
Pass in collection ids to notifier when sharing cipher. (#4517) 2024-04-27 21:53:10 +02:00
Mathijs van Veluw ca9234ed86
Add extra (unsupported) container build arch's (#4524)
There was a PR (#4370) to add i686/i386 support for Vaultwarden.
That specific PR was not a viable way of adding this.

This PR adds extra architectures for Debian based containers which we
will not support by default. Those images will not be built and pushed
to our container registries.

Added the following architectures:
 - linux/386
 - linux/ppc64le
 - linux/s390x

Again, there will be no major support for these architectures, but it
will allow people who use these architectures to build a Debian based
binary more easily.
2024-04-27 21:51:14 +02:00
Daniel García 27dc67fadd
Implement custom DNS resolver (#3988) 2024-04-27 20:25:34 +02:00
Mathijs van Veluw 2ad33ec97f
Update Crate and Rust (#4522)
* Update Crate and Rust

- Updated all crates
- Updated Rust to the latest patch version

* Updated GitHub Actions
2024-04-27 00:53:42 +02:00
100 changed files with 4683 additions and 888 deletions

View File

@ -38,3 +38,6 @@ web-vault
# Vaultwarden Resources
resources
# Playwright tests
playwright

View File

@ -152,6 +152,10 @@
## Cron schedule of the job that cleans old auth requests from the database.
## Defaults to every minute. Set blank to disable this job.
# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
##
## Cron schedule of the job that cleans SSO nonces from incomplete flows
## Defaults to daily (20 minutes after midnight). Set blank to disable this job.
# PURGE_INCOMPLETE_SSO_NONCE="0 20 0 * * *"
########################
### General settings ###
@ -409,6 +413,43 @@
## KNOW WHAT YOU ARE DOING!
# ORG_GROUPS_ENABLED=false
#####################################
### SSO settings (OpenID Connect) ###
#####################################
## Controls whether users can login using an OpenID Connect identity provider
# SSO_ENABLED=true
## Prevent users from logging in directly without going through SSO
# SSO_ONLY=false
## On SSO signup, if a user with a matching email already exists, make the association
# SSO_SIGNUPS_MATCH_EMAIL=true
## Base URL of the OIDC server (auto-discovery is used)
## - Should not include the `/.well-known/openid-configuration` part and no trailing `/`
## - ${SSO_AUTHORITY}/.well-known/openid-configuration should return a json document: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
# SSO_AUTHORITY=https://auth.example.com
## Optional SSO scopes, override if email and profile are not enough
# SSO_SCOPES="email profile"
## Additional authorization URL parameters (e.g. to obtain a `refresh_token` with Google Auth).
# SSO_AUTHORIZE_EXTRA_PARAMS="
# access_type=offline
# prompt=consent
# "
## Activate PKCE for the Auth Code flow. Recommended, but disabled for now while waiting for feedback on support.
# SSO_PKCE=false
## Regex to add additional trusted audiences to the Id Token (by default only the client_id is trusted).
# SSO_AUDIENCE_TRUSTED='^$'
## Set your Client ID and Client Key
# SSO_CLIENT_ID=11111
# SSO_CLIENT_SECRET=AAAAAAAAAAAAAAAAAAAAAAAA
## Optional Master password policy (minComplexity=[0-4])
# SSO_MASTER_PASSWORD_POLICY='{"enforceOnLogin":false,"minComplexity":3,"minLength":12,"requireLower":false,"requireNumbers":false,"requireSpecial":false,"requireUpper":false}'
## Use SSO only for authentication, not the session lifecycle
# SSO_AUTH_ONLY_NOT_SESSION=false
## Client cache for the discovery endpoint. Duration in seconds (0 to disable).
# SSO_CLIENT_CACHE_EXPIRATION=0
## Debug only, log all the tokens
# SSO_DEBUG_TOKENS=false
########################
### MFA/2FA settings ###
########################

View File

@ -46,7 +46,7 @@ jobs:
steps:
# Checkout the repo
- name: "Checkout"
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
# End Checkout the repo
@ -74,7 +74,7 @@ jobs:
# Only install the clippy and rustfmt components on the default rust-toolchain
- name: "Install rust-toolchain version"
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@ -84,7 +84,7 @@ jobs:
# Install the any other channel to be used for which we do not execute clippy and rustfmt
- name: "Install MSRV version"
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

View File

@ -13,7 +13,7 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
# End Checkout the repo
# Download hadolint - https://github.com/hadolint/hadolint/releases

View File

@ -58,7 +58,7 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
with:
fetch-depth: 0
@ -69,11 +69,11 @@ jobs:
# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions
with:
config-inline: |
buildkitd-config-inline: |
[worker.oci]
max-parallelism = 2
driver-opts: |
@ -102,7 +102,7 @@ jobs:
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@ -116,7 +116,7 @@ jobs:
# Login to GitHub Container Registry
- name: Login to GitHub Container Registry
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@ -137,7 +137,7 @@ jobs:
# Login to Quay.io
- name: Login to Quay.io
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
@ -171,7 +171,7 @@ jobs:
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
- name: Bake ${{ matrix.base_image }} containers
uses: docker/bake-action@849707117b03d39aba7924c50a10376a69e88d7d # v4.1.0
uses: docker/bake-action@73b0efa7a0e8ac276e0a8d5c580698a942ff10b5 # v4.4.0
env:
BASE_TAGS: "${{ env.BASE_TAGS }}"
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@ -229,28 +229,28 @@ jobs:
# Upload artifacts to Github Actions
- name: "Upload amd64 artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
path: vaultwarden-amd64
- name: "Upload arm64 artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
path: vaultwarden-arm64
- name: "Upload armv7 artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
path: vaultwarden-armv7
- name: "Upload armv6 artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6

View File

@ -25,10 +25,10 @@ jobs:
actions: read
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1
uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # v0.19.0
with:
scan-type: repo
ignore-unfixed: true
@ -37,6 +37,6 @@ jobs:
severity: CRITICAL,HIGH
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@b7bf0a3ed3ecfa44160715d7c442788f65f0f923 # v3.23.2
uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.3
with:
sarif_file: 'trivy-results.sarif'

Cargo.lock generated (1176 changed lines)

File diff suppressed because it is too large

View File

@ -36,7 +36,7 @@ unstable = []
[target."cfg(not(windows))".dependencies]
# Logging
syslog = "6.1.0"
syslog = "6.1.1"
[dependencies]
# Logging
@ -60,21 +60,21 @@ rocket = { version = "0.5.0", features = ["tls", "json"], default-features = fal
rocket_ws = { version ="0.1.0" }
# WebSockets libraries
rmpv = "1.0.1" # MessagePack library
rmpv = "1.0.2" # MessagePack library
# Concurrent HashMap used for WebSocket messaging and favicons
dashmap = "5.5.3"
# Async futures
futures = "0.3.30"
tokio = { version = "1.37.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
tokio = { version = "1.37.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
# A generic serialization/deserialization framework
serde = { version = "1.0.197", features = ["derive"] }
serde_json = "1.0.115"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
# A safe, extensible ORM and Query builder
diesel = { version = "2.1.5", features = ["chrono", "r2d2", "numeric"] }
diesel = { version = "2.1.6", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.1.0"
diesel_logger = { version = "0.3.0", optional = true }
@ -89,12 +89,12 @@ ring = "0.17.8"
uuid = { version = "1.8.0", features = ["v4"] }
# Date and time libraries
chrono = { version = "0.4.37", features = ["clock", "serde"], default-features = false }
chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.9.0"
time = "0.3.34"
time = "0.3.36"
# Job scheduler
job_scheduler_ng = "2.0.4"
job_scheduler_ng = "2.0.5"
# Data encoding library Hex/Base32/Base64
data-encoding = "2.5.0"
@ -115,7 +115,7 @@ webauthn-rs = "0.3.2"
url = "2.5.0"
# Email libraries
lettre = { version = "0.11.6", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
email_address = "0.2.4"
@ -123,7 +123,8 @@ email_address = "0.2.4"
handlebars = { version = "5.1.2", features = ["dir_source"] }
# HTTP client (Used for favicons, version check, DUO and HIBP API)
reqwest = { version = "0.12.3", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies", "hickory-dns"] }
reqwest = { version = "0.12.4", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
hickory-resolver = "0.24.1"
# Favicon extraction libraries
html5gum = "0.5.7"
@ -132,7 +133,7 @@ data-url = "0.3.1"
bytes = "1.6.0"
# Cache function results (Used for version check and favicon fetching)
cached = { version = "0.49.2", features = ["async"] }
cached = { version = "0.50.0", features = ["async"] }
# Used for custom short lived cookie jar during favicon extraction
cookie = "0.18.1"
@ -148,12 +149,16 @@ pico-args = "0.5.0"
paste = "1.0.14"
governor = "0.6.3"
# OIDC for SSO
openidconnect = "3.5.0"
mini-moka = "0.10.2"
# Check client versions for specific features.
semver = "1.0.22"
# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
mimalloc = { version = "0.1.41", features = ["secure"], default-features = false, optional = true }
which = "6.0.1"
# Argon2 library with support for the PHC format

SSO.md Normal file (252 lines)
View File

@ -0,0 +1,252 @@
# SSO using OpenID Connect
To use an external source of authentication your SSO will need to support OpenID Connect:
- An OpenID Connect Discovery endpoint should be available
- Client authentication will be done using Id and Secret.
A master password will still be required and is not controlled by the SSO (depending on your point of view this might be a feature ;).
This introduces another way to control who can use the vault without having to use invitations or an LDAP.
## Configuration
The following settings are available:
- `SSO_ENABLED`: activate SSO
- `SSO_ONLY`: disable email + Master password authentication
- `SSO_SIGNUPS_MATCH_EMAIL`: on SSO signup, if a user with a matching email already exists, make the association (default `true`)
- `SSO_AUTHORITY`: the OpenID Connect Discovery endpoint of your SSO
  - Should not include the `/.well-known/openid-configuration` part and no trailing `/`
  - $SSO_AUTHORITY/.well-known/openid-configuration should return a JSON document: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
- `SSO_SCOPES`: optional, allows overriding the scopes if needed (default `"email profile"`)
- `SSO_AUTHORIZE_EXTRA_PARAMS`: optional, allows adding extra parameters to the authorize redirect (default `""`)
- `SSO_PKCE`: activate PKCE for the Auth Code flow. Recommended, but disabled for now while waiting for feedback on support (default `false`).
- `SSO_AUDIENCE_TRUSTED`: optional, regex to trust additional audiences for the IdToken (`client_id` is always trusted). Use single quotes when writing the regex: `'^$'`.
- `SSO_CLIENT_ID`: Client Id
- `SSO_CLIENT_SECRET`: Client Secret
- `SSO_MASTER_PASSWORD_POLICY`: optional Master password policy
- `SSO_AUTH_ONLY_NOT_SESSION`: enable to use SSO only for authentication, not for the session lifecycle
- `SSO_CLIENT_CACHE_EXPIRATION`: cache calls to the discovery endpoint; duration in seconds, `0` to disable (default `0`)
- `SSO_DEBUG_TOKENS`: log all tokens for easier debugging (default `false`)
The callback URL is: `https://your.domain/identity/connect/oidc-signin`
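Putting it together, a minimal configuration could look like the sketch below (all values are placeholders; the authority, client id and secret come from your provider):
```bash
# Minimal SSO configuration sketch (placeholder values)
SSO_ENABLED=true
SSO_ONLY=false
SSO_AUTHORITY=https://auth.example.com
SSO_CLIENT_ID=vaultwarden
SSO_CLIENT_SECRET=AAAAAAAAAAAAAAAAAAAAAAAA
SSO_PKCE=true
# Register https://your.domain/identity/connect/oidc-signin as the redirect URI on the provider side.
```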
## Account and Email handling
When logging in with SSO an identifier (`{iss}/{sub}` claims from the IdToken) is saved in a separate table (`sso_users`).
This is used to link to the SSO provider identifier without changing the default Vaultwarden user `uuid`. This is needed because:
- Storing the SSO identifier is important to prevent account takeover due to email change.
- We can't use the identifier as the User uuid since it's way longer (max 255 chars for the `sub` part, cf. the [spec](https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken)).
- We want to be able to associate an existing account based on `email`, but only when the user logs in for the first time (controlled by `SSO_SIGNUPS_MATCH_EMAIL`).
- We need to be able to associate with an existing stub account, such as the one created when inviting a user to an org (association is possible only if the user does not have a private key).
Additionally:
- Signup to Vaultwarden will be blocked if the provider reports the email as `unverified`.
- Changing the email needs to be done by the user since it requires updating the `key`.
  On login, if the email returned by the provider is not the one saved in Vaultwarden, an email will be sent to the user asking them to update it.
- If set, `SIGNUPS_DOMAINS_WHITELIST` is applied on SSO signup and when attempting to change the email.
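For reference, a quick way to inspect these associations is to query the `sso_users` table directly; a sketch assuming the SQLite backend with the default `data/db.sqlite3` path (adjust for MySQL/PostgreSQL):
```bash
# List each linked Vaultwarden user with its stored SSO identifier ({iss}/{sub}).
sqlite3 data/db.sqlite3 \
  'SELECT u.email, s.identifier, s.created_at
     FROM sso_users s JOIN users u ON u.uuid = s.user_uuid;'
```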
## Client Cache
By default the client cache is disabled since it can cause issues with the signing keys.
\
This means that the discovery endpoint will be called again each time we need to interact with the provider (generating the authorize_url, exchanging the authorization code, refreshing tokens).
This is suboptimal, so `SSO_CLIENT_CACHE_EXPIRATION` allows you to configure an expiration that should work for your provider.
As a protection against a misconfigured expiration, the client cache is invalidated if the validation of the `IdToken` fails (but you'll periodically have an unlucky user ^^).
### Google example (Rolling keys)
If we take Google as an example, checking the discovery [endpoint](https://accounts.google.com/.well-known/openid-configuration) response headers we can see that the `max-age` of the cache control is set to `3600` seconds, and the [jwk_uri](https://www.googleapis.com/oauth2/v3/certs) response headers usually contain a `max-age` with an even bigger value.
\
Combined with user [feedback](https://github.com/ramosbugs/openidconnect-rs/issues/152) we can conclude that Google rolls the signing keys each week.
Setting the cache expiration too high has diminishing returns, but using something like `600` (10 min) should already provide plenty of benefit.
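You can check what your own provider advertises the same way; a sketch using Google's endpoint (substitute your `SSO_AUTHORITY`):
```bash
# Inspect the Cache-Control max-age advertised by the discovery endpoint,
# as a hint for choosing SSO_CLIENT_CACHE_EXPIRATION.
curl -s -D - -o /dev/null "https://accounts.google.com/.well-known/openid-configuration" \
  | grep -i '^cache-control'
```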
### Rolling keys manually
If you want to roll the used key, first add a new one but do not immediately start signing with it.
Wait for the delay you configured in `SSO_CLIENT_CACHE_EXPIRATION`, then you can start signing with it.
As mentioned in the Google example, setting too high a value has diminishing returns even if you do not plan to roll the keys.
## Keycloak
The default access token lifetime might be only `5min`; set a longer value, otherwise it will collide with the `VaultWarden` front-end expiration detection, which is also set at `5min`.
\
At the realm level
- `Realm settings / Tokens / Access Token Lifespan` to at least `10min` (`accessTokenLifespan` setting when using `kcadm.sh`).
- `Realm settings / Sessions / SSO Session Idle/Max` for the Refresh token lifetime
Or for a specific client in `Clients / Client details / Advanced / Advanced settings` you can find `Access Token Lifespan` and `Client Session Idle/Max`.
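With `kcadm.sh` this can also be scripted; a sketch assuming the admin CLI is on the PATH, default admin credentials, and a realm named `test` (see the keycloak_setup.sh in this PR for a fuller example):
```bash
# Authenticate the admin CLI, then raise the realm-level access token lifespan to 10 minutes.
kcadm.sh config credentials --server "http://127.0.0.1:8080" --realm master --user admin --password admin --client admin-cli
kcadm.sh update realms/test -s accessTokenLifespan=600
```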
Server configuration is nothing specific, just set:
- `SSO_AUTHORITY=https://${domain}/realms/${realm_name}`
- `SSO_CLIENT_ID`
- `SSO_CLIENT_SECRET`
- `SSO_PKCE=true`
### Testing
If you want to run a testing instance of Keycloak a [docker-compose](docker/keycloak/docker-compose.yml) is available.
## Authelia
To obtain a `refresh_token` and be able to extend the session you'll need to add the `offline_access` scope.
Config will look like:
- `SSO_SCOPES="email profile offline_access"`
## Authentik
The default access token lifetime might be only `5min`; set a longer value, otherwise it will collide with the `VaultWarden` front-end expiration detection, which is also set at `5min`.
\
To change the tokens expiration go to `Applications / Providers / Edit / Advanced protocol settings`.
Starting with version `2024.2` you will need to add the `offline_access` scope and ensure it's selected in `Applications / Providers / Edit / Advanced protocol settings / Scopes` ([Doc](https://docs.goauthentik.io/docs/providers/oauth2/#authorization_code)).
Server configuration should look like:
- `SSO_AUTHORITY=https://${domain}/application/o/${application_name}/`: the trailing `/` is important
- `SSO_SCOPES="email profile offline_access"`
- `SSO_CLIENT_ID`
- `SSO_CLIENT_SECRET`
- `SSO_PKCE=true`
## GitLab
Create an application in your GitLab settings with:
- `redirectURI`: https://your.domain/identity/connect/oidc-signin
- `Confidential`: `true`
- `scopes`: `openid`, `profile`, `email`
Then configure your server with
- `SSO_AUTHORITY=https://gitlab.com`
- `SSO_CLIENT_ID`
- `SSO_CLIENT_SECRET`
- `SSO_PKCE=true`
## Google Auth
Google [Documentation](https://developers.google.com/identity/openid-connect/openid-connect).
\
By default, without extra [configuration](https://developers.google.com/identity/protocols/oauth2/web-server#creatingclient) you won't have a `refresh_token` and the session will be limited to 1h.
Configure your server with:
- `SSO_AUTHORITY=https://accounts.google.com`
- ```conf
SSO_AUTHORIZE_EXTRA_PARAMS="
access_type=offline
prompt=consent
"
```
- `SSO_PKCE=true`
- `SSO_CLIENT_ID`
- `SSO_CLIENT_SECRET`
## Kanidm
Kanidm recommends always running with PKCE:
Config will look like:
- `SSO_PKCE=true`
Otherwise you can disable the PKCE requirement with: `kanidm system oauth2 warning-insecure-client-disable-pkce CLIENT_NAME --name admin`.
## Microsoft Entra ID
1. Create an "App registration" in [Entra ID](https://entra.microsoft.com/) following [Identity | Applications | App registrations](https://entra.microsoft.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade/quickStartType//sourceType/Microsoft_AAD_IAM).
2. From the "Overview" of your "App registration", you'll need the "Directory (tenant) ID" for the `SSO_AUTHORITY` variable and the "Application (client) ID" as the `SSO_CLIENT_ID` value.
3. In "Certificates & Secrets" create an "App secret" , you'll need the "Secret Value" for the `SSO_CLIENT_SECRET` variable.
4. In "Authentication" add https://vaultwarden.example.org/identity/connect/oidc-signin as "Web Redirect URI".
5. In "API Permissions" make sure you have `profile`, `email` and `offline_access` listed under "API / Permission name" (`offline_access` is required, otherwise no refresh_token is returned, see https://github.com/MicrosoftDocs/azure-docs/issues/17134).
Only the v2 endpoint is compliant with the OpenID spec, see https://github.com/MicrosoftDocs/azure-docs/issues/38427 and https://github.com/ramosbugs/openidconnect-rs/issues/122.
Your configuration should look like this:
* `SSO_AUTHORITY=https://login.microsoftonline.com/${Directory (tenant) ID}/v2.0`
* `SSO_SCOPES="email profile offline_access"`
* `SSO_CLIENT_ID=${Application (client) ID}`
* `SSO_CLIENT_SECRET=${Secret Value}`
## Zitadel
To obtain a `refresh_token` and be able to extend the session you'll need to add the `offline_access` scope.
Additionally, Zitadel includes the `Project Id` and the `Client Id` in the audience of the Id Token.
For the validation to work you will need to add the `Project Id` as a trusted audience (`Client Id` is trusted by default).
You can control the trusted audiences with the config `SSO_AUDIENCE_TRUSTED`.
It appears it's not possible to use PKCE with a confidential client, so it needs to be disabled.
Config will look like:
- `SSO_AUTHORITY=https://${provider_host}`
- `SSO_SCOPES="email profile offline_access"`
- `SSO_CLIENT_ID`
- `SSO_CLIENT_SECRET`
- `SSO_AUDIENCE_TRUSTED='^${Project Id}$'`
- `SSO_PKCE=false`
## Session lifetime
Session lifetime is dependent on the refresh token and access token returned after calling your SSO token endpoint (grant type `authorization_code`).
If no refresh token is returned then the session will be limited to the access token lifetime.
Tokens are not persisted in VaultWarden but wrapped in JWT tokens and returned to the application (the `refresh_token` and `access_token` values returned by the VW `identity/connect/token` endpoint).
Note that VaultWarden will always return a `refresh_token` for compatibility reasons with the web front-end, and its presence does not indicate that a refresh token was returned by your SSO (but you can decode its value with https://jwt.io and then check if the `token` field contains anything).
With a refresh token present, activity in the application will trigger a refresh of the access token when it's close to expiration ([5min](https://github.com/bitwarden/clients/blob/0bcb45ed5caa990abaff735553a5046e85250f24/libs/common/src/auth/services/token.service.ts#L126) in the web client).
Additionally, for certain actions a token check is performed: if we have a refresh token we will perform a refresh, otherwise we'll call the user information endpoint to check the access token validity.
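If you prefer not to paste tokens into https://jwt.io, the same check can be done locally; a sketch assuming `jq` is installed and the value returned by `identity/connect/token` is in `$REFRESH_TOKEN`:
```bash
# Decode the JWT payload (the second dot-separated, base64url-encoded part)
# and check whether the wrapped `token` field contains an SSO refresh token.
payload=$(printf '%s' "$REFRESH_TOKEN" | cut -d '.' -f2 | tr '_-' '/+')
case $(( ${#payload} % 4 )) in
  2) payload="${payload}==" ;;
  3) payload="${payload}=" ;;
esac
printf '%s' "$payload" | base64 -d | jq .token
```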
### Disabling SSO session handling
If you are unable to obtain a `refresh_token`, or for any other reason, you can disable SSO session handling and revert to the default handling.
You'll need to enable `SSO_AUTH_ONLY_NOT_SESSION=true`; the access token will then be valid for 2h and the refresh token will allow for an idle time of 7 days (which can be extended indefinitely).
### Debug information
Running with `LOG_LEVEL=debug` you'll be able to see information on token expiration.
## Desktop Client
There are some issues handling the redirection from your browser (used for SSO login) back to the application.
### Chrome
Probably not much hope: an [issue](https://github.com/bitwarden/clients/issues/2606) is open on the subject, and it appears that neither Linux nor Windows is working.
### Firefox
On Windows you'll be presented with a prompt the first time you log in to confirm which application should be launched (but there is a bug at the moment: you might end up with an empty vault after login).
On Linux it's a bit more tricky.
First you'll need to add some config in `about:config`:
```
network.protocol-handler.expose.bitwarden=false
network.protocol-handler.external.bitwarden=true
```
If you have any doubt you can check `mailto` to see how it's configured.
The redirection will still not work since it appears that the association to an application can only be done on a link/click. You can trigger it with a dummy page such as:
```html
data:text/html,<a href="bitwarden:///dummy">Click me to register Bitwarden</a>
```
From now on the redirection should work.
If you need to change the application launched you can now find it in `Settings` by using the search function and entering `application`.

View File

@ -1,10 +1,10 @@
---
vault_version: "v2024.3.1"
vault_image_digest: "sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5"
# Cross Compile Docker Helper Scripts v1.3.0
# Cross Compile Docker Helper Scripts v1.4.0
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
rust_version: 1.77.0 # Rust version to be used
xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
rust_version: 1.77.2 # Rust version to be used
debian_version: bookworm # Debian release name to be used
alpine_version: 3.19 # Alpine version to be used
# For which platforms/architectures will we try to build images

View File

@ -31,10 +31,10 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:689b1e706f29e
########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms than linux/amd64
## And for Alpine we define all build images here, they will only be loaded when actually used
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.77.0 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.77.0 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.77.0 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.77.0 as build_armv6
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.77.2 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.77.2 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.77.2 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.77.2 as build_armv6
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
@ -65,13 +65,14 @@ RUN mkdir -pv "${CARGO_HOME}" \
RUN USER=root cargo new --bin /app
WORKDIR /app
# Shared variables across Debian and Alpine
# Environment variables for Cargo on Alpine based builds
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
# Output the current contents of the file
cat /env-cargo
# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

View File

@ -31,11 +31,11 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:689b1e706f29e
########################## Cross Compile Docker Helper Scripts ##########################
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
## And these bash scripts do not have any significant difference if at all
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.77.0-slim-bookworm as build
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.77.2-slim-bookworm as build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT
@ -88,9 +88,17 @@ RUN mkdir -pv "${CARGO_HOME}" \
RUN USER=root cargo new --bin /app
WORKDIR /app
# Environment variables for cargo across Debian and Alpine
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
ARCH_OPENSSL_INCLUDE_DIR
RUN source /env-cargo && \
if xx-info is-cross ; then \
# Some special variables if needed to override some build paths
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
fi && \
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
# Because of this we generate the needed environment variables here which we can load in the needed steps.
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

View File

@ -108,9 +108,17 @@ RUN USER=root cargo new --bin /app
WORKDIR /app
{% if base == "debian" %}
# Environment variables for cargo across Debian and Alpine
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
ARCH_OPENSSL_INCLUDE_DIR
RUN source /env-cargo && \
if xx-info is-cross ; then \
# Some special variables if needed to override some build paths
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
fi && \
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
# Because of this we generate the needed environment variables here which we can load in the needed steps.
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
@ -126,13 +134,14 @@ RUN source /env-cargo && \
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
{% elif base == "alpine" %}
# Shared variables across Debian and Alpine
# Environment variables for Cargo on Alpine based builds
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
# Output the current contents of the file
cat /env-cargo
# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
{% endif %}

View File

@ -11,6 +11,11 @@ With just these two files we can build both Debian and Alpine images for the fol
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)
There are also some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
- 386 (linux/386)
- ppc64le (linux/ppc64le)
- s390x (linux/s390x)
To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different than your host.<br>
This ensures the container build process can run binaries from other architectures.<br>
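As an illustration, building one of these targets locally could look like the sketch below (the exact bake variables your build expects may differ):
```bash
# Enable QEMU binfmt emulation for foreign architectures (one-time, privileged).
docker run --privileged --rm docker.io/tonistiigi/binfmt --install all
# Bake one of the unsupported Debian targets defined in docker-bake.hcl.
docker buildx bake --file docker/docker-bake.hcl debian-s390x
```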

View File

@ -125,6 +125,40 @@ target "debian-armv6" {
tags = generate_tags("", "-armv6")
}
// ==== Start of unsupported Debian architecture targets ===
// These are provided just to help users build for these rare platforms
// They will not be built by default
target "debian-386" {
inherits = ["debian"]
platforms = ["linux/386"]
tags = generate_tags("", "-386")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
}
}
target "debian-ppc64le" {
inherits = ["debian"]
platforms = ["linux/ppc64le"]
tags = generate_tags("", "-ppc64le")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
}
}
target "debian-s390x" {
inherits = ["debian"]
platforms = ["linux/s390x"]
tags = generate_tags("", "-s390x")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
}
}
// ==== End of unsupported Debian architecture targets ===
// A Group to build all platforms individually for local testing
group "debian-all" {
targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]

View File

@ -0,0 +1,26 @@
# Keycloak Config
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=${KEYCLOAK_ADMIN}
KC_HTTP_HOST=127.0.0.1
KC_HTTP_PORT=8080
# Script parameters (use Keycloak and VaultWarden config too)
TEST_REALM=test
TEST_USER=test
TEST_USER_PASSWORD=${TEST_USER}
TEST_USER_MAIL="${TEST_USER}@yopmail.com"
TEST_USER_2=test2
TEST_USER_2_PASSWORD=${TEST_USER_2}
TEST_USER_2_MAIL="${TEST_USER_2}@yopmail.com"
# VaultWarden Config
ROCKET_PORT=8000
DOMAIN=http://127.0.0.1:${ROCKET_PORT}
I_REALLY_WANT_VOLATILE_STORAGE=true
SSO_ENABLED=true
SSO_ONLY=false
SSO_CLIENT_ID=VaultWarden
SSO_CLIENT_SECRET=VaultWarden
SSO_AUTHORITY=http://${KC_HTTP_HOST}:${KC_HTTP_PORT}/realms/${TEST_REALM}

docker/keycloak/README.md Normal file (67 lines)
View File

@ -0,0 +1,67 @@
# OpenID Connect test setup
This `docker-compose` template allows running a local `VaultWarden` and [`Keycloak`](https://www.keycloak.org/) instance to test OIDC.
## Usage
You'll need `docker` and `docker-compose` ([cf](https://docs.docker.com/engine/install/)).
First create a copy of `.env.template` as `.env` (this is done to prevent committing your custom settings, e.g. `SMTP_`).
Then start the stack (the `profile` is required to run `VaultWarden`):
```bash
> DOCKER_BUILDKIT=1 docker-compose --profile VaultWarden up
....
keycloakSetup_1 | Logging into http://127.0.0.1:8080 as user admin of realm master
keycloakSetup_1 | Created new realm with id 'test'
keycloakSetup_1 | 74af4933-e386-4e64-ba15-a7b61212c45e
oidc_keycloakSetup_1 exited with code 0
```
Wait until `oidc_keycloakSetup_1 exited with code 0`, which indicates the correct setup of the Keycloak realm, client and user (it's normal for this container to stop once the configuration is done).
Then you can access:
- `VaultWarden` on http://127.0.0.1:8000 with the default user `test@yopmail.com/test`.
- `Keycloak` on http://127.0.0.1:8080/admin/master/console/ with the default user `admin/admin`
To proceed with an SSO login, after you enter the email the SSO button should be visible on the screen prompting for the `Master Password`.
## Running only Keycloak
Since the `VaultWarden` service is defined with a `profile` you can just use the default `docker-compose` command:
```bash
> docker-compose up
```
When running with a local VaultWarden, you'll need to make the SSO button visible using:
```bash
sed -i 's#a\[routerlink="/sso"\],##' /web-vault/app/main.*.css
```
Otherwise you'll need to reveal the SSO login button using the debug console (F12):
```js
document.querySelector('a[routerlink="/sso"]').style.setProperty("display", "inline-block", "important");
```
## To force rebuilding the VaultWarden image
Use `DOCKER_BUILDKIT=1 docker-compose --profile VaultWarden up --build VaultWarden`.
If the `Keycloak` configuration is not run after building, just interrupt and run again without `--build`.
## Configuration
All configuration for `keycloak` / `VaultWarden` / `keycloak_setup.sh` can be found in [.env](.env.template).
The content of the file will be loaded as environment variables in all containers.
- `keycloak` [configuration](https://www.keycloak.org/server/all-config) includes `KEYCLOAK_ADMIN` / `KEYCLOAK_ADMIN_PASSWORD` and any variable prefixed `KC_` ([more information](https://www.keycloak.org/server/configuration#_example_configuring_the_db_url_host_parameter)).
- All `VaultWarden` configuration can be set (e.g. `SMTP_*`)
## Cleanup
Use `docker-compose --profile VaultWarden down`.

View File

@ -0,0 +1,31 @@
services:
keycloak:
container_name: keycloak-${ENV:-dev}
image: quay.io/keycloak/keycloak
network_mode: "host"
command:
- start-dev
env_file: ${ENV}.env
volumes:
- ./keycloak_setup.sh:/opt/script/keycloak_setup.sh
keycloakSetup:
container_name: keycloakSetup-${ENV:-dev}
image: quay.io/keycloak/keycloak
network_mode: "host"
depends_on:
- keycloak
restart: "no"
env_file: ${ENV}.env
entrypoint: [ "bash", "-c", "/opt/script/keycloak_setup.sh"]
volumes:
- ${KC_SETUP_PATH:-.}/keycloak_setup.sh:/opt/script/keycloak_setup.sh
VaultWarden:
image: vaultwarden
profiles: ["VaultWarden"]
network_mode: "host"
build:
context: ../..
dockerfile: Dockerfile
depends_on:
- keycloak
env_file: ${ENV}.env

View File

@ -0,0 +1,32 @@
#!/bin/bash
export PATH=$PATH:/opt/keycloak/bin
CANARY=/tmp/keycloak_setup_done
if [ -f $CANARY ]
then
echo "Setup should already be done. Will not run."
exit 0
fi
# Retry until Keycloak is up and the admin CLI can authenticate
while true; do
sleep 5
kcadm.sh config credentials --server "http://${KC_HTTP_HOST}:${KC_HTTP_PORT}" --realm master --user "$KEYCLOAK_ADMIN" --password "$KEYCLOAK_ADMIN_PASSWORD" --client admin-cli
EC=$?
if [ $EC -eq 0 ]; then
break
fi
echo "Will retry in 5 seconds"
done
kcadm.sh create realms -s realm="$TEST_REALM" -s enabled=true -s "accessTokenLifespan=600"
kcadm.sh create clients -r "$TEST_REALM" -s "clientId=$SSO_CLIENT_ID" -s "secret=$SSO_CLIENT_SECRET" -s "redirectUris=[\"$DOMAIN/*\"]" -i
TEST_USER_ID=$(kcadm.sh create users -r "$TEST_REALM" -s "username=$TEST_USER" -s "email=$TEST_USER_MAIL" -s emailVerified=true -s enabled=true -i)
kcadm.sh update users/$TEST_USER_ID/reset-password -r "$TEST_REALM" -s type=password -s "value=$TEST_USER_PASSWORD" -n
TEST_USER_2_ID=$(kcadm.sh create users -r "$TEST_REALM" -s "username=$TEST_USER_2" -s "email=$TEST_USER_2_MAIL" -s emailVerified=true -s enabled=true -i)
kcadm.sh update users/$TEST_USER_2_ID/reset-password -r "$TEST_REALM" -s type=password -s "value=$TEST_USER_2_PASSWORD" -n
touch $CANARY

View File

@ -22,4 +22,11 @@ elif [ -d /etc/bitwarden_rs.d ]; then
done
fi
# Toggle the SSO Link
if [ "$SSO_ENABLED" = "true" ]; then
sed -i 's#a\[routerlink="/sso"\]#a\[routerlink="/sso-sed"\]#' /web-vault/app/main.*.css
else
sed -i 's#a\[routerlink="/sso-sed"\]#a\[routerlink="/sso"\]#' /web-vault/app/main.*.css
fi
exec /vaultwarden "${@}"

View File

@ -0,0 +1 @@
DROP TABLE sso_nonce;

View File

@ -0,0 +1,4 @@
CREATE TABLE sso_nonce (
nonce CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@ -0,0 +1 @@
ALTER TABLE users_organizations DROP COLUMN invited_by_email;

View File

@ -0,0 +1 @@
ALTER TABLE users_organizations ADD COLUMN invited_by_email TEXT DEFAULT NULL;

View File

@ -0,0 +1,6 @@
DROP TABLE IF EXISTS sso_nonce;
CREATE TABLE sso_nonce (
nonce CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@ -0,0 +1,8 @@
DROP TABLE IF EXISTS sso_nonce;
CREATE TABLE sso_nonce (
state VARCHAR(512) NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
redirect_uri TEXT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT now()
);

View File

@ -0,0 +1,8 @@
DROP TABLE IF EXISTS sso_nonce;
CREATE TABLE sso_nonce (
state VARCHAR(512) NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
redirect_uri TEXT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT now()
);

View File

@ -0,0 +1,9 @@
DROP TABLE IF EXISTS sso_nonce;
CREATE TABLE sso_nonce (
state VARCHAR(512) NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
verifier TEXT,
redirect_uri TEXT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT now()
);

View File

@ -0,0 +1 @@
DROP TABLE IF EXISTS sso_users;

View File

@ -0,0 +1,7 @@
CREATE TABLE sso_users (
user_uuid CHAR(36) NOT NULL PRIMARY KEY,
identifier VARCHAR(768) NOT NULL UNIQUE,
created_at TIMESTAMP NOT NULL DEFAULT now(),
FOREIGN KEY(user_uuid) REFERENCES users(uuid)
);

View File

@ -0,0 +1,2 @@
ALTER TABLE sso_users DROP FOREIGN KEY `sso_users_ibfk_1`;
ALTER TABLE sso_users ADD FOREIGN KEY(user_uuid) REFERENCES users(uuid) ON UPDATE CASCADE ON DELETE CASCADE;

View File

@ -0,0 +1 @@
DROP TABLE sso_nonce;

View File

@ -0,0 +1,4 @@
CREATE TABLE sso_nonce (
nonce CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT now()
);

View File

@ -0,0 +1 @@
ALTER TABLE users_organizations DROP COLUMN invited_by_email;

View File

@ -0,0 +1 @@
ALTER TABLE users_organizations ADD COLUMN invited_by_email TEXT DEFAULT NULL;

View File

@ -0,0 +1,6 @@
DROP TABLE sso_nonce;
CREATE TABLE sso_nonce (
nonce CHAR(36) NOT NULL PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT now()
);

View File

@ -0,0 +1,8 @@
DROP TABLE sso_nonce;
CREATE TABLE sso_nonce (
state TEXT NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
redirect_uri TEXT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT now()
);

View File

@ -0,0 +1,8 @@
DROP TABLE IF EXISTS sso_nonce;
CREATE TABLE sso_nonce (
state TEXT NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
redirect_uri TEXT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT now()
);

View File

@ -0,0 +1,9 @@
DROP TABLE IF EXISTS sso_nonce;
CREATE TABLE sso_nonce (
state TEXT NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
verifier TEXT,
redirect_uri TEXT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT now()
);

View File

@ -0,0 +1 @@
DROP TABLE IF EXISTS sso_users;

View File

@ -0,0 +1,7 @@
CREATE TABLE sso_users (
user_uuid CHAR(36) NOT NULL PRIMARY KEY,
identifier TEXT NOT NULL UNIQUE,
created_at TIMESTAMP NOT NULL DEFAULT now(),
FOREIGN KEY(user_uuid) REFERENCES users(uuid)
);

View File

@ -0,0 +1,3 @@
ALTER TABLE sso_users
DROP CONSTRAINT "sso_users_user_uuid_fkey",
ADD CONSTRAINT "sso_users_user_uuid_fkey" FOREIGN KEY(user_uuid) REFERENCES users(uuid) ON UPDATE CASCADE ON DELETE CASCADE;

View File

@ -0,0 +1 @@
DROP TABLE sso_nonce;

View File

@ -0,0 +1,4 @@
CREATE TABLE sso_nonce (
nonce CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@ -0,0 +1 @@
ALTER TABLE users_organizations DROP COLUMN invited_by_email;

View File

@ -0,0 +1 @@
ALTER TABLE users_organizations ADD COLUMN invited_by_email TEXT DEFAULT NULL;

View File

@ -0,0 +1,6 @@
DROP TABLE sso_nonce;
CREATE TABLE sso_nonce (
nonce CHAR(36) NOT NULL PRIMARY KEY,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@ -0,0 +1,8 @@
DROP TABLE sso_nonce;
CREATE TABLE sso_nonce (
state TEXT NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
redirect_uri TEXT NOT NULL,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@ -0,0 +1,8 @@
DROP TABLE IF EXISTS sso_nonce;
CREATE TABLE sso_nonce (
state TEXT NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
redirect_uri TEXT NOT NULL,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@ -0,0 +1,9 @@
DROP TABLE IF EXISTS sso_nonce;
CREATE TABLE sso_nonce (
state TEXT NOT NULL PRIMARY KEY,
nonce TEXT NOT NULL,
verifier TEXT,
redirect_uri TEXT NOT NULL,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

View File

@ -0,0 +1 @@
DROP TABLE IF EXISTS sso_users;

View File

@ -0,0 +1,7 @@
CREATE TABLE sso_users (
user_uuid CHAR(36) NOT NULL PRIMARY KEY,
identifier TEXT NOT NULL UNIQUE,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(user_uuid) REFERENCES users(uuid)
);

View File

@ -0,0 +1,9 @@
DROP TABLE IF EXISTS sso_users;
CREATE TABLE sso_users (
user_uuid CHAR(36) NOT NULL PRIMARY KEY,
identifier TEXT NOT NULL UNIQUE,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(user_uuid) REFERENCES users(uuid) ON UPDATE CASCADE ON DELETE CASCADE
);

playwright/.gitignore vendored Normal file (6 lines)
View File

@ -0,0 +1,6 @@
logs
node_modules/
/test-results/
/playwright-report/
/playwright/.cache/
temp

playwright/README.md Normal file (77 lines)
View File

@ -0,0 +1,77 @@
# OpenID Keycloak scenarios
This allows running integration tests using [Playwright](https://playwright.dev/).
\
It uses its own [test.env](/test/scenarios/test.env) with different ports to not collide with a running dev instance.
## Install
```bash
npm install
npx playwright install firefox
```
## Usage
To run all the tests:
```bash
npx playwright test
```
To access the UI to easily run tests individually and debug if needed:
```bash
npx playwright test --ui
```
### DB
Projects are configured to allow running tests only against a specific database.
\
You can use:
```bash
npx playwright test --project sqllite
npx playwright test --project postgres
npx playwright test --project mysql
```
### SSO
To run the SSO tests:
```bash
npx playwright test --project sso-sqllite
```
Additionally, if you want, you can keep the Keycloak `docker-compose` running (its state is not impacted by the tests):
```bash
KC_KEEP_RUNNNING=true npx playwright test --project sso-sqllite
```
### Running specific tests
To run a whole file you can:
```bash
npx playwright test --project=sqllite tests/login.spec.ts
npx playwright test --project=sqllite login
```
To run only a specific test (it might fail if it has dependencies):
```bash
npx playwright test --project=sqllite -g "Account creation"
npx playwright test --project=sqllite tests/login.spec.ts:16
```
## Writing scenario
When creating a new scenario, use the recorder to more easily identify elements (in general, try to rely on visible hints to identify elements and not hidden ids).
This does not start the server; you will need to start it manually.
```bash
npx playwright codegen "http://127.0.0.1:8000"
```

View File

@ -0,0 +1,82 @@
import { type FullConfig } from '@playwright/test';
import { execSync } from 'node:child_process';
import fs from 'fs';
import yaml from 'js-yaml';
const utils = require('./global-utils');
utils.loadEnv();
function readCurrentVersion(){
try {
const vw_version_file = fs.readFileSync('temp/web-vault/vw-version.json', {
encoding: 'utf8',
flag: 'r'
});
return JSON.parse(vw_version_file)["version"];
} catch(err) {
console.log(`Failed to read frontend current version: ${err}`);
}
}
function readDockerVersion(){
try {
const docker_settings = fs.readFileSync('../docker/DockerSettings.yaml', {
encoding: 'utf8',
flag: 'r'
});
const settings = yaml.load(docker_settings);
return settings["vault_version"];
} catch(err) {
console.log(`Failed to read docker frontend current version: ${err}`);
}
}
function retrieveFrontend(){
const vw_version = readCurrentVersion();
const vv = readDockerVersion();
if( !vv ){
console.log("Empty docker frontend version");
process.exit(1);
}
try {
if( vv != `v${vw_version}`) {
fs.rmSync("./temp/web-vault", { recursive: true, force: true });
execSync(`cd temp && wget -c https://github.com/dani-garcia/bw_web_builds/releases/download/${vv}/bw_web_${vv}.tar.gz -O - | tar xz`, { stdio: "inherit" });
// Make the SSO button visible
execSync(`bash -c "sed -i 's#a.routerlink=./sso..,##' temp/web-vault/app/main.*.css"`, { stdio: "inherit" });
console.log(`Retrieved bw_web_builds-${vv}`);
} else {
console.log(`Using existing bw_web_builds-${vv}`);
}
} catch(err) {
console.log(`Failed to retrieve frontend: ${err}`);
process.exit(1);
}
}
function buildServer(){
if( !fs.existsSync('temp/vaultwarden') ){
console.log("Rebuilding server");
execSync(`cd .. && cargo build --features sqlite,mysql,postgresql --release`, { stdio: "inherit" });
execSync(`cp ../target/release/vaultwarden temp/vaultwarden`, { stdio: "inherit" });
} else {
console.log("Using existing server");
}
}
async function globalSetup(config: FullConfig) {
execSync("mkdir -p temp/logs");
buildServer();
retrieveFrontend();
}
export default globalSetup;

playwright/global-utils.ts Normal file (161 lines)
View File

@ -0,0 +1,161 @@
import { test, type Browser, type TestInfo, type Page } from '@playwright/test';
import { execSync } from 'node:child_process';
import dotenv from 'dotenv';
import dotenvExpand from 'dotenv-expand';
const fs = require("fs");
const { spawn } = require('node:child_process');
function loadEnv(){
var myEnv = dotenv.config({ path: 'test.env' });
dotenvExpand.expand(myEnv);
}
async function waitFor(url: String, browser: Browser) {
var ready = false;
var context;
do {
try {
context = await browser.newContext();
const page = await context.newPage();
await page.waitForTimeout(500);
const result = await page.goto(url);
ready = result.status() === 200;
} catch(e) {
if( !e.message.includes("CONNECTION_REFUSED") ){
throw e;
}
} finally {
await context.close();
}
} while(!ready);
}
function startStopSqlite(){
fs.rmSync("temp/db.sqlite3", { force: true });
fs.rmSync("temp/db.sqlite3-shm", { force: true });
fs.rmSync("temp/db.sqlite3-wal", { force: true });
}
function startMariaDB() {
console.log(`Starting MariaDB`);
execSync(`docker run --rm --name ${process.env.MARIADB_CONTAINER} \
-e MARIADB_ROOT_PASSWORD=${process.env.MARIADB_PWD} \
-e MARIADB_USER=${process.env.MARIADB_USER} \
-e MARIADB_PASSWORD=${process.env.MARIADB_PWD} \
-e MARIADB_DATABASE=${process.env.MARIADB_DB} \
-p ${process.env.MARIADB_PORT}:3306 \
-d mariadb:10.4`
);
}
function stopMariaDB() {
console.log("Stopping MariaDB (ensure DB is wiped)");
execSync(`docker stop ${process.env.MARIADB_CONTAINER} || true`);
}
function startMysqlDB() {
console.log(`Starting Mysql`);
execSync(`docker run --rm --name ${process.env.MYSQL_CONTAINER} \
-e MYSQL_ROOT_PASSWORD=${process.env.MYSQL_PWD} \
-e MYSQL_USER=${process.env.MYSQL_USER} \
-e MYSQL_PASSWORD=${process.env.MYSQL_PWD} \
-e MYSQL_DATABASE=${process.env.MYSQL_DB} \
-p ${process.env.MYSQL_PORT}:3306 \
-d mysql:8.3.0`
);
}
function stopMysqlDB() {
console.log("Stopping Mysql (ensure DB is wiped)");
execSync(`docker stop ${process.env.MYSQL_CONTAINER} || true`);
}
function startPostgres() {
console.log(`Starting Postgres`);
execSync(`docker run --rm --name ${process.env.POSTGRES_CONTAINER} \
-e POSTGRES_USER=${process.env.POSTGRES_USER} \
-e POSTGRES_PASSWORD=${process.env.POSTGRES_PWD} \
-e POSTGRES_DB=${process.env.POSTGRES_DB} \
-p ${process.env.POSTGRES_PORT}:5432 \
-d postgres:16.2`
);
};
function stopPostgres() {
console.log("Stopping Postgres (Ensure DB is wiped)");
execSync(`docker stop ${process.env.POSTGRES_CONTAINER} || true`);
}
function dbConfig(testInfo: TestInfo){
if( testInfo.project.name.includes("postgres") ){
return {
DATABASE_URL: `postgresql://${process.env.POSTGRES_USER}:${process.env.POSTGRES_PWD}@127.0.0.1:${process.env.POSTGRES_PORT}/${process.env.POSTGRES_DB}`
};
} else if( testInfo.project.name.includes("mariadb") ){
return {
DATABASE_URL: `mysql://${process.env.MARIADB_USER}:${process.env.MARIADB_PWD}@127.0.0.1:${process.env.MARIADB_PORT}/${process.env.MARIADB_DB}`
};
} else if( testInfo.project.name.includes("mysql") ){
return {
DATABASE_URL: `mysql://${process.env.MYSQL_USER}:${process.env.MYSQL_PWD}@127.0.0.1:${process.env.MYSQL_PORT}/${process.env.MYSQL_DB}`
};
} else {
return { I_REALLY_WANT_VOLATILE_STORAGE: true };
}
}
async function startVaultwarden(browser: Browser, testInfo: TestInfo, env = {}, resetDB: Boolean = true) {
if( resetDB ){
test.setTimeout(20000);
if( testInfo.project.name.includes("postgres") ){
stopPostgres();
startPostgres()
} else if( testInfo.project.name.includes("mariadb") ){
stopMariaDB();
startMariaDB();
} else if( testInfo.project.name.includes("mysql") ){
stopMysqlDB();
startMysqlDB();
} else {
startStopSqlite();
}
}
const vw_log = fs.openSync("temp/logs/vaultwarden.log", "a");
var proc = spawn("temp/vaultwarden", {
env: { ...process.env, ...env, ...dbConfig(testInfo) },
stdio: [process.stdin, vw_log, vw_log]
});
await waitFor("/", browser);
console.log(`Vaultwarden running on: ${process.env.DOMAIN}`);
return proc;
}
async function stopVaultwarden(proc, testInfo: TestInfo, resetDB: Boolean = true) {
console.log(`Vaultwarden stopping`);
proc.kill();
if( resetDB ){
if( testInfo.project.name.includes("postgres") ){
stopPostgres();
} else if( testInfo.project.name.includes("mariadb") ){
stopMariaDB();
} else if( testInfo.project.name.includes("mysql") ){
stopMysqlDB();
} else {
startStopSqlite();
}
}
}
async function restartVaultwarden(proc, page: Page, testInfo: TestInfo, env, resetDB: Boolean = true) {
stopVaultwarden(proc, testInfo, resetDB);
return startVaultwarden(page.context().browser(), testInfo, env, resetDB);
}
export { loadEnv, waitFor, startVaultwarden, stopVaultwarden, restartVaultwarden };

playwright/package-lock.json generated Normal file (141 lines)
View File

@ -0,0 +1,141 @@
{
"name": "scenarios",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "scenarios",
"version": "1.0.0",
"license": "ISC",
"devDependencies": {
"@playwright/test": "^1.41.2",
"@types/node": "^20.11.16",
"abort-controller": "3.0.0",
"dotenv": "^16.4.1",
"dotenv-expand": "^10.0.0",
"js-yaml": "4.1.0"
}
},
"node_modules/@playwright/test": {
"version": "1.41.2",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.41.2.tgz",
"integrity": "sha512-qQB9h7KbibJzrDpkXkYvsmiDJK14FULCCZgEcoe2AvFAS64oCirWTwzTlAYEbKaRxWs5TFesE1Na6izMv3HfGg==",
"dev": true,
"dependencies": {
"playwright": "1.41.2"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=16"
}
},
"node_modules/@types/node": {
"version": "20.11.16",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.16.tgz",
"integrity": "sha512-gKb0enTmRCzXSSUJDq6/sPcqrfCv2mkkG6Jt/clpn5eiCbKTY+SgZUxo+p8ZKMof5dCp9vHQUAB7wOUTod22wQ==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/abort-controller": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
"dev": true,
"dependencies": {
"event-target-shim": "^5.0.0"
},
"engines": {
"node": ">=6.5"
}
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"dev": true
},
"node_modules/dotenv": {
"version": "16.4.1",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.1.tgz",
"integrity": "sha512-CjA3y+Dr3FyFDOAMnxZEGtnW9KBR2M0JvvUtXNW+dYJL5ROWxP9DUHCwgFqpMk0OXCc0ljhaNTr2w/kutYIcHQ==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/motdotla/dotenv?sponsor=1"
}
},
"node_modules/dotenv-expand": {
"version": "10.0.0",
"resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-10.0.0.tgz",
"integrity": "sha512-GopVGCpVS1UKH75VKHGuQFqS1Gusej0z4FyQkPdwjil2gNIv+LNsqBlboOzpJFZKVT95GkCyWJbBSdFEFUWI2A==",
"dev": true,
"engines": {
"node": ">=12"
}
},
"node_modules/event-target-shim": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dev": true,
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/playwright": {
"version": "1.41.2",
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.41.2.tgz",
"integrity": "sha512-v0bOa6H2GJChDL8pAeLa/LZC4feoAMbSQm1/jF/ySsWWoaNItvrMP7GEkvEEFyCTUYKMxjQKaTSg5up7nR6/8A==",
"dev": true,
"dependencies": {
"playwright-core": "1.41.2"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=16"
},
"optionalDependencies": {
"fsevents": "2.3.2"
}
},
"node_modules/playwright-core": {
"version": "1.41.2",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.41.2.tgz",
"integrity": "sha512-VaTvwCA4Y8kxEe+kfm2+uUUw5Lubf38RxF7FpBxLPmGe5sdNkSg5e3ChEigaGrX7qdqT3pt2m/98LiyvU2x6CA==",
"dev": true,
"bin": {
"playwright-core": "cli.js"
},
"engines": {
"node": ">=16"
}
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"dev": true
}
}
}

playwright/package.json Normal file

@ -0,0 +1,18 @@
{
"name": "scenarios",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@playwright/test": "^1.41.2",
"@types/node": "^20.11.16",
"abort-controller": "3.0.0",
"dotenv": "^16.4.1",
"dotenv-expand": "^10.0.0",
"js-yaml": "4.1.0"
}
}


@ -0,0 +1,99 @@
import { defineConfig, devices } from '@playwright/test';
import { exec } from 'node:child_process';
const utils = require('./global-utils');
utils.loadEnv();
/**
* See https://playwright.dev/docs/test-configuration.
*/
export default defineConfig({
testDir: './.',
/* Run tests in files in parallel */
fullyParallel: false,
/* Fail the build on CI if you accidentally left test.only in the source code. */
forbidOnly: !!process.env.CI,
/* Retry on CI only */
retries: process.env.CI ? 2 : 0,
workers: 1,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: 'html',
timeout: 10 * 1000,
expect: { timeout: 10 * 1000 },
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
/* Base URL to use in actions like `await page.goto('/')`. */
baseURL: process.env.DOMAIN,
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
trace: 'on-first-retry',
},
/* Configure projects for major browsers */
projects: [
{
name: 'sqlite',
testMatch: 'tests/*.spec.ts',
testIgnore: 'tests/sso_*.spec.ts',
use: { ...devices['Desktop Firefox'] },
},
{
name: 'postgres',
testMatch: 'tests/*.spec.ts',
testIgnore: 'tests/sso_*.spec.ts',
use: { ...devices['Desktop Firefox'] },
},
{
name: 'mariadb',
testMatch: 'tests/*.spec.ts',
testIgnore: 'tests/sso_*.spec.ts',
use: { ...devices['Desktop Firefox'] },
},
{
name: 'mysql',
testMatch: 'tests/*.spec.ts',
testIgnore: 'tests/sso_*.spec.ts',
use: { ...devices['Desktop Firefox'] },
},
{
name: 'sso-setup',
testMatch: 'sso-setup.ts',
teardown: 'sso-teardown',
},
{
name: 'sso-sqlite',
testMatch: 'tests/sso_*.spec.ts',
dependencies: ['sso-setup'],
teardown: 'sso-teardown',
},
{
name: 'sso-postgres',
testMatch: 'tests/sso_*.spec.ts',
dependencies: ['sso-setup'],
teardown: 'sso-teardown',
},
{
name: 'sso-mariadb',
testMatch: 'tests/sso_*.spec.ts',
dependencies: ['sso-setup'],
teardown: 'sso-teardown',
},
{
name: 'sso-mysql',
testMatch: 'tests/sso_*.spec.ts',
dependencies: ['sso-setup'],
teardown: 'sso-teardown',
},
{
name: 'sso-teardown',
testMatch: 'sso-teardown.ts',
},
],
globalSetup: require.resolve('./global-setup'),
});
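
With each database declared as its own Playwright project, a single matrix entry can be run in isolation using Playwright's standard project filter (a usage sketch; the project names are the ones defined above):

npx playwright test --project=postgres
npx playwright test --project=sso-mariadb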

playwright/sso-setup.ts Normal file

@ -0,0 +1,18 @@
import { test, expect, type TestInfo } from '@playwright/test';
const { exec } = require('node:child_process');
const utils = require('./global-utils');
utils.loadEnv();
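// Note: docker-compose is launched fire-and-forget here; the 'Keycloak is up' test below
// doubles as a readiness probe, polling SSO_AUTHORITY until Keycloak answers or the timeout hits.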
test.beforeAll('Setup', async () => {
const kcPath = process.env.KC_SETUP_PATH;
console.log("Starting Keycloak");
exec(`ENV=test KC_SETUP_PATH=${kcPath} docker-compose -f ${kcPath}/docker-compose.yml --project-directory . up >> temp/logs/keycloak.log 2>&1`);
});
test('Keycloak is up', async ({ page }) => {
test.setTimeout(60000);
await utils.waitFor(process.env.SSO_AUTHORITY, page.context().browser());
console.log(`Keycloak running on: ${process.env.SSO_AUTHORITY}`);
});


@ -0,0 +1,17 @@
import { test, type FullConfig } from '@playwright/test';
const { execSync } = require('node:child_process');
const utils = require('./global-utils');
utils.loadEnv();
test('Keycloak teardown', async () => {
const kcPath = process.env.KC_SETUP_PATH;
if( process.env.KC_KEEP_RUNNING == "true" ) {
console.log("Keep Keycloak running");
} else {
console.log("Keycloak stopping");
execSync(`ENV=test KC_SETUP_PATH=${kcPath} docker-compose -f ${kcPath}/docker-compose.yml --project-directory . down`);
}
});

playwright/test.env Normal file

@ -0,0 +1,69 @@
##################################################################
### Shared Playwright test env file for Vaultwarden and databases ###
##################################################################
########################
# Docker-compose Config#
########################
KC_SETUP_PATH=../docker/keycloak
KC_KEEP_RUNNING=false
###################
# Keycloak Config #
###################
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=${KEYCLOAK_ADMIN}
KC_HTTP_HOST=127.0.0.1
KC_HTTP_PORT=8081
# Script parameters (use Keycloak and VaultWarden config too)
TEST_REALM=test
TEST_USER=test
TEST_USER_PASSWORD=${TEST_USER}
TEST_USER_MAIL="${TEST_USER}@example.com"
TEST_USER_2=test2
TEST_USER_2_PASSWORD=${TEST_USER_2}
TEST_USER_2_MAIL="${TEST_USER_2}@example.com"
######################
# Vaultwarden Config #
######################
DATA_FOLDER=temp
WEB_VAULT_FOLDER=temp/web-vault/
ROCKET_PORT=8001
DOMAIN=http://127.0.0.1:${ROCKET_PORT}
SSO_CLIENT_ID=VaultWarden
SSO_CLIENT_SECRET=VaultWarden
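# OpenID Connect issuer: the Keycloak realm created by the docker-compose setup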
SSO_AUTHORITY=http://${KC_HTTP_HOST}:${KC_HTTP_PORT}/realms/${TEST_REALM}
SSO_PKCE=true
###########################
# Docker MariaDb container#
###########################
MARIADB_CONTAINER=vw-mariadb-test
MARIADB_PORT=3307
MARIADB_USER=vaultwarden
MARIADB_PWD=vaultwarden
MARIADB_DB=vaultwarden
###########################
# Docker Mysql container#
###########################
MYSQL_CONTAINER=vw-mysql-test
MYSQL_PORT=3309
MYSQL_USER=vaultwarden
MYSQL_PWD=vaultwarden
MYSQL_DB=vaultwarden
############################
# Docker Postgres container#
############################
POSTGRES_CONTAINER=vw-postgres-test
POSTGRES_PORT=5433
POSTGRES_USER=vaultwarden
POSTGRES_PWD=vaultwarden
POSTGRES_DB=vaultwarden


@ -0,0 +1,54 @@
import { test, expect, type TestInfo } from '@playwright/test';
const utils = require('../global-utils');
utils.loadEnv();
let proc;
test.beforeAll('Setup', async ({ browser }, testInfo: TestInfo) => {
proc = await utils.startVaultwarden(browser, testInfo, {});
});
test.afterAll('Teardown', async ({}, testInfo: TestInfo) => {
await utils.stopVaultwarden(proc, testInfo);
});
test('Account creation', async ({ page }) => {
// Landing page
await page.goto('/');
await page.getByRole('link', { name: 'Create account' }).click();
// Back to Vault create account
await expect(page).toHaveTitle(/Create account | Vaultwarden Web/);
await page.getByLabel(/Email address/).fill(process.env.TEST_USER_MAIL);
await page.getByLabel('Name').fill(process.env.TEST_USER);
await page.getByLabel('Master password\n (required)', { exact: true }).fill('Master password');
await page.getByLabel('Re-type master password').fill('Master password');
await page.getByRole('button', { name: 'Create account' }).click();
// Back to the login page
await expect(page).toHaveTitle('Vaultwarden Web');
await expect(page.getByLabel('Your new account has been created')).toBeVisible();
await page.getByRole('button', { name: 'Continue' }).click();
// Unlock page
await page.getByLabel('Master password').fill('Master password');
await page.getByRole('button', { name: 'Log in with master password' }).click();
// We are now in the default vault page
await expect(page).toHaveTitle(/Vaults/);
});
test('Master password login', async ({ page }) => {
// Landing page
await page.goto('/');
await page.getByLabel(/Email address/).fill(process.env.TEST_USER_MAIL);
await page.getByRole('button', { name: 'Continue' }).click();
// Unlock page
await page.getByLabel('Master password').fill('Master password');
await page.getByRole('button', { name: 'Log in with master password' }).click();
// We are now in the default vault page
await expect(page).toHaveTitle(/Vaults/);
});


@ -0,0 +1,121 @@
import { test, expect, type TestInfo } from '@playwright/test';
const utils = require('../global-utils');
utils.loadEnv();
let proc;
test.beforeAll('Setup', async ({ browser }, testInfo: TestInfo) => {
proc = await utils.startVaultwarden(browser, testInfo, {
SSO_ENABLED: true,
SSO_ONLY: false
});
});
test.afterAll('Teardown', async ({}, testInfo: TestInfo) => {
await utils.stopVaultwarden(proc, testInfo);
});
test('Account creation using SSO', async ({ page }) => {
// Landing page
await page.goto('/');
await page.getByLabel(/Email address/).fill(process.env.TEST_USER_MAIL);
await page.getByRole('button', { name: 'Continue' }).click();
// Unlock page
await page.getByRole('link', { name: /Enterprise single sign-on/ }).click();
// Keycloak Login page
await expect(page.getByRole('heading', { name: 'Sign in to your account' })).toBeVisible();
await page.getByLabel(/Username/).fill(process.env.TEST_USER);
await page.getByLabel('Password', { exact: true }).fill(process.env.TEST_USER_PASSWORD);
await page.getByRole('button', { name: 'Sign In' }).click();
// Back to Vault create account
await expect(page.getByText('Set master password')).toBeVisible();
await page.getByLabel('Master password', { exact: true }).fill('Master password');
await page.getByLabel('Re-type master password').fill('Master password');
await page.getByRole('button', { name: 'Submit' }).click();
// We are now in the default vault page
await expect(page).toHaveTitle(/Vaults/);
});
test('SSO login', async ({ page }) => {
// Landing page
await page.goto('/');
await page.getByLabel(/Email address/).fill(process.env.TEST_USER_MAIL);
await page.getByRole('button', { name: 'Continue' }).click();
// Unlock page
await page.getByRole('link', { name: /Enterprise single sign-on/ }).click();
// Keycloak Login page
await expect(page.getByRole('heading', { name: 'Sign in to your account' })).toBeVisible();
await page.getByLabel(/Username/).fill(process.env.TEST_USER);
await page.getByLabel('Password', { exact: true }).fill(process.env.TEST_USER_PASSWORD);
await page.getByRole('button', { name: 'Sign In' }).click();
// Back to Vault unlock page
await expect(page).toHaveTitle('Vaultwarden Web');
await page.getByLabel('Master password').fill('Master password');
await page.getByRole('button', { name: 'Unlock' }).click();
// We are now in the default vault page
await expect(page).toHaveTitle(/Vaults/);
});
test('Non SSO login', async ({ page }) => {
// Landing page
await page.goto('/');
await page.getByLabel(/Email address/).fill(process.env.TEST_USER_MAIL);
await page.getByRole('button', { name: 'Continue' }).click();
// Unlock page
await page.getByLabel('Master password').fill('Master password');
await page.getByRole('button', { name: 'Log in with master password' }).click();
// We are now in the default vault page
await expect(page).toHaveTitle(/Vaults/);
});
test('Non SSO login Failure', async ({ page, browser }, testInfo: TestInfo) => {
proc = await utils.restartVaultwarden(proc, page, testInfo, {
SSO_ENABLED: true,
SSO_ONLY: true
}, false);
// Landing page
await page.goto('/');
await page.getByLabel(/Email address/).fill(process.env.TEST_USER_MAIL);
await page.getByRole('button', { name: 'Continue' }).click();
// Unlock page
await page.getByLabel('Master password').fill('Master password');
await page.getByRole('button', { name: 'Log in with master password' }).click();
// An error should appear
await expect(page.getByLabel('SSO sign-in is required')).toBeVisible();
});
test('SSO login Failure', async ({ page }, testInfo: TestInfo) => {
proc = await utils.restartVaultwarden(proc, page, testInfo, {
SSO_ENABLED: false
}, false);
// Landing page
await page.goto('/');
await page.getByLabel(/Email address/).fill(process.env.TEST_USER_MAIL);
await page.getByRole('button', { name: 'Continue' }).click();
// Unlock page
await page.getByRole('link', { name: /Enterprise single sign-on/ }).click();
// SSO identifier page
await page.getByLabel('SSO identifier').fill('Random');
await page.getByRole('button', { name: 'Log in' }).click();
// An error should appear
await expect(page.getByLabel('SSO sign-in is not available')).toBeVisible();
});


@ -1,4 +1,4 @@
[toolchain]
channel = "1.77.0"
channel = "1.77.2"
components = [ "rustfmt", "clippy" ]
profile = "minimal"


@ -286,7 +286,7 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbCon
err_code!("User already exists", Status::Conflict.code)
}
let mut user = User::new(data.email);
let mut user = User::new(data.email, None);
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
if CONFIG.mail_enabled() {


@ -6,8 +6,8 @@ use serde_json::Value;
use crate::{
api::{
core::{log_user_event, two_factor::email},
register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult, JsonUpcase, Notify,
PasswordOrOtpData, UpdateType,
register_push_device, unregister_push_device, AnonymousNotify, ApiResult, EmptyResult, JsonResult, JsonUpcase,
Notify, PasswordOrOtpData, UpdateType,
},
auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
crypto,
@ -31,6 +31,7 @@ pub fn routes() -> Vec<rocket::Route> {
get_public_keys,
post_keys,
post_password,
post_set_password,
post_kdf,
post_rotatekey,
post_sstamp,
@ -81,6 +82,21 @@ pub struct RegisterData {
OrganizationUserId: Option<String>,
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
pub struct SetPasswordData {
Kdf: Option<i32>,
KdfIterations: Option<i32>,
KdfMemory: Option<i32>,
KdfParallelism: Option<i32>,
Key: String,
Keys: Option<KeysData>,
MasterPasswordHash: String,
MasterPasswordHint: Option<String>,
#[allow(dead_code)]
OrgIdentifier: Option<String>,
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct KeysData {
@ -160,13 +176,11 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
err!("Registration email does not match invite email")
}
} else if Invitation::take(&email, &mut conn).await {
for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
user_org.status = UserOrgStatus::Accepted as i32;
user_org.save(&mut conn).await?;
}
UserOrganization::confirm_user_invitations(&user.uuid, &mut conn).await?;
user
} else if CONFIG.is_signup_allowed(&email)
|| EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
|| (CONFIG.emergency_access_allowed()
&& EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some())
{
user
} else {
@ -178,7 +192,7 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
// because the vaultwarden admin can invite anyone, regardless
// of other signup restrictions.
if Invitation::take(&email, &mut conn).await || CONFIG.is_signup_allowed(&email) {
User::new(email.clone())
User::new(email.clone(), None)
} else {
err!("Registration not allowed or user already exists")
}
@ -217,7 +231,6 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
error!("Error sending welcome email: {:#?}", e);
}
user.last_verifying_at = Some(user.created_at);
} else if let Err(e) = mail::send_welcome(&user.email).await {
error!("Error sending welcome email: {:#?}", e);
@ -229,12 +242,64 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
}
user.save(&mut conn).await?;
// accept any open emergency access invitations
if !CONFIG.mail_enabled() && CONFIG.emergency_access_allowed() {
for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await {
let _ = emergency_invite.accept_invite(&user.uuid, &user.email, &mut conn).await;
}
}
Ok(Json(json!({
"Object": "register",
"CaptchaBypassToken": "",
})))
}
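// Sets the initial master password for an already-authenticated user, e.g. the
// "Set master password" step a freshly SSO-enrolled account goes through in the tests above.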
#[post("/accounts/set-password", data = "<data>")]
async fn post_set_password(data: JsonUpcase<SetPasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: SetPasswordData = data.into_inner().data;
let mut user = headers.user;
// Check against the password hint setting here so if it fails, the user
// can retry without losing their invitation below.
let password_hint = clean_password_hint(&data.MasterPasswordHint);
enforce_password_hint_setting(&password_hint)?;
if let Some(client_kdf_iter) = data.KdfIterations {
user.client_kdf_iter = client_kdf_iter;
}
if let Some(client_kdf_type) = data.Kdf {
user.client_kdf_type = client_kdf_type;
}
// We need to allow revision-date to use the old security_timestamp
let routes = ["revision_date"];
let routes: Option<Vec<String>> = Some(routes.iter().map(ToString::to_string).collect());
user.client_kdf_memory = data.KdfMemory;
user.client_kdf_parallelism = data.KdfParallelism;
user.set_password(&data.MasterPasswordHash, Some(data.Key), false, routes);
user.password_hint = password_hint;
if let Some(keys) = data.Keys {
user.private_key = Some(keys.EncryptedPrivateKey);
user.public_key = Some(keys.PublicKey);
}
if CONFIG.mail_enabled() {
mail::send_set_password(&user.email.to_lowercase(), &user.name).await?;
}
user.save(&mut conn).await?;
Ok(Json(json!({
"Object": "set-password",
"CaptchaBypassToken": "",
})))
}
#[get("/accounts/profile")]
async fn profile(headers: Headers, mut conn: DbConn) -> Json<Value> {
Json(headers.user.to_json(&mut conn).await)
@ -568,7 +633,7 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
// Prevent triggering cipher updates via WebSockets by settings UpdateType::None
// The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
// We force the users to logout after the user has been saved to try and prevent these issues.
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None)
.await?
}
}
@ -929,16 +994,37 @@ struct SecretVerificationRequest {
MasterPasswordHash: String,
}
// Change the KDF Iterations if necessary
pub async fn kdf_upgrade(user: &mut User, pwd_hash: &str, conn: &mut DbConn) -> ApiResult<()> {
if user.password_iterations != CONFIG.password_iterations() {
user.password_iterations = CONFIG.password_iterations();
user.set_password(pwd_hash, None, false, None);
if let Err(e) = user.save(conn).await {
error!("Error updating user: {:#?}", e);
}
}
Ok(())
}
#[post("/accounts/verify-password", data = "<data>")]
fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers) -> EmptyResult {
async fn verify_password(
data: JsonUpcase<SecretVerificationRequest>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
let data: SecretVerificationRequest = data.into_inner().data;
let user = headers.user;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password")
}
Ok(())
kdf_upgrade(&mut user, &data.MasterPasswordHash, &mut conn).await?;
Ok(Json(json!({
"MasterPasswordPolicy": {}, // Required for SSO login with mobile apps
})))
}
async fn _api_key(data: JsonUpcase<PasswordOrOtpData>, rotate: bool, headers: Headers, mut conn: DbConn) -> JsonResult {


@ -10,6 +10,7 @@ use rocket::{
};
use serde_json::Value;
use crate::util::NumberOrString;
use crate::{
api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordOrOtpData, UpdateType},
auth::Headers,
@ -321,7 +322,7 @@ async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn:
data.LastKnownRevisionDate = None;
let mut cipher = Cipher::new(data.Type, data.Name.clone());
update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
}
@ -352,7 +353,7 @@ pub async fn update_cipher_from_data(
cipher: &mut Cipher,
data: CipherData,
headers: &Headers,
shared_to_collection: bool,
shared_to_collections: Option<Vec<String>>,
conn: &mut DbConn,
nt: &Notify<'_>,
ut: UpdateType,
@ -391,7 +392,7 @@ pub async fn update_cipher_from_data(
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
None => err!("You don't have permission to add item to organization"),
Some(org_user) => {
if shared_to_collection
if shared_to_collections.is_some()
|| org_user.has_full_access()
|| cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await
{
@ -518,8 +519,15 @@ pub async fn update_cipher_from_data(
)
.await;
}
nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await, &headers.device.uuid, None, conn)
.await;
nt.send_cipher_update(
ut,
cipher,
&cipher.update_users_revision(conn).await,
&headers.device.uuid,
shared_to_collections,
conn,
)
.await;
}
Ok(())
}
@ -580,7 +588,7 @@ async fn post_ciphers_import(
cipher_data.FolderId = folder_uuid;
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?;
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?;
}
let mut user = headers.user;
@ -648,7 +656,7 @@ async fn put_cipher(
err!("Cipher is not write accessible")
}
update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
}
@ -898,7 +906,7 @@ async fn share_cipher_by_uuid(
None => err!("Cipher doesn't exist"),
};
let mut shared_to_collection = false;
let mut shared_to_collections = vec![];
if let Some(organization_uuid) = &data.Cipher.OrganizationId {
for uuid in &data.CollectionIds {
@ -907,7 +915,7 @@ async fn share_cipher_by_uuid(
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, conn).await {
CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
shared_to_collection = true;
shared_to_collections.push(collection.uuid);
} else {
err!("No rights to modify the collection")
}
@ -923,7 +931,7 @@ async fn share_cipher_by_uuid(
UpdateType::SyncCipherCreate
};
update_cipher_from_data(&mut cipher, data.Cipher, headers, shared_to_collection, conn, nt, ut).await?;
update_cipher_from_data(&mut cipher, data.Cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
}
@ -957,7 +965,7 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
struct AttachmentRequestData {
Key: String,
FileName: String,
FileSize: i64,
FileSize: NumberOrString,
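// NumberOrString tolerates clients serializing the size as either a JSON number or a string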
AdminRequest: Option<bool>, // true when attaching from an org vault view
}
@ -987,12 +995,14 @@ async fn post_attachment_v2(
}
let data: AttachmentRequestData = data.into_inner().data;
if data.FileSize < 0 {
let file_size = data.FileSize.into_i64()?;
if file_size < 0 {
err!("Attachment size can't be negative")
}
let attachment_id = crypto::generate_attachment_id();
let attachment =
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, file_size, Some(data.Key));
attachment.save(&mut conn).await.expect("Error saving attachment");
let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);


@ -61,7 +61,9 @@ async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
for ea in emergency_access_list {
emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
emergency_access_list_json.push(grantee)
}
}
Json(json!({
@ -95,7 +97,9 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
Some(emergency_access) => Ok(Json(
emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
)),
None => err!("Emergency access not valid."),
}
}
@ -209,7 +213,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
err!("You can not set yourself as an emergency contact.")
}
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
None => {
if !CONFIG.invitations_allowed() {
err!(format!("Grantee user does not exist: {}", &email))
@ -224,11 +228,12 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
invitation.save(&mut conn).await?;
}
let mut user = User::new(email.clone());
let mut user = User::new(email.clone(), None);
user.save(&mut conn).await?;
user
(user, true)
}
Some(user) => user,
Some(user) if user.password_hash.is_empty() => (user, true),
Some(user) => (user, false),
};
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@ -256,15 +261,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
&grantor_user.email,
)
.await?;
} else {
// Automatically mark user as accepted if no email invites
match User::find_by_mail(&email, &mut conn).await {
Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
},
None => err!("Grantee user not found."),
}
} else if !new_user {
// if mail is not enabled, immediately accept the invitation for existing users
new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
}
Ok(())
@ -308,17 +307,12 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
&grantor_user.email,
)
.await?;
} else {
if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
}
// Automatically mark user as accepted if no email invites
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
} else if !grantee_user.password_hash.is_empty() {
// accept the invitation for existing user
emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
} else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
}
Ok(())
@ -367,10 +361,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
&& grantor_user.name == claims.grantor_name
&& grantor_user.email == claims.grantor_email
{
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;
if CONFIG.mail_enabled() {
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@ -382,26 +373,6 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
}
}
async fn accept_invite_process(
grantee_uuid: &str,
emergency_access: &mut EmergencyAccess,
grantee_email: &str,
conn: &mut DbConn,
) -> EmptyResult {
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
emergency_access.email = None;
emergency_access.save(conn).await
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ConfirmData {


@ -40,6 +40,7 @@ pub fn routes() -> Vec<Route> {
post_organization_collection_delete,
bulk_delete_organization_collections,
get_org_details,
get_org_domain_sso_details,
get_org_users,
send_invite,
reinvite_user,
@ -56,6 +57,8 @@ pub fn routes() -> Vec<Route> {
post_org_import,
list_policies,
list_policies_token,
list_policies_invited_user,
get_policy_master_password,
get_policy,
put_policy,
get_organization_tax,
@ -96,6 +99,7 @@ pub fn routes() -> Vec<Route> {
get_org_export,
api_key,
rotate_api_key,
get_auto_enroll_status,
]
}
@ -168,7 +172,7 @@ async fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, mut co
};
let org = Organization::new(data.Name, data.BillingEmail, private_key, public_key);
let mut user_org = UserOrganization::new(headers.user.uuid, org.uuid.clone());
let mut user_org = UserOrganization::new(headers.user.uuid, org.uuid.clone(), None);
let collection = Collection::new(org.uuid.clone(), data.CollectionName, None);
user_org.akey = data.Key;
@ -302,6 +306,17 @@ async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json<Value>
}))
}
// Called during the SSO enrollment
// The `_identifier` should be the hardcoded value returned by `get_org_domain_sso_details`
// The returned `Id` will then be passed to `get_policy_master_password`, which largely ignores it
#[get("/organizations/<_identifier>/auto-enroll-status")]
fn get_auto_enroll_status(_identifier: &str) -> JsonResult {
Ok(Json(json!({
"Id": "_",
"ResetPasswordEnabled": false, // Not implemented
})))
}
#[get("/organizations/<org_id>/collections")]
async fn get_org_collections(org_id: &str, _headers: ManagerHeadersLoose, mut conn: DbConn) -> Json<Value> {
Json(json!({
@ -329,27 +344,19 @@ async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose,
&& GroupUser::has_full_access_by_member(org_id, &user_org.uuid, &mut conn).await);
for col in Collection::find_by_organization(org_id, &mut conn).await {
// assigned indicates whether the current user has access to the given collection
let mut assigned = has_full_access_to_org;
// check whether the current user has access to the given collection
let assigned = has_full_access_to_org
|| CollectionUser::has_access_to_collection_by_user(&col.uuid, &user_org.user_uuid, &mut conn).await
|| (CONFIG.org_groups_enabled()
&& GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await);
// get the users assigned directly to the given collection
let users: Vec<Value> = coll_users
.iter()
.filter(|collection_user| collection_user.collection_uuid == col.uuid)
.map(|collection_user| {
// check if the current user is assigned to this collection directly
if collection_user.user_uuid == user_org.uuid {
assigned = true;
}
SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
})
.map(|collection_user| SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json())
.collect();
// check if the current user has access to the given collection via a group
if !assigned && CONFIG.org_groups_enabled() {
assigned = GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await;
}
// get the group details for the given collection
let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
CollectionGroup::find_by_collection(&col.uuid, &mut conn)
@ -672,24 +679,16 @@ async fn get_org_collection_detail(
Vec::with_capacity(0)
};
let mut assigned = false;
let users: Vec<Value> =
CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(&collection.uuid, &mut conn)
.await
.iter()
.map(|collection_user| {
// Remember `user_uuid` is swapped here with the `user_org.uuid` with a join during the `find_by_collection_swap_user_uuid_with_org_user_uuid` call.
// We check here if the current user is assigned to this collection or not.
if collection_user.user_uuid == user_org.uuid {
assigned = true;
}
SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
})
.collect();
if user_org.access_all {
assigned = true;
}
let assigned = Collection::can_access_collection(&user_org, &collection.uuid, &mut conn).await;
let mut json_object = collection.to_json();
json_object["Assigned"] = json!(assigned);
@ -783,6 +782,17 @@ async fn _get_org_details(org_id: &str, host: &str, user_uuid: &str, conn: &mut
json!(ciphers_json)
}
// Endpoint called when the user selects SSO login (body: `{ "email": "" }`).
// Returning a Domain/Organization here allows the client to prefill it and avoids prompting the user.
// Vaultwarden SSO login is not linked to an Org, so we return a dummy value.
#[post("/organizations/domain/sso/details")]
fn get_org_domain_sso_details() -> JsonResult {
Ok(Json(json!({
"organizationIdentifier": "vaultwarden",
"ssoAvailable": CONFIG.sso_enabled()
})))
}
#[derive(FromForm)]
struct GetOrgUserData {
#[field(name = "includeCollections")]
@ -902,7 +912,7 @@ async fn send_invite(
invitation.save(&mut conn).await?;
}
let mut user = User::new(email.clone());
let mut user = User::new(email.clone(), None);
user.save(&mut conn).await?;
user
}
@ -919,7 +929,8 @@ async fn send_invite(
}
};
let mut new_user = UserOrganization::new(user.uuid.clone(), String::from(org_id));
let mut new_user =
UserOrganization::new(user.uuid.clone(), String::from(org_id), Some(headers.user.email.clone()));
let access_all = data.AccessAll.unwrap_or(false);
new_user.access_all = access_all;
new_user.atype = new_type;
@ -1618,7 +1629,7 @@ async fn post_org_import(
let mut ciphers = Vec::new();
for cipher_data in data.Ciphers {
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await.ok();
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await.ok();
ciphers.push(cipher);
}
@ -1674,7 +1685,50 @@ async fn list_policies_token(org_id: &str, token: &str, mut conn: DbConn) -> Jso
})))
}
#[get("/organizations/<org_id>/policies/<pol_type>")]
// Called during the SSO enrollment.
// Since the VW SSO flow is not linked to an organization, it will be called with a dummy or undefined `org_id`
#[allow(non_snake_case)]
#[get("/organizations/<org_id>/policies/invited-user?<userId>")]
async fn list_policies_invited_user(org_id: &str, userId: &str, mut conn: DbConn) -> JsonResult {
if userId.is_empty() {
err!("userId must not be empty");
}
let user_orgs = UserOrganization::find_invited_by_user(userId, &mut conn).await;
let policies_json: Vec<Value> = if user_orgs.into_iter().any(|user_org| user_org.org_uuid == org_id) {
let policies = OrgPolicy::find_by_org(org_id, &mut conn).await;
policies.iter().map(OrgPolicy::to_json).collect()
} else {
Vec::with_capacity(0)
};
Ok(Json(json!({
"Data": policies_json,
"Object": "list",
"ContinuationToken": null
})))
}
// Called during the SSO enrollment.
#[get("/organizations/<org_id>/policies/master-password", rank = 1)]
fn get_policy_master_password(org_id: &str, _headers: Headers) -> JsonResult {
let data = match CONFIG.sso_master_password_policy() {
Some(policy) => policy,
None => "null".to_string(),
};
let policy = OrgPolicy {
uuid: String::from(org_id),
org_uuid: String::from(org_id),
atype: OrgPolicyType::MasterPassword as i32,
enabled: CONFIG.sso_master_password_policy().is_some(),
data,
};
Ok(Json(policy.to_json()))
}
#[get("/organizations/<org_id>/policies/<pol_type>", rank = 2)]
async fn get_policy(org_id: &str, pol_type: i32, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
Some(pt) => pt,
@ -1907,7 +1961,8 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers,
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
};
let mut new_org_user = UserOrganization::new(user.uuid.clone(), String::from(org_id));
let mut new_org_user =
UserOrganization::new(user.uuid.clone(), String::from(org_id), Some(headers.user.email.clone()));
new_org_user.access_all = false;
new_org_user.atype = UserOrgType::User as i32;
new_org_user.status = user_org_status;


@ -92,7 +92,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
Some(user) => user, // exists in vaultwarden
None => {
// User does not exist yet
let mut new_user = User::new(user_data.Email.clone());
let mut new_user = User::new(user_data.Email.clone(), None);
new_user.save(&mut conn).await?;
if !CONFIG.mail_enabled() {
@ -108,7 +108,12 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
};
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await {
Some(org) => (org.name, org.billing_email),
None => err!("Error looking up organization"),
};
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone(), Some(org_email.clone()));
new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
new_org_user.access_all = false;
new_org_user.atype = UserOrgType::User as i32;
@ -117,11 +122,6 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
new_org_user.save(&mut conn).await?;
if CONFIG.mail_enabled() {
let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await {
Some(org) => (org.name, org.billing_email),
None => err!("Error looking up organization"),
};
mail::send_invite(
&user_data.Email,
&user.uuid,


@ -284,10 +284,6 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
}
pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
// email is as entered by the user, so it needs to be normalized before
// comparison with auth_user below.
let email = &email.to_lowercase();
let split: Vec<&str> = response.split(':').collect();
if split.len() != 2 {
err!(


@ -1,6 +1,6 @@
use std::{
net::IpAddr,
sync::Arc,
sync::{Arc, Mutex},
time::{Duration, SystemTime},
};
@ -16,14 +16,13 @@ use rocket::{http::ContentType, response::Redirect, Route};
use tokio::{
fs::{create_dir_all, remove_file, symlink_metadata, File},
io::{AsyncReadExt, AsyncWriteExt},
net::lookup_host,
};
use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};
use crate::{
error::Error,
util::{get_reqwest_client_builder, Cached},
util::{get_reqwest_client_builder, Cached, CustomDnsResolver, CustomResolverError},
CONFIG,
};
@ -49,48 +48,32 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
let pool_idle_timeout = Duration::from_secs(10);
// Reuse the client between requests
let client = get_reqwest_client_builder()
get_reqwest_client_builder()
.cookie_provider(Arc::clone(&cookie_store))
.timeout(icon_download_timeout)
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
.hickory_dns(true)
.default_headers(default_headers.clone());
match client.build() {
Ok(client) => client,
Err(e) => {
error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
get_reqwest_client_builder()
.cookie_provider(cookie_store)
.timeout(icon_download_timeout)
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
.hickory_dns(false)
.default_headers(default_headers)
.build()
.expect("Failed to build client")
}
}
.dns_resolver(CustomDnsResolver::instance())
.default_headers(default_headers.clone())
.build()
.expect("Failed to build client")
});
// Build Regex only once since this takes a lot of time.
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
// Special HashMap which holds the user defined Regex to speedup matching the regex.
static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
#[get("/<domain>/icon.png")]
fn icon_external(domain: &str) -> Option<Redirect> {
if !is_valid_domain(domain) {
warn!("Invalid domain: {}", domain);
return None;
}
if check_domain_blacklist_reason(domain).await.is_some() {
if is_domain_blacklisted(domain) {
return None;
}
let url = template.replace("{}", domain);
let url = CONFIG._icon_service_url().replace("{}", domain);
match CONFIG.icon_redirect_code() {
301 => Some(Redirect::moved(url)), // legacy permanent redirect
302 => Some(Redirect::found(url)), // legacy temporary redirect
@ -103,11 +86,6 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
}
}
#[get("/<domain>/icon.png")]
async fn icon_external(domain: &str) -> Option<Redirect> {
icon_redirect(domain, &CONFIG._icon_service_url()).await
}
#[get("/<domain>/icon.png")]
async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
@ -166,153 +144,28 @@ fn is_valid_domain(domain: &str) -> bool {
true
}
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(not(feature = "unstable"))]
fn is_global(ip: IpAddr) -> bool {
match ip {
IpAddr::V4(ip) => {
// check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
// globally routable addresses in the 192.0.0.0/24 range.
if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
return true;
}
!ip.is_private()
&& !ip.is_loopback()
&& !ip.is_link_local()
&& !ip.is_broadcast()
&& !ip.is_documentation()
&& !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
&& !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
&& !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
&& !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
// Make sure the address is not in 0.0.0.0/8
&& ip.octets()[0] != 0
}
IpAddr::V6(ip) => {
if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
true
} else {
!ip.is_multicast()
&& !ip.is_loopback()
&& !((ip.segments()[0] & 0xffc0) == 0xfe80)
&& !((ip.segments()[0] & 0xfe00) == 0xfc00)
&& !ip.is_unspecified()
&& !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
}
}
}
}
pub fn is_domain_blacklisted(domain: &str) -> bool {
let Some(config_blacklist) = CONFIG.icon_blacklist_regex() else {
return false;
};
#[cfg(feature = "unstable")]
fn is_global(ip: IpAddr) -> bool {
ip.is_global()
}
// Compiled domain blacklist
static COMPILED_BLACKLIST: Mutex<Option<(String, Regex)>> = Mutex::new(None);
let mut guard = COMPILED_BLACKLIST.lock().unwrap();
/// These are some tests to check that the implementations match
/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo test --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..u8::MAX {
println!("Iter: {}/255", a);
for b in 0..u8::MAX {
for c in 0..u8::MAX {
for d in 0..u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
// If the stored regex is up to date, use it
if let Some((value, regex)) = &*guard {
if value == &config_blacklist {
return regex.is_match(domain);
}
}
#[test]
#[ignore]
fn test_ipv6_global() {
use ring::rand::{SecureRandom, SystemRandom};
let mut v = [0u8; 16];
let rand = SystemRandom::new();
for i in 0..1_000 {
println!("Iter: {}/1_000", i);
for _ in 0..10_000_000 {
rand.fill(&mut v).expect("Error generating random values");
let ip = IpAddr::V6(std::net::Ipv6Addr::new(
(v[14] as u16) << 8 | v[15] as u16,
(v[12] as u16) << 8 | v[13] as u16,
(v[10] as u16) << 8 | v[11] as u16,
(v[8] as u16) << 8 | v[9] as u16,
(v[6] as u16) << 8 | v[7] as u16,
(v[4] as u16) << 8 | v[5] as u16,
(v[2] as u16) << 8 | v[3] as u16,
(v[0] as u16) << 8 | v[1] as u16,
));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
}
// If we don't have a regex stored, or it's not up to date, recreate it
let regex = Regex::new(&config_blacklist).unwrap();
let is_match = regex.is_match(domain);
*guard = Some((config_blacklist, regex));
#[derive(Clone)]
enum DomainBlacklistReason {
Regex,
IP,
}
use cached::proc_macro::cached;
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
// First check the blacklist regex if there is a match.
// This prevents the blocked domain(s) from being leaked via a DNS lookup.
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
// Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
regex.is_match(domain)
} else {
// Clear the current list if the previous key doesn't exists.
// To prevent growing of the HashMap after someone has changed it via the admin interface.
if ICON_BLACKLIST_REGEX.len() >= 1 {
ICON_BLACKLIST_REGEX.clear();
}
// Generate the regex to store in too the Lazy Static HashMap.
let blacklist_regex = Regex::new(&blacklist).unwrap();
let is_match = blacklist_regex.is_match(domain);
ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
is_match
};
if is_match {
debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
return Some(DomainBlacklistReason::Regex);
}
}
if CONFIG.icon_blacklist_non_global_ips() {
if let Ok(s) = lookup_host((domain, 0)).await {
for addr in s {
if !is_global(addr.ip()) {
debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
return Some(DomainBlacklistReason::IP);
}
}
}
}
None
is_match
}
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
@ -342,6 +195,13 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
}
Err(e) => {
// If this error comes from the custom resolver, it means the domain is blacklisted
// or resolves to a non-global IP; don't save the miss file in that case, to avoid leaking it
if let Some(error) = CustomResolverError::downcast_ref(&e) {
warn!("{error}");
return None;
}
warn!("Unable to download icon: {:?}", e);
let miss_indicator = path + ".miss";
save_icon(&miss_indicator, &[]).await;
@ -491,42 +351,48 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
let ssldomain = format!("https://{domain}");
let httpdomain = format!("http://{domain}");
// First check the domain as given during the request for both HTTPS and HTTP.
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
Ok(c) => Ok(c),
Err(e) => {
let mut sub_resp = Err(e);
// First check the domain as given during the request for HTTPS.
let resp = match get_page(&ssldomain).await {
Err(e) if CustomResolverError::downcast_ref(&e).is_none() => {
// If we get an error that is not caused by the blacklist, we retry with HTTP
match get_page(&httpdomain).await {
mut sub_resp @ Err(_) => {
// When the domain is not an IP, and has more than one dot, remove all subdomains.
let is_ip = domain.parse::<IpAddr>();
if is_ip.is_err() && domain.matches('.').count() > 1 {
let mut domain_parts = domain.split('.');
let base_domain = format!(
"{base}.{tld}",
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{base_domain}");
let httpbase = format!("http://{base_domain}");
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
// When the domain is not an IP, and has more than one dot, remove all subdomains.
let is_ip = domain.parse::<IpAddr>();
if is_ip.is_err() && domain.matches('.').count() > 1 {
let mut domain_parts = domain.split('.');
let base_domain = format!(
"{base}.{tld}",
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{base_domain}");
let httpbase = format!("http://{base_domain}");
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
}
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
}
// When the domain is not an IP, and has less than 2 dots, try to add www. in front of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{domain}");
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{www_domain}");
let httpwww = format!("http://{www_domain}");
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
// When the domain is not an IP, and has less than 2 dots, try to add www. in front of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{domain}");
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{www_domain}");
let httpwww = format!("http://{www_domain}");
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
}
}
sub_resp
}
res => res,
}
sub_resp
}
// If we get a result or a blacklist error, just continue
res => res,
};
// Create the iconlist
@ -573,21 +439,12 @@ async fn get_page(url: &str) -> Result<Response, Error> {
}
async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
None => (),
}
let mut client = CLIENT.get(url);
if !referer.is_empty() {
client = client.header("Referer", referer)
}
match client.send().await {
Ok(c) => c.error_for_status().map_err(Into::into),
Err(e) => err_silent!(format!("{e}")),
}
Ok(client.send().await?.error_for_status()?)
}
/// Returns a Integer with the priority of the type of the icon which to prefer.
@ -670,12 +527,6 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
}
async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
match check_domain_blacklist_reason(domain).await {
Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
None => (),
}
let icon_result = get_icon_url(domain).await?;
let mut buffer = Bytes::new();
@ -711,22 +562,19 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
_ => debug!("Extracted icon from data:image uri is invalid"),
};
} else {
match get_page_with_referer(&icon.href, &icon_result.referer).await {
Ok(res) => {
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
// Check if the icon type is allowed, else try an icon from the list.
icon_type = get_icon_type(&buffer);
if icon_type.is_none() {
buffer.clear();
debug!("Icon from {}, is not a valid image type", icon.href);
continue;
}
info!("Downloaded icon from {}", icon.href);
break;
}
Err(e) => debug!("{:?}", e),
};
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
// Check if the icon type is allowed, else try an icon from the list.
icon_type = get_icon_type(&buffer);
if icon_type.is_none() {
buffer.clear();
debug!("Icon from {}, is not a valid image type", icon.href);
continue;
}
info!("Downloaded icon from {}", icon.href);
break;
}
}


@ -1,8 +1,10 @@
use chrono::Utc;
use chrono::{NaiveDateTime, Utc};
use num_traits::FromPrimitive;
use rocket::serde::json::Json;
use rocket::{
form::{Form, FromForm},
http::Status,
response::Redirect,
serde::json::Json,
Route,
};
use serde_json::Value;
@ -10,21 +12,22 @@ use serde_json::Value;
use crate::{
api::{
core::{
accounts::{PreloginData, RegisterData, _prelogin, _register},
accounts::{PreloginData, RegisterData, _prelogin, _register, kdf_upgrade},
log_user_event,
two_factor::{authenticator, duo, email, enforce_2fa_policy, webauthn, yubikey},
},
push::register_push_device,
ApiResult, EmptyResult, JsonResult, JsonUpcase,
},
auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
auth,
auth::{AuthMethod, AuthMethodScope, ClientHeaders, ClientIp},
db::{models::*, DbConn},
error::MapResult,
mail, util, CONFIG,
mail, sso, util, CONFIG,
};
pub fn routes() -> Vec<Route> {
routes![login, prelogin, identity_register]
routes![login, prelogin, identity_register, _prevalidate, prevalidate, authorize, oidcsignin, oidcsignin_error]
}
#[post("/connect/token", data = "<data>")]
@ -38,6 +41,7 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
_check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
_refresh_login(data, &mut conn).await
}
"password" if CONFIG.sso_enabled() && CONFIG.sso_only() => err!("SSO sign-in is required"),
"password" => {
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.password, "password cannot be blank")?;
@ -61,6 +65,17 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
_api_key_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
}
"authorization_code" if CONFIG.sso_enabled() => {
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.code, "code cannot be blank")?;
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
_sso_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
}
"authorization_code" => err!("SSO sign-in is not available"),
t => err!("Invalid type", t),
};
@ -94,45 +109,154 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
login_result
}
// Return Status::Unauthorized to trigger logout
async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
// Extract token
let token = data.refresh_token.unwrap();
let refresh_token = match data.refresh_token {
Some(token) => token,
None => err_code!("Missing refresh_token", Status::Unauthorized.code),
};
// Get device by refresh token
let mut device = Device::find_by_refresh_token(&token, conn).await.map_res("Invalid refresh token")?;
let scope = "api offline_access";
let scope_vec = vec!["api".into(), "offline_access".into()];
// Common
let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap();
// ---
// Disabled this variable; it was used to generate the JWT.
// Because this might get used in the future, and it is added by the Bitwarden Server, let's keep it, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
device.save(conn).await?;
match auth::refresh_tokens(&refresh_token, conn).await {
Err(err) => err_code!(err.to_string(), Status::Unauthorized.code),
Ok((mut device, user, auth_tokens)) => {
// Save to update `device.updated_at` to track usage
device.save(conn).await?;
let result = json!({
"access_token": access_token,
"expires_in": expires_in,
"token_type": "Bearer",
"refresh_token": device.refresh_token,
"Key": user.akey,
"PrivateKey": user.private_key,
let result = json!({
"refresh_token": auth_tokens.refresh_token(),
"access_token": auth_tokens.access_token(),
"expires_in": auth_tokens.expires_in(),
"token_type": "Bearer",
"Key": user.akey,
"PrivateKey": user.private_key,
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
"scope": scope,
"unofficialServer": true,
});
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
"scope": auth_tokens.scope(),
"unofficialServer": true,
});
Ok(Json(result))
Ok(Json(result))
}
}
}
// After exchanging the code we need to check first if 2FA is needed before continuing
async fn _sso_login(data: ConnectData, user_uuid: &mut Option<String>, conn: &mut DbConn, ip: &ClientIp) -> JsonResult {
AuthMethod::Sso.check_scope(data.scope.as_ref())?;
// Ratelimit the login
crate::ratelimit::check_limit_login(&ip.ip)?;
let code = match data.code.as_ref() {
None => err!("Got no code in OIDC data"),
Some(code) => code,
};
let user_infos = sso::exchange_code(code, conn).await?;
// Will trigger 2FA flow if needed
let user_data = match SsoUser::find_by_identifier_or_email(&user_infos.identifier, &user_infos.email, conn).await {
None => None,
Some((user, None)) if user.private_key.is_some() && !CONFIG.sso_signups_match_email() => {
error!(
"Login failure ({}), existing non SSO user ({}) with same email ({}) and association is disabled",
user_infos.identifier, user.uuid, user.email
);
err_silent!("Existing non SSO user with same email")
}
Some((user, Some(sso_user))) if sso_user.identifier != user_infos.identifier => {
error!(
"Login failure ({}), existing SSO user ({}) with same email ({})",
user_infos.identifier, user.uuid, user.email
);
err_silent!("Existing non SSO user with same email")
}
Some((user, sso_user)) => {
let (mut device, new_device) = get_device(&data, conn, &user).await?;
let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
Some((user, device, new_device, twofactor_token, sso_user))
}
};
// 2FA has been validated; fetch the full user information
let auth_user = sso::redeem(&user_infos.state, conn).await?;
let now = Utc::now().naive_utc();
let (user, mut device, new_device, twofactor_token, sso_user) = match user_data {
None => {
if !CONFIG.is_email_domain_allowed(&user_infos.email) {
err!("Email domain not allowed");
}
if !user_infos.email_verified.unwrap_or(true) {
err!("Email needs to be verified before you can use VaultWarden");
}
let mut user = User::new(user_infos.email, user_infos.user_name);
user.verified_at = Some(now);
user.save(conn).await?;
let (device, new_device) = get_device(&data, conn, &user).await?;
(user, device, new_device, None, None)
}
Some((mut user, device, new_device, twofactor_token, sso_user)) if user.private_key.is_none() => {
// User was invited; a stub was created
user.verified_at = Some(now);
if let Some(user_name) = user_infos.user_name {
user.name = user_name;
}
if !CONFIG.mail_enabled() {
UserOrganization::confirm_user_invitations(&user.uuid, conn).await?;
}
user.save(conn).await?;
(user, device, new_device, twofactor_token, sso_user)
}
Some((user, device, new_device, twofactor_token, sso_user)) => {
if user.email != user_infos.email {
if CONFIG.mail_enabled() {
mail::send_sso_change_email(&user_infos.email).await?;
}
info!("User {} email changed in SSO provider from {} to {}", user.uuid, user.email, user_infos.email);
}
(user, device, new_device, twofactor_token, sso_user)
}
};
if sso_user.is_none() {
let user_sso = SsoUser {
user_uuid: user.uuid.clone(),
identifier: user_infos.identifier,
};
user_sso.save(conn).await?;
}
// Set the user_uuid here so it is passed back and used for event logging.
*user_uuid = Some(user.uuid.clone());
let auth_tokens = sso::create_auth_tokens(
&device,
&user,
auth_user.refresh_token,
&auth_user.access_token,
auth_user.expires_in,
)?;
authenticated_response(&user, &mut device, new_device, auth_tokens, twofactor_token, &now, conn, ip).await
}
async fn _password_login(
@ -142,11 +266,7 @@ async fn _password_login(
ip: &ClientIp,
) -> JsonResult {
// Validate scope
let scope = data.scope.as_ref().unwrap();
if scope != "api offline_access" {
err!("Scope not supported")
}
let scope_vec = vec!["api".into(), "offline_access".into()];
AuthMethod::Password.check_scope(data.scope.as_ref())?;
// Ratelimit the login
crate::ratelimit::check_limit_login(&ip.ip)?;
@ -193,15 +313,7 @@ async fn _password_login(
)
}
// Change the KDF Iterations
if user.password_iterations != CONFIG.password_iterations() {
user.password_iterations = CONFIG.password_iterations();
user.set_password(password, None, false, None);
if let Err(e) = user.save(conn).await {
error!("Error updating user: {:#?}", e);
}
}
kdf_upgrade(&mut user, password, conn).await?;
// Check if the user is disabled
if !user.enabled {
@ -248,12 +360,28 @@ async fn _password_login(
)
}
let (mut device, new_device) = get_device(&data, conn, &user).await;
let (mut device, new_device) = get_device(&data, conn, &user).await?;
let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
let auth_tokens = auth::AuthTokens::new(&device, &user, AuthMethod::Password);
authenticated_response(&user, &mut device, new_device, auth_tokens, twofactor_token, &now, conn, ip).await
}
#[allow(clippy::too_many_arguments)]
async fn authenticated_response(
user: &User,
device: &mut Device,
new_device: bool,
auth_tokens: auth::AuthTokens,
twofactor_token: Option<String>,
now: &NaiveDateTime,
conn: &mut DbConn,
ip: &ClientIp,
) -> JsonResult {
if CONFIG.mail_enabled() && new_device {
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), now, &device.name).await {
error!("Error sending new device email: {:#?}", e);
if CONFIG.require_device_email() {
@ -269,34 +397,30 @@ async fn _password_login(
// register push device
if !new_device {
register_push_device(&mut device, conn).await?;
register_push_device(device, conn).await?;
}
// Common
// ---
// Disabled this variable; it was used to generate the JWT.
// Because this might get used in the future, and it is added by the Bitwarden Server, let's keep it, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
// Save to update `device.updated_at` to track usage
device.save(conn).await?;
let mut result = json!({
"access_token": access_token,
"expires_in": expires_in,
"access_token": auth_tokens.access_token(),
"expires_in": auth_tokens.expires_in(),
"token_type": "Bearer",
"refresh_token": device.refresh_token,
"refresh_token": auth_tokens.refresh_token(),
"Key": user.akey,
"PrivateKey": user.private_key,
//"TwoFactorToken": "11122233333444555666777888999"
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false,// TODO: Same as above
"scope": scope,
"ResetMasterPassword": false, // TODO: Same as above
"ForcePasswordReset": false,
"MasterPasswordPolicy": {
"object": "masterPasswordPolicy",
},
"scope": auth_tokens.scope(),
"unofficialServer": true,
"UserDecryptionOptions": {
"HasMasterPassword": !user.password_hash.is_empty(),
@ -308,7 +432,7 @@ async fn _password_login(
result["TwoFactorToken"] = Value::String(token);
}
info!("User {} logged in successfully. IP: {}", username, ip.ip);
info!("User {} logged in successfully. IP: {}", user.email, ip.ip);
Ok(Json(result))
}
@ -322,9 +446,9 @@ async fn _api_key_login(
crate::ratelimit::check_limit_login(&ip.ip)?;
// Validate scope
match data.scope.as_ref().unwrap().as_ref() {
"api" => _user_api_key_login(data, user_uuid, conn, ip).await,
"api.organization" => _organization_api_key_login(data, conn, ip).await,
match data.scope.as_ref() {
Some(scope) if scope == &AuthMethod::UserApiKey.scope() => _user_api_key_login(data, user_uuid, conn, ip).await,
Some(scope) if scope == &AuthMethod::OrgApiKey.scope() => _organization_api_key_login(data, conn, ip).await,
_ => err!("Scope not supported"),
}
}
@ -372,7 +496,7 @@ async fn _user_api_key_login(
)
}
let (mut device, new_device) = get_device(&data, conn, &user).await;
let (mut device, new_device) = get_device(&data, conn, &user).await?;
if CONFIG.mail_enabled() && new_device {
let now = Utc::now().naive_utc();
@ -390,15 +514,15 @@ async fn _user_api_key_login(
}
}
// Common
let scope_vec = vec!["api".into()];
// ---
// Disabled this variable; it was used to generate the JWT.
// Because this might get used in the future, and it is added by the Bitwarden Server, let's keep it, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
let access_claims = auth::LoginJwtClaims::default(&device, &user, &auth::AuthMethod::UserApiKey);
// Save to update `device.updated_at` to track usage
device.save(conn).await?;
info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
@ -406,8 +530,8 @@ async fn _user_api_key_login(
// Note: No refresh_token is returned. The CLI just repeats the
// client_credentials login flow when the existing token expires.
let result = json!({
"access_token": access_token,
"expires_in": expires_in,
"access_token": access_claims.token(),
"expires_in": access_claims.expires_in(),
"token_type": "Bearer",
"Key": user.akey,
"PrivateKey": user.private_key,
@ -417,7 +541,7 @@ async fn _user_api_key_login(
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false, // TODO: Same as above
"scope": "api",
"scope": auth::AuthMethod::UserApiKey.scope(),
"unofficialServer": true,
});
@ -442,20 +566,20 @@ async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &
err!("Incorrect client_secret", format!("IP: {}. Organization: {}.", ip.ip, org_api_key.org_uuid))
}
let claim = generate_organization_api_key_login_claims(org_api_key.uuid, org_api_key.org_uuid);
let access_token = crate::auth::encode_jwt(&claim);
let claim = auth::generate_organization_api_key_login_claims(org_api_key.uuid, org_api_key.org_uuid);
let access_token = auth::encode_jwt(&claim);
Ok(Json(json!({
"access_token": access_token,
"expires_in": 3600,
"token_type": "Bearer",
"scope": "api.organization",
"scope": auth::AuthMethod::OrgApiKey.scope(),
"unofficialServer": true,
})))
}
/// Retrieves an existing device or creates a new device from ConnectData and the User
async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Device, bool) {
async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> ApiResult<(Device, bool)> {
// On iOS, device_type sends "iOS", on others it sends a number
// When unknown or unable to parse, return 14, which is 'Unknown Browser'
let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(14);
@ -467,12 +591,13 @@ async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Devi
let device = match Device::find_by_uuid_and_user(&device_id, &user.uuid, conn).await {
Some(device) => device,
None => {
let device = Device::new(device_id, user.uuid.clone(), device_name, device_type);
new_device = true;
Device::new(device_id, user.uuid.clone(), device_name, device_type)
device
}
};
(device, new_device)
Ok((device, new_device))
}
async fn twofactor_auth(
@ -513,9 +638,7 @@ async fn twofactor_auth(
}
Some(TwoFactorType::Webauthn) => webauthn::validate_webauthn_login(&user.uuid, twofactor_code, conn).await?,
Some(TwoFactorType::YubiKey) => yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
Some(TwoFactorType::Duo) => {
duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
}
Some(TwoFactorType::Duo) => duo::validate_duo_login(&user.email, twofactor_code, conn).await?,
Some(TwoFactorType::Email) => {
email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
}
@ -543,12 +666,13 @@ async fn twofactor_auth(
TwoFactorIncomplete::mark_complete(&user.uuid, &device.uuid, conn).await?;
if !CONFIG.disable_2fa_remember() && remember == 1 {
Ok(Some(device.refresh_twofactor_remember()))
let two_factor = if !CONFIG.disable_2fa_remember() && remember == 1 {
Some(device.refresh_twofactor_remember())
} else {
device.delete_twofactor_remember();
Ok(None)
}
None
};
Ok(two_factor)
}
fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
@ -689,11 +813,131 @@ struct ConnectData {
two_factor_remember: Option<i32>,
#[field(name = uncased("authrequest"))]
auth_request: Option<String>,
// Needed for authorization code
#[form(field = uncased("code"))]
code: Option<String>,
}
fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
if value.is_none() {
err!(msg)
}
Ok(())
}
// Deprecated but still needed for Mobile apps
#[get("/account/prevalidate")]
fn _prevalidate() -> JsonResult {
prevalidate()
}
#[get("/sso/prevalidate")]
fn prevalidate() -> JsonResult {
if CONFIG.sso_enabled() {
let sso_token = sso::encode_ssotoken_claims();
Ok(Json(json!({
"token": sso_token,
})))
} else {
err!("SSO sign-in is not available")
}
}
#[get("/connect/oidc-signin?<code>&<state>", rank = 1)]
async fn oidcsignin(code: String, state: String, conn: DbConn) -> ApiResult<Redirect> {
oidcsignin_redirect(
state.clone(),
sso::OIDCCodeWrapper::Ok {
code,
state,
},
&conn,
)
.await
}
// Bitwarden clients appear to only care about code and state, so we pipe them through
// cf: https://github.com/bitwarden/clients/blob/8e46ef1ae5be8b62b0d3d0b9d1b1c62088a04638/libs/angular/src/auth/components/sso.component.ts#L68C11-L68C23)
#[get("/connect/oidc-signin?<state>&<error>&<error_description>", rank = 2)]
async fn oidcsignin_error(
state: String,
error: String,
error_description: Option<String>,
conn: DbConn,
) -> ApiResult<Redirect> {
oidcsignin_redirect(
state.clone(),
sso::OIDCCodeWrapper::Error {
state,
error,
error_description,
},
&conn,
)
.await
}
// iss and scope parameters are needed for redirection to work on iOS.
async fn oidcsignin_redirect(state: String, wrapper: sso::OIDCCodeWrapper, conn: &DbConn) -> ApiResult<Redirect> {
let code = sso::encode_code_claims(wrapper);
let nonce = match SsoNonce::find(&state, conn).await {
Some(n) => n,
None => err!(format!("Failed to retrive redirect_uri with {state}")),
};
let mut url = match url::Url::parse(&nonce.redirect_uri) {
Ok(url) => url,
Err(err) => err!(format!("Failed to parse redirect uri ({}): {err}", nonce.redirect_uri)),
};
url.query_pairs_mut()
.append_pair("code", &code)
.append_pair("state", &state)
.append_pair("scope", &AuthMethod::Sso.scope())
.append_pair("iss", &CONFIG.domain());
debug!("Redirection to {url}");
Ok(Redirect::temporary(String::from(url)))
}
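For illustration, with AuthMethod::Sso the scope appended here is "api offline_access"; assuming a hypothetical client redirect_uri and a placeholder domain, the final redirect looks like:

<redirect_uri>?code=<wrapped code JWT>&state=<state>&scope=api+offline_access&iss=https%3A%2F%2Fvw.example.com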
#[derive(Debug, Clone, Default, FromForm)]
struct AuthorizeData {
#[field(name = uncased("client_id"))]
#[field(name = uncased("clientid"))]
client_id: String,
#[field(name = uncased("redirect_uri"))]
#[field(name = uncased("redirecturi"))]
redirect_uri: String,
#[allow(unused)]
response_type: Option<String>,
#[allow(unused)]
scope: Option<String>,
state: String,
#[allow(unused)]
code_challenge: Option<String>,
#[allow(unused)]
code_challenge_method: Option<String>,
#[allow(unused)]
response_mode: Option<String>,
#[allow(unused)]
domain_hint: Option<String>,
#[allow(unused)]
#[field(name = uncased("ssoToken"))]
sso_token: Option<String>,
}
// The `redirect_uri` will change depending on the client (web, Android, iOS, ...)
#[get("/connect/authorize?<data..>")]
async fn authorize(data: AuthorizeData, conn: DbConn) -> ApiResult<Redirect> {
let AuthorizeData {
client_id,
redirect_uri,
state,
..
} = data;
let auth_url = sso::authorize_url(state, &client_id, &redirect_uri, conn).await?;
Ok(Redirect::temporary(String::from(auth_url)))
}
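A rough sketch of the round trip these routes implement (mounted under the identity routes; hosts and client ids are placeholders):

1. The client opens GET /identity/connect/authorize?client_id=web&redirect_uri=...&state=<state>.
2. authorize() builds the IdP authorization URL via sso::authorize_url (persisting an SsoNonce) and redirects there.
3. The IdP sends the user back to GET /identity/connect/oidc-signin?code=<code>&state=<state>.
4. oidcsignin_redirect() wraps the code and redirects to the stored redirect_uri with code, state, scope and iss.
5. The client exchanges the wrapped code at POST /connect/token with grant_type=authorization_code, handled by _sso_login.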

View File

@ -20,7 +20,7 @@ pub use crate::api::{
core::two_factor::send_incomplete_2fa_notifications,
core::{emergency_notification_reminder_job, emergency_request_timeout_job},
core::{event_cleanup_job, events_routes as core_events_routes},
icons::routes as icons_routes,
icons::{is_domain_blacklisted, routes as icons_routes},
identity::routes as identity_routes,
notifications::routes as notifications_routes,
notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
@ -36,7 +36,7 @@ use crate::db::{models::User, DbConn};
use crate::util;
// Type aliases for API methods results
type ApiResult<T> = Result<T, crate::error::Error>;
pub type ApiResult<T> = Result<T, crate::error::Error>;
pub type JsonResult = ApiResult<Json<Value>>;
pub type EmptyResult = ApiResult<()>;

View File

@ -1,6 +1,5 @@
// JWT Handling
//
use chrono::{TimeDelta, Utc};
use chrono::{DateTime, TimeDelta, Utc};
use num_traits::FromPrimitive;
use once_cell::sync::{Lazy, OnceCell};
@ -9,11 +8,23 @@ use openssl::rsa::Rsa;
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
use crate::{error::Error, CONFIG};
use crate::{
api::ApiResult,
db::{
models::{Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
DbConn,
},
error::Error,
sso, CONFIG,
};
const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
pub static DEFAULT_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_hours(2).unwrap());
// Limit at which Bitwarden considers the token expired
pub static BW_EXPIRATION: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_minutes(5).unwrap());
pub static DEFAULT_REFRESH_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_days(30).unwrap());
pub static DEFAULT_ACCESS_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_hours(2).unwrap());
static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));
pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));
@ -72,7 +83,7 @@ pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
}
}
fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Error> {
pub fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Error> {
let mut validation = jsonwebtoken::Validation::new(JWT_ALGORITHM);
validation.leeway = 30; // 30 seconds
validation.validate_exp = true;
@ -91,6 +102,10 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
}
}
pub fn decode_refresh(token: &str) -> Result<RefreshJwtClaims, Error> {
decode_jwt(token, JWT_LOGIN_ISSUER.to_string())
}
pub fn decode_login(token: &str) -> Result<LoginJwtClaims, Error> {
decode_jwt(token, JWT_LOGIN_ISSUER.to_string())
}
@ -164,6 +179,73 @@ pub struct LoginJwtClaims {
pub amr: Vec<String>,
}
impl LoginJwtClaims {
pub fn new(device: &Device, user: &User, nbf: i64, exp: i64, scope: Vec<String>, now: DateTime<Utc>) -> Self {
// ---
// Disabled adding these keys to the JWT since they could cause it to get too large.
// Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients.
// Because these might get used in the future, and they are added by the Bitwarden Server, let's keep them, but commented out.
// ---
// fn arg: orgs: Vec<super::UserOrganization>,
// ---
// let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
// let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
// let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
// let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
if exp <= (now + *BW_EXPIRATION).timestamp() {
warn!("Raise access_token lifetime to more than 5min.")
}
// Create the JWT claims struct, to send to the client
Self {
nbf,
exp,
iss: JWT_LOGIN_ISSUER.to_string(),
sub: user.uuid.clone(),
premium: true,
name: user.name.clone(),
email: user.email.clone(),
email_verified: !CONFIG.mail_enabled() || user.verified_at.is_some(),
// ---
// Disabled adding these keys to the JWT since they could cause it to get too large.
// Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients.
// Because these might get used in the future, and they are added by the Bitwarden Server, let's keep them, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// orgowner,
// orgadmin,
// orguser,
// orgmanager,
sstamp: user.security_stamp.clone(),
device: device.uuid.clone(),
scope,
amr: vec!["Application".into()],
}
}
pub fn default(device: &Device, user: &User, auth_method: &AuthMethod) -> Self {
let time_now = Utc::now();
Self::new(
device,
user,
time_now.timestamp(),
(time_now + *DEFAULT_ACCESS_VALIDITY).timestamp(),
auth_method.scope_vec(),
time_now,
)
}
pub fn token(&self) -> String {
encode_jwt(&self)
}
pub fn expires_in(&self) -> i64 {
self.exp - Utc::now().timestamp()
}
}
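A minimal usage sketch for these helpers, assuming device and user are already loaded:

let access_claims = LoginJwtClaims::default(&device, &user, &AuthMethod::Password);
let jwt = access_claims.token();       // encode_jwt(&access_claims)
let ttl = access_claims.expires_in();  // remaining seconds, roughly DEFAULT_ACCESS_VALIDITY (2 hours)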
#[derive(Debug, Serialize, Deserialize)]
pub struct InviteJwtClaims {
// Not before
@ -356,11 +438,6 @@ use rocket::{
request::{FromRequest, Outcome, Request},
};
use crate::db::{
models::{Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
DbConn,
};
pub struct Host {
pub host: String,
}
@ -689,7 +766,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
_ => err_handler!("Error getting DB"),
};
if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
err_handler!("The current user isn't a manager for this collection")
}
}
@ -762,10 +839,6 @@ impl From<ManagerHeadersLoose> for Headers {
}
}
}
async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
org_user.has_full_access()
|| Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
}
impl ManagerHeaders {
pub async fn from_loose(
@ -777,7 +850,7 @@ impl ManagerHeaders {
if uuid::Uuid::parse_str(col_id).is_err() {
err!("Collection Id is malformed!");
}
if !can_access_collection(&h.org_user, col_id, conn).await {
if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
err!("You don't have access to all collections!");
}
}
@ -880,3 +953,149 @@ impl<'r> FromRequest<'r> for WsAccessTokenHeader {
})
}
}
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AuthMethod {
OrgApiKey,
Password,
Sso,
UserApiKey,
}
pub trait AuthMethodScope {
fn scope_vec(&self) -> Vec<String>;
fn scope(&self) -> String;
fn check_scope(&self, scope: Option<&String>) -> ApiResult<String>;
}
impl AuthMethodScope for AuthMethod {
fn scope(&self) -> String {
match self {
AuthMethod::OrgApiKey => "api.organization".to_string(),
AuthMethod::Password => "api offline_access".to_string(),
AuthMethod::Sso => "api offline_access".to_string(),
AuthMethod::UserApiKey => "api".to_string(),
}
}
fn scope_vec(&self) -> Vec<String> {
self.scope().split_whitespace().map(str::to_string).collect()
}
fn check_scope(&self, scope: Option<&String>) -> ApiResult<String> {
let method_scope = self.scope();
match scope {
None => err!("Missing scope"),
Some(scope) if scope == &method_scope => Ok(method_scope),
Some(scope) => err!(format!("Scope ({scope}) not supported")),
}
}
}
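Illustrative assertions for how the scopes map and are checked:

assert_eq!(AuthMethod::Password.scope(), "api offline_access");
assert_eq!(AuthMethod::UserApiKey.scope_vec(), vec!["api".to_string()]);
// check_scope() only accepts an exact match:
assert!(AuthMethod::Sso.check_scope(Some(&"api offline_access".to_string())).is_ok());
assert!(AuthMethod::OrgApiKey.check_scope(Some(&"api".to_string())).is_err());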
#[derive(Debug, Serialize, Deserialize)]
pub enum TokenWrapper {
Access(String),
Refresh(String),
}
#[derive(Debug, Serialize, Deserialize)]
pub struct RefreshJwtClaims {
// Not before
pub nbf: i64,
// Expiration time
pub exp: i64,
// Issuer
pub iss: String,
// Subject
pub sub: AuthMethod,
pub device_token: String,
pub token: Option<TokenWrapper>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct AuthTokens {
pub refresh_claims: RefreshJwtClaims,
pub access_claims: LoginJwtClaims,
}
impl AuthTokens {
pub fn refresh_token(&self) -> String {
encode_jwt(&self.refresh_claims)
}
pub fn access_token(&self) -> String {
self.access_claims.token()
}
pub fn expires_in(&self) -> i64 {
self.access_claims.expires_in()
}
pub fn scope(&self) -> String {
self.refresh_claims.sub.scope()
}
// Create refresh_token and access_token with default validity
pub fn new(device: &Device, user: &User, sub: AuthMethod) -> Self {
let time_now = Utc::now();
let access_claims = LoginJwtClaims::default(device, user, &sub);
let refresh_claims = RefreshJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + *DEFAULT_REFRESH_VALIDITY).timestamp(),
iss: JWT_LOGIN_ISSUER.to_string(),
sub,
device_token: device.refresh_token.clone(),
token: None,
};
Self {
refresh_claims,
access_claims,
}
}
}
pub async fn refresh_tokens(refresh_token: &str, conn: &mut DbConn) -> ApiResult<(Device, User, AuthTokens)> {
let time_now = Utc::now();
let refresh_claims = match decode_refresh(refresh_token) {
Err(err) => err!(format!("Impossible to read refresh_token: {err}")),
Ok(claims) => claims,
};
// Get device by refresh token
let mut device = match Device::find_by_refresh_token(&refresh_claims.device_token, conn).await {
None => err!("Invalid refresh token"),
Some(device) => device,
};
// Save to update `updated_at`.
device.save(conn).await?;
let user = match User::find_by_uuid(&device.user_uuid, conn).await {
None => err!("Impossible to find user"),
Some(user) => user,
};
if refresh_claims.exp < time_now.timestamp() {
err!("Expired refresh token");
}
let auth_tokens = match refresh_claims.sub {
AuthMethod::Sso if CONFIG.sso_enabled() && CONFIG.sso_auth_only_not_session() => {
AuthTokens::new(&device, &user, refresh_claims.sub)
}
AuthMethod::Sso if CONFIG.sso_enabled() => sso::exchange_refresh_token(&device, &user, &refresh_claims).await?,
AuthMethod::Sso => err!("SSO is now disabled, Login again using email and master password"),
AuthMethod::Password if CONFIG.sso_enabled() && CONFIG.sso_only() => err!("SSO is now required, Login again"),
AuthMethod::Password => AuthTokens::new(&device, &user, refresh_claims.sub),
_ => err!("Invalid auth method cannot refresh token"),
};
Ok((device, user, auth_tokens))
}

View File

@ -409,7 +409,9 @@ make_config! {
/// Auth Request cleanup schedule |> Cron schedule of the job that cleans old auth requests from the auth request.
/// Defaults to every minute. Set blank to disable this job.
auth_request_purge_schedule: String, false, def, "30 * * * * *".to_string();
/// Incomplete SSO nonce purge schedule |> Cron schedule of the job that purges SSO nonces from incomplete flows.
/// Defaults to daily (00:20). Set blank to disable this job.
purge_incomplete_sso_nonce: String, false, def, "0 20 0 * * *".to_string();
},
/// General settings
@ -606,6 +608,40 @@ make_config! {
org_groups_enabled: bool, false, def, false;
},
/// OpenID Connect SSO settings
sso {
/// Enabled
sso_enabled: bool, true, def, false;
/// Disable Email+Master Password login
sso_only: bool, true, def, false;
/// Associate existing user based on email
sso_signups_match_email: bool, true, def, true;
/// Client ID
sso_client_id: String, false, def, String::new();
/// Client Key
sso_client_secret: Pass, false, def, String::new();
/// Authority Server
sso_authority: String, false, def, String::new();
/// Scopes required for authorize
sso_scopes: String, false, def, "email profile".to_string();
/// Additional authorization URL parameters
sso_authorize_extra_params: String, false, def, String::new();
/// Use PKCE during Auth Code flow
sso_pkce: bool, false, def, false;
/// Regex for additional trusted ID token audiences
sso_audience_trusted: String, false, option;
/// Callback path
sso_callback_path: String, false, gen, |c| generate_sso_callback_path(&c.domain);
/// Optional SSO master password policy
sso_master_password_policy: String, true, option;
/// Use SSO only for auth, not the session lifecycle
sso_auth_only_not_session: bool, true, def, false;
/// Client cache for discovery endpoint. Duration in seconds (0 or less to disable).
sso_client_cache_expiration: u64, true, def, 0;
/// Log all tokens
sso_debug_tokens: bool, true, def, false;
},
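A minimal illustrative .env fragment enabling the feature (all values are placeholders, not defaults):

SSO_ENABLED=true
SSO_AUTHORITY=https://idp.example.com/realms/example
SSO_CLIENT_ID=vaultwarden
SSO_CLIENT_SECRET=<secret>
SSO_SCOPES="email profile"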
/// Yubikey settings
yubico: _enable_yubico {
/// Enabled
@ -629,7 +665,7 @@ make_config! {
/// Host
duo_host: String, true, option;
/// Application Key (generated automatically)
_duo_akey: Pass, false, option;
_duo_akey: Pass, true, option;
},
/// SMTP Email Settings
@ -815,6 +851,16 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
err!("All Duo options need to be set for global Duo support")
}
if cfg.sso_enabled {
if cfg.sso_client_id.is_empty() || cfg.sso_client_secret.is_empty() || cfg.sso_authority.is_empty() {
err!("`SSO_CLIENT_ID`, `SSO_CLIENT_SECRET` and `SSO_AUTHORITY` must be set for SSO support")
}
internal_sso_issuer_url(&cfg.sso_authority)?;
internal_sso_redirect_url(&cfg.sso_callback_path)?;
check_master_password_policy(&cfg.sso_master_password_policy)?;
}
if cfg._enable_yubico {
if cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() {
err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` must be set for Yubikey OTP support")
@ -987,6 +1033,28 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
Ok(())
}
fn internal_sso_issuer_url(sso_authority: &String) -> Result<openidconnect::IssuerUrl, Error> {
match openidconnect::IssuerUrl::new(sso_authority.clone()) {
Err(err) => err!(format!("Invalid sso_authority UR ({sso_authority}): {err}")),
Ok(issuer_url) => Ok(issuer_url),
}
}
fn internal_sso_redirect_url(sso_callback_path: &String) -> Result<openidconnect::RedirectUrl, Error> {
match openidconnect::RedirectUrl::new(sso_callback_path.clone()) {
Err(err) => err!(format!("Invalid sso_callback_path ({sso_callback_path} built using `domain`) URL: {err}")),
Ok(redirect_url) => Ok(redirect_url),
}
}
fn check_master_password_policy(sso_master_password_policy: &Option<String>) -> Result<(), Error> {
let policy = sso_master_password_policy.as_ref().map(|mpp| serde_json::from_str::<serde_json::Value>(mpp));
if let Some(Err(error)) = policy {
err!(format!("Invalid sso_master_password_policy ({error}), Ensure that it's correctly escaped with ''"))
}
Ok(())
}
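Note that the policy value is only checked to be parseable JSON here; its structure is not validated server-side. A hypothetical value (field names illustrative):

SSO_MASTER_PASSWORD_POLICY='{"enforceOnLogin":true,"minLength":12}'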
/// Extracts an RFC 6454 web origin from a URL.
fn extract_url_origin(url: &str) -> String {
match Url::parse(url) {
@ -1018,6 +1086,10 @@ fn generate_smtp_img_src(embed_images: bool, domain: &str) -> String {
}
}
fn generate_sso_callback_path(domain: &str) -> String {
format!("{domain}/identity/connect/oidc-signin")
}
/// Generate the correct URL for the icon service.
/// This will be used within icons.rs to call the external icon service.
fn generate_icon_service_url(icon_service: &str) -> String {
@ -1060,6 +1132,26 @@ fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls
"starttls".to_string()
}
/// Parses a multiline list of key/value pairs (`key=value`).
/// Comment lines (starting with `//`) are ignored.
fn parse_param_list(config: String) -> Vec<(String, String)> {
config
.lines()
.map(|l| l.trim())
.filter(|l| !l.is_empty() && !l.starts_with("//"))
.filter_map(|l| {
let split = l.split('=').collect::<Vec<&str>>();
match &split[..] {
[key, value] => Some(((*key).to_string(), (*value).to_string())),
_ => {
println!("[WARNING] Failed to parse ({l}). Expected key=value");
None
}
}
})
.collect()
}
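A small sketch of the expected behavior (hypothetical input):

// The comment line is skipped; each remaining `key=value` line becomes a pair.
let raw = "// provider quirks\nprompt=consent\naudience=vaultwarden".to_string();
assert_eq!(
    parse_param_list(raw),
    vec![("prompt".into(), "consent".into()), ("audience".into(), "vaultwarden".into())]
);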
impl Config {
pub fn load() -> Result<Self, Error> {
// Loading from env and file
@ -1249,6 +1341,22 @@ impl Config {
}
}
}
pub fn sso_issuer_url(&self) -> Result<openidconnect::IssuerUrl, Error> {
internal_sso_issuer_url(&self.sso_authority())
}
pub fn sso_redirect_url(&self) -> Result<openidconnect::RedirectUrl, Error> {
internal_sso_redirect_url(&self.sso_callback_path())
}
pub fn sso_scopes_vec(&self) -> Vec<String> {
self.sso_scopes().split_whitespace().map(str::to_string).collect()
}
pub fn sso_authorize_extra_params_vec(&self) -> Vec<(String, String)> {
parse_param_list(self.sso_authorize_extra_params())
}
}
use handlebars::{
@ -1305,7 +1413,9 @@ where
reg!("email/send_emergency_access_invite", ".html");
reg!("email/send_org_invite", ".html");
reg!("email/send_single_org_removed_from_org", ".html");
reg!("email/set_password", ".html");
reg!("email/smtp_test", ".html");
reg!("email/sso_change_email", ".html");
reg!("email/twofactor_email", ".html");
reg!("email/verify_email", ".html");
reg!("email/welcome_must_verify", ".html");

View File

@ -1,6 +1,6 @@
use serde_json::Value;
use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
use crate::CONFIG;
db_object! {
@ -102,6 +102,15 @@ impl Collection {
json_object["HidePasswords"] = json!(hide_passwords);
json_object
}
pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
org_user.has_status(UserOrgStatus::Confirmed)
&& (org_user.has_full_access()
|| CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
|| (CONFIG.org_groups_enabled()
&& (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
|| GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
}
}
use crate::db::DbConn;
@ -252,17 +261,6 @@ impl Collection {
}
}
// Check if a user has access to a specific collection
// FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
// For now this is a good solution without making too many changes.
pub async fn has_access_by_collection_and_user_uuid(
collection_uuid: &str,
user_uuid: &str,
conn: &mut DbConn,
) -> bool {
Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
}
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
Self::find_by_user_uuid(user_uuid.to_owned(), conn)
.await
@ -644,6 +642,10 @@ impl CollectionUser {
Ok(())
}}
}
pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
}
}
/// Database methods

View File

@ -1,6 +1,7 @@
use chrono::{NaiveDateTime, Utc};
use data_encoding::{BASE64, BASE64URL};
use crate::{crypto, CONFIG};
use crate::crypto;
use core::fmt;
db_object! {
@ -42,13 +43,12 @@ impl Device {
push_uuid: None,
push_token: None,
refresh_token: String::new(),
refresh_token: crypto::encode_random_bytes::<64>(BASE64URL),
twofactor_remember: None,
}
}
pub fn refresh_twofactor_remember(&mut self) -> String {
use data_encoding::BASE64;
let twofactor_remember = crypto::encode_random_bytes::<180>(BASE64);
self.twofactor_remember = Some(twofactor_remember.clone());
@ -59,61 +59,6 @@ impl Device {
self.twofactor_remember = None;
}
pub fn refresh_tokens(&mut self, user: &super::User, scope: Vec<String>) -> (String, i64) {
// If there is no refresh token, we create one
if self.refresh_token.is_empty() {
use data_encoding::BASE64URL;
self.refresh_token = crypto::encode_random_bytes::<64>(BASE64URL);
}
// Update the expiration of the device and the last update date
let time_now = Utc::now();
self.updated_at = time_now.naive_utc();
// ---
// Disabled adding these keys to the JWT since they could cause it to get too large.
// Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients.
// Because these might get used in the future, and they are added by the Bitwarden Server, let's keep them, but commented out.
// ---
// fn arg: orgs: Vec<super::UserOrganization>,
// ---
// let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
// let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
// let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
// let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
// Create the JWT claims struct, to send to the client
use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
let claims = LoginJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + *DEFAULT_VALIDITY).timestamp(),
iss: JWT_LOGIN_ISSUER.to_string(),
sub: user.uuid.clone(),
premium: true,
name: user.name.clone(),
email: user.email.clone(),
email_verified: !CONFIG.mail_enabled() || user.verified_at.is_some(),
// ---
// Disabled adding these keys to the JWT since they could cause it to get too large.
// Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients.
// Because these might get used in the future, and they are added by the Bitwarden Server, let's keep them, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// orgowner,
// orgadmin,
// orguser,
// orgmanager,
sstamp: user.security_stamp.clone(),
device: self.uuid.clone(),
scope,
amr: vec!["Application".into()],
};
(encode_jwt(&claims), DEFAULT_VALIDITY.num_seconds())
}
pub fn is_push_device(&self) -> bool {
matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
}

View File

@ -81,25 +81,32 @@ impl EmergencyAccess {
})
}
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
} else if let Some(email) = self.email.as_deref() {
Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
match User::find_by_mail(email, conn).await {
Some(user) => user,
None => {
// remove outstanding invitations which should not exist
let _ = Self::delete_all_by_grantee_email(email, conn).await;
return None;
}
}
} else {
None
return None;
};
json!({
Some(json!({
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
"Email": grantee_user.as_ref().map_or("", |u| &u.email),
"Name": grantee_user.as_ref().map_or("", |u| &u.name),
"GranteeId": grantee_user.uuid,
"Email": grantee_user.email,
"Name": grantee_user.name,
"Object": "emergencyAccessGranteeDetails",
})
}))
}
}
@ -214,6 +221,13 @@ impl EmergencyAccess {
Ok(())
}
pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
ea.delete(conn).await?;
}
Ok(())
}
pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
User::update_uuid_revision(&self.grantor_uuid, conn).await;
@ -285,6 +299,15 @@ impl EmergencyAccess {
}}
}
pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::email.eq(grantee_email))
.filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
@ -292,6 +315,21 @@ impl EmergencyAccess {
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if self.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
self.status = EmergencyAccessStatus::Accepted as i32;
self.grantee_uuid = Some(String::from(grantee_uuid));
self.email = None;
self.save(conn).await
}
}
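A condensed sketch of how registration could consume pending invites with these helpers (the actual wiring lives outside this hunk):

for mut ea in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await {
    // The grantee just registered: flip the invite to accepted and bind their uuid.
    ea.accept_invite(&user.uuid, &user.email, &mut conn).await?;
}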
// endregion

View File

@ -11,6 +11,7 @@ mod group;
mod org_policy;
mod organization;
mod send;
mod sso_nonce;
mod two_factor;
mod two_factor_incomplete;
mod user;
@ -28,6 +29,7 @@ pub use self::group::{CollectionGroup, Group, GroupUser};
pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization};
pub use self::send::{Send, SendType};
pub use self::sso_nonce::SsoNonce;
pub use self::two_factor::{TwoFactor, TwoFactorType};
pub use self::two_factor_incomplete::TwoFactorIncomplete;
pub use self::user::{Invitation, User, UserKdfType, UserStampException};
pub use self::user::{Invitation, SsoUser, User, UserKdfType, UserStampException};

View File

@ -76,12 +76,11 @@ impl OrgPolicy {
}
pub fn to_json(&self) -> Value {
let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
json!({
"Id": self.uuid,
"OrganizationId": self.org_uuid,
"Type": self.atype,
"Data": data_json,
"Data": serde_json::from_str(&self.data).unwrap_or(Value::Null),
"Enabled": self.enabled,
"Object": "policy",
})

View File

@ -25,6 +25,7 @@ db_object! {
pub uuid: String,
pub user_uuid: String,
pub org_uuid: String,
pub invited_by_email: Option<String>,
pub access_all: bool,
pub akey: String,
@ -197,12 +198,13 @@ impl Organization {
static ACTIVATE_REVOKE_DIFF: i32 = 128;
impl UserOrganization {
pub fn new(user_uuid: String, org_uuid: String) -> Self {
pub fn new(user_uuid: String, org_uuid: String, invited_by_email: Option<String>) -> Self {
Self {
uuid: crate::util::get_uuid(),
user_uuid,
org_uuid,
invited_by_email,
access_all: false,
akey: String::new(),
@ -344,6 +346,25 @@ impl UserOrganization {
pub async fn to_json(&self, conn: &mut DbConn) -> Value {
let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();
let permissions = json!({
// TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
"accessEventLogs": false,
"accessImportExport": false,
"accessReports": false,
"createNewCollections": false,
"editAnyCollection": false,
"deleteAnyCollection": false,
"editAssignedCollections": false,
"deleteAssignedCollections": false,
"manageGroups": false,
"managePolicies": false,
"manageSso": false, // Not supported
"manageUsers": false,
"manageResetPassword": false,
"manageScim": false // Not supported (Not AGPLv3 Licensed)
});
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
json!({
"Id": self.org_uuid,
@ -371,27 +392,7 @@ impl UserOrganization {
// "KeyConnectorEnabled": false,
// "KeyConnectorUrl": null,
// TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
// "Permissions": {
// "AccessEventLogs": false,
// "AccessImportExport": false,
// "AccessReports": false,
// "ManageAllCollections": false,
// "CreateNewCollections": false,
// "EditAnyCollection": false,
// "DeleteAnyCollection": false,
// "ManageAssignedCollections": false,
// "editAssignedCollections": false,
// "deleteAssignedCollections": false,
// "ManageCiphers": false,
// "ManageGroups": false,
// "ManagePolicies": false,
// "ManageResetPassword": false,
// "ManageSso": false, // Not supported
// "ManageUsers": false,
// "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
// },
"permissions": permissions,
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
@ -635,6 +636,17 @@ impl UserOrganization {
}}
}
pub async fn confirm_user_invitations(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: {
diesel::update(users_organizations::table)
.filter(users_organizations::user_uuid.eq(user_uuid))
.filter(users_organizations::status.eq(UserOrgStatus::Invited as i32))
.set(users_organizations::status.eq(UserOrgStatus::Accepted as i32))
.execute(conn)
.map_res("Error confirming invitations")
}}
}
pub async fn find_any_state_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table

View File

@ -0,0 +1,89 @@
use chrono::{NaiveDateTime, Utc};
use crate::api::EmptyResult;
use crate::db::{DbConn, DbPool};
use crate::error::MapResult;
use crate::sso::NONCE_EXPIRATION;
db_object! {
#[derive(Identifiable, Queryable, Insertable)]
#[diesel(table_name = sso_nonce)]
#[diesel(primary_key(state))]
pub struct SsoNonce {
pub state: String,
pub nonce: String,
pub verifier: Option<String>,
pub redirect_uri: String,
pub created_at: NaiveDateTime,
}
}
/// Local methods
impl SsoNonce {
pub fn new(state: String, nonce: String, verifier: Option<String>, redirect_uri: String) -> Self {
let now = Utc::now().naive_utc();
SsoNonce {
state,
nonce,
verifier,
redirect_uri,
created_at: now,
}
}
}
/// Database methods
impl SsoNonce {
pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
db_run! { conn:
sqlite, mysql {
diesel::replace_into(sso_nonce::table)
.values(SsoNonceDb::to_db(self))
.execute(conn)
.map_res("Error saving SSO nonce")
}
postgresql {
let value = SsoNonceDb::to_db(self);
diesel::insert_into(sso_nonce::table)
.values(&value)
.execute(conn)
.map_res("Error saving SSO nonce")
}
}
}
pub async fn delete(state: &str, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(sso_nonce::table.filter(sso_nonce::state.eq(state)))
.execute(conn)
.map_res("Error deleting SSO nonce")
}}
}
pub async fn find(state: &str, conn: &DbConn) -> Option<Self> {
let oldest = Utc::now().naive_utc() - *NONCE_EXPIRATION;
db_run! { conn: {
sso_nonce::table
.filter(sso_nonce::state.eq(state))
.filter(sso_nonce::created_at.ge(oldest))
.first::<SsoNonceDb>(conn)
.ok()
.from_db()
}}
}
pub async fn delete_expired(pool: DbPool) -> EmptyResult {
debug!("Purging expired sso_nonce");
if let Ok(conn) = pool.get().await {
let oldest = Utc::now().naive_utc() - *NONCE_EXPIRATION;
db_run! { conn: {
diesel::delete(sso_nonce::table.filter(sso_nonce::created_at.lt(oldest)))
.execute(conn)
.map_res("Error deleting expired SSO nonce")
}}
} else {
err!("Failed to get DB connection while purging expired sso_nonce")
}
}
}
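A brief lifecycle sketch (state, nonce and verifier come from the authorize step; names assumed):

let sso_nonce = SsoNonce::new(state.clone(), nonce, verifier, redirect_uri);
sso_nonce.save(&mut conn).await?;
// Later, on the IdP callback (entries older than NONCE_EXPIRATION are ignored):
if let Some(found) = SsoNonce::find(&state, &conn).await {
    // validate, then clean up with SsoNonce::delete(&state, &mut conn)
}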

View File

@ -5,7 +5,7 @@ use crate::crypto;
use crate::CONFIG;
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[derive(Identifiable, Queryable, Insertable, AsChangeset, Selectable)]
#[diesel(table_name = users)]
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))]
@ -60,6 +60,14 @@ db_object! {
pub struct Invitation {
pub email: String,
}
#[derive(Identifiable, Queryable, Insertable, Selectable)]
#[diesel(table_name = sso_users)]
#[diesel(primary_key(user_uuid))]
pub struct SsoUser {
pub user_uuid: String,
pub identifier: String,
}
}
pub enum UserKdfType {
@ -85,7 +93,7 @@ impl User {
pub const CLIENT_KDF_TYPE_DEFAULT: i32 = UserKdfType::Pbkdf2 as i32;
pub const CLIENT_KDF_ITER_DEFAULT: i32 = 600_000;
pub fn new(email: String) -> Self {
pub fn new(email: String, name: Option<String>) -> Self {
let now = Utc::now().naive_utc();
let email = email.to_lowercase();
@ -97,7 +105,7 @@ impl User {
verified_at: None,
last_verifying_at: None,
login_verify_count: 0,
name: email.clone(),
name: name.unwrap_or(email.clone()),
email,
akey: String::new(),
email_new: None,
@ -246,6 +254,7 @@ impl User {
"Email": self.email,
"EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
"Premium": true,
"PremiumFromOrganization": false,
"MasterPasswordHint": self.password_hint,
"Culture": "en-US",
"TwoFactorEnabled": twofactor_enabled,
@ -257,6 +266,7 @@ impl User {
"ProviderOrganizations": [],
"ForcePasswordReset": false,
"AvatarColor": self.avatar_color,
"UsesKeyConnector": false,
"Object": "profile",
})
}
@ -311,6 +321,7 @@ impl User {
Send::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
Cipher::delete_all_by_user(&self.uuid, conn).await?;
Favorite::delete_all_by_user(&self.uuid, conn).await?;
@ -453,3 +464,51 @@ impl Invitation {
}
}
}
impl SsoUser {
pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
db_run! { conn:
sqlite, mysql {
diesel::replace_into(sso_users::table)
.values(SsoUserDb::to_db(self))
.execute(conn)
.map_res("Error saving SSO user")
}
postgresql {
let value = SsoUserDb::to_db(self);
diesel::insert_into(sso_users::table)
.values(&value)
.execute(conn)
.map_res("Error saving SSO user")
}
}
}
// Written as a union to make the query more legible than using an `or_filter`.
// But `first()` does not appear to work with `union()`, so we use `load()`.
pub async fn find_by_identifier_or_email(
identifier: &str,
mail: &str,
conn: &DbConn,
) -> Option<(User, Option<SsoUser>)> {
let lower_mail = mail.to_lowercase();
db_run! {conn: {
users::table
.inner_join(sso_users::table)
.select(<(UserDb, Option<SsoUserDb>)>::as_select())
.filter(sso_users::identifier.eq(identifier))
.union(
users::table
.left_join(sso_users::table)
.select(<(UserDb, Option<SsoUserDb>)>::as_select())
.filter(users::email.eq(lower_mail))
)
.load(conn)
.expect("Error searching user by SSO identifier and email")
.into_iter()
.next()
.map(|(user, sso_user)| { (user.from_db(), sso_user.from_db()) })
}}
}
}
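The three shapes of the result drive the matching in _sso_login above; a condensed sketch:

match SsoUser::find_by_identifier_or_email(&identifier, &email, &conn).await {
    None => { /* no existing user: a new account is created */ }
    Some((user, None)) => { /* user found by email but not yet linked to this identifier */ }
    Some((user, Some(sso_user))) => { /* linked user; the identifier must match */ }
}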

View File

@ -224,6 +224,7 @@ table! {
uuid -> Text,
user_uuid -> Text,
org_uuid -> Text,
invited_by_email -> Nullable<Text>,
access_all -> Bool,
akey -> Text,
status -> Integer,
@ -243,6 +244,23 @@ table! {
}
}
table! {
sso_nonce (state) {
state -> Text,
nonce -> Text,
verifier -> Nullable<Text>,
redirect_uri -> Text,
created_at -> Timestamp,
}
}
table! {
sso_users (user_uuid) {
user_uuid -> Text,
identifier -> Text,
}
}
table! {
emergency_access (uuid) {
uuid -> Text,
@ -336,6 +354,7 @@ joinable!(collections_groups -> collections (collections_uuid));
joinable!(collections_groups -> groups (groups_uuid));
joinable!(event -> users_organizations (uuid));
joinable!(auth_requests -> users (user_uuid));
joinable!(sso_users -> users (user_uuid));
allow_tables_to_appear_in_same_query!(
attachments,
@ -349,6 +368,7 @@ allow_tables_to_appear_in_same_query!(
org_policies,
organizations,
sends,
sso_users,
twofactor,
users,
users_collections,

View File

@ -224,6 +224,7 @@ table! {
uuid -> Text,
user_uuid -> Text,
org_uuid -> Text,
invited_by_email -> Nullable<Text>,
access_all -> Bool,
akey -> Text,
status -> Integer,
@ -243,6 +244,23 @@ table! {
}
}
table! {
sso_nonce (state) {
state -> Text,
nonce -> Text,
verifier -> Nullable<Text>,
redirect_uri -> Text,
created_at -> Timestamp,
}
}
table! {
sso_users (user_uuid) {
user_uuid -> Text,
identifier -> Text,
}
}
table! {
emergency_access (uuid) {
uuid -> Text,
@ -336,6 +354,7 @@ joinable!(collections_groups -> collections (collections_uuid));
joinable!(collections_groups -> groups (groups_uuid));
joinable!(event -> users_organizations (uuid));
joinable!(auth_requests -> users (user_uuid));
joinable!(sso_users -> users (user_uuid));
allow_tables_to_appear_in_same_query!(
attachments,
@ -349,6 +368,7 @@ allow_tables_to_appear_in_same_query!(
org_policies,
organizations,
sends,
sso_users,
twofactor,
users,
users_collections,

View File

@ -224,6 +224,7 @@ table! {
uuid -> Text,
user_uuid -> Text,
org_uuid -> Text,
invited_by_email -> Nullable<Text>,
access_all -> Bool,
akey -> Text,
status -> Integer,
@ -243,6 +244,23 @@ table! {
}
}
table! {
sso_nonce (state) {
state -> Text,
nonce -> Text,
verifier -> Nullable<Text>,
redirect_uri -> Text,
created_at -> Timestamp,
}
}
table! {
sso_users (user_uuid) {
user_uuid -> Text,
identifier -> Text,
}
}
table! {
emergency_access (uuid) {
uuid -> Text,
@ -336,6 +354,7 @@ joinable!(collections_groups -> collections (collections_uuid));
joinable!(collections_groups -> groups (groups_uuid));
joinable!(event -> users_organizations (uuid));
joinable!(auth_requests -> users (user_uuid));
joinable!(sso_users -> users (user_uuid));
allow_tables_to_appear_in_same_query!(
attachments,
@ -349,6 +368,7 @@ allow_tables_to_appear_in_same_query!(
org_policies,
organizations,
sends,
sso_users,
twofactor,
users,
users_collections,

View File

@ -492,6 +492,30 @@ pub async fn send_change_email(address: &str, token: &str) -> EmptyResult {
send_email(address, &subject, body_html, body_text).await
}
pub async fn send_sso_change_email(address: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/sso_change_email",
json!({
"url": format!("{}/#/settings/account", CONFIG.domain()),
"img_src": CONFIG._smtp_img_src(),
}),
)?;
send_email(address, &subject, body_html, body_text).await
}
pub async fn send_set_password(address: &str, user_name: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/set_password",
json!({
"url": CONFIG.domain(),
"img_src": CONFIG._smtp_img_src(),
"user_name": user_name,
}),
)?;
send_email(address, &subject, body_html, body_text).await
}
pub async fn send_test(address: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/smtp_test",

View File

@ -3,7 +3,7 @@
// The more key/value pairs there are the more recursion occurs.
// We want to keep this as low as possible, but not higher then 128.
// If you go above 128 it will cause rust-analyzer to fail,
#![recursion_limit = "87"]
#![recursion_limit = "90"]
// When enabled use MiMalloc as malloc instead of the default malloc
#[cfg(feature = "enable_mimalloc")]
@ -49,6 +49,7 @@ mod crypto;
mod db;
mod mail;
mod ratelimit;
mod sso;
mod util;
use crate::api::purge_auth_requests;
@ -211,8 +212,8 @@ fn launch_info() {
}
fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
// Depending on the main log level we either want to disable or enable logging for trust-dns.
// Else if there are timeouts it will clutter the logs since trust-dns uses warn for this.
// Depending on the main log level we either want to disable or enable logging for hickory.
// Otherwise, if there are timeouts, they will clutter the logs since hickory logs them at the warn level.
let hickory_level = if level >= log::LevelFilter::Debug {
level
} else {
@ -266,7 +267,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
.level_for("handlebars::render", handlebars_level)
// Prevent cookie_store logs
.level_for("cookie_store", log::LevelFilter::Off)
// Variable level for trust-dns used by reqwest
// Variable level for hickory used by reqwest
.level_for("hickory_resolver::name_server::name_server", hickory_level)
.level_for("hickory_proto::xfer", hickory_level)
.level_for("diesel_logger", diesel_logger_level)
@ -594,6 +595,13 @@ fn schedule_jobs(pool: db::DbPool) {
}));
}
// Purge SSO nonces from incomplete flows (defaults to daily at 00:20).
if !CONFIG.purge_incomplete_sso_nonce().is_empty() {
sched.add(Job::new(CONFIG.purge_incomplete_sso_nonce().parse().unwrap(), || {
runtime.spawn(db::models::SsoNonce::delete_expired(pool.clone()));
}));
}
// Periodically check for jobs to run. We probably won't need any
// jobs that run more often than once a minute, so a default poll
// interval of 30 seconds should be sufficient. Users who want to
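
Like the other scheduled jobs, the purge schedule is a cron expression; assuming the same six-field, seconds-first format used by the existing jobs, the documented default of daily at 00:20 would be written as `PURGE_INCOMPLETE_SSO_NONCE="0 20 0 * * *"` (the exact default string is defined in the config, outside this hunk).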

548
src/sso.rs Normal file
View File

@ -0,0 +1,548 @@
use chrono::Utc;
use regex::Regex;
use std::borrow::Cow;
use std::time::Duration;
use url::Url;
use mini_moka::sync::Cache;
use once_cell::sync::Lazy;
use openidconnect::core::{
CoreClient, CoreIdTokenVerifier, CoreProviderMetadata, CoreResponseType, CoreUserInfoClaims,
};
use openidconnect::reqwest::async_http_client;
use openidconnect::{
AccessToken, AuthDisplay, AuthPrompt, AuthenticationFlow, AuthorizationCode, AuthorizationRequest, ClientId,
ClientSecret, CsrfToken, Nonce, OAuth2TokenResponse, PkceCodeChallenge, PkceCodeVerifier, RefreshToken,
ResponseType, Scope,
};
use crate::{
api::ApiResult,
auth,
auth::{AuthMethod, AuthMethodScope, AuthTokens, TokenWrapper, BW_EXPIRATION, DEFAULT_REFRESH_VALIDITY},
db::{
models::{Device, SsoNonce, User},
DbConn,
},
CONFIG,
};
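// Maps the OAuth2 `state` to the authenticated user for a short window, so the
// second pass of a 2FA login can be served without re-exchanging the code.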
static AC_CACHE: Lazy<Cache<String, AuthenticatedUser>> =
Lazy::new(|| Cache::builder().max_capacity(1000).time_to_live(Duration::from_secs(10 * 60)).build());
static CLIENT_CACHE_KEY: Lazy<String> = Lazy::new(|| "sso-client".to_string());
static CLIENT_CACHE: Lazy<Cache<String, CoreClient>> = Lazy::new(|| {
Cache::builder().max_capacity(1).time_to_live(Duration::from_secs(CONFIG.sso_client_cache_expiration())).build()
});
static SSO_JWT_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|sso", CONFIG.domain_origin()));
pub static NONCE_EXPIRATION: Lazy<chrono::Duration> = Lazy::new(|| chrono::TimeDelta::try_minutes(10).unwrap());
trait AuthorizationRequestExt<'a> {
fn add_extra_params<N: Into<Cow<'a, str>>, V: Into<Cow<'a, str>>>(self, params: Vec<(N, V)>) -> Self;
}
impl<'a, AD: AuthDisplay, P: AuthPrompt, RT: ResponseType> AuthorizationRequestExt<'a>
for AuthorizationRequest<'a, AD, P, RT>
{
fn add_extra_params<N: Into<Cow<'a, str>>, V: Into<Cow<'a, str>>>(mut self, params: Vec<(N, V)>) -> Self {
for (key, value) in params {
self = self.add_extra_param(key, value);
}
self
}
}
#[derive(Debug, Serialize, Deserialize)]
struct SsoTokenJwtClaims {
// Not before
pub nbf: i64,
// Expiration time
pub exp: i64,
// Issuer
pub iss: String,
// Subject
pub sub: String,
}
pub fn encode_ssotoken_claims() -> String {
let time_now = Utc::now();
let claims = SsoTokenJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + chrono::TimeDelta::try_minutes(2).unwrap()).timestamp(),
iss: SSO_JWT_ISSUER.to_string(),
sub: "vaultwarden".to_string(),
};
auth::encode_jwt(&claims)
}
#[derive(Debug, Serialize, Deserialize)]
pub enum OIDCCodeWrapper {
Ok {
code: String,
state: String,
},
Error {
state: String,
error: String,
error_description: Option<String>,
},
}
#[derive(Debug, Serialize, Deserialize)]
struct OIDCCodeClaims {
// Expiration time
pub exp: i64,
// Issuer
pub iss: String,
pub code: OIDCCodeWrapper,
}
pub fn encode_code_claims(code: OIDCCodeWrapper) -> String {
let time_now = Utc::now();
let claims = OIDCCodeClaims {
exp: (time_now + chrono::TimeDelta::try_minutes(5).unwrap()).timestamp(),
iss: SSO_JWT_ISSUER.to_string(),
code,
};
auth::encode_jwt(&claims)
}
#[derive(Clone, Debug, Serialize, Deserialize)]
struct BasicTokenClaims {
iat: Option<i64>,
nbf: Option<i64>,
exp: i64,
}
impl BasicTokenClaims {
fn nbf(&self) -> i64 {
self.nbf.or(self.iat).unwrap_or_else(|| Utc::now().timestamp())
}
}
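// Decode a JWT while deliberately skipping signature validation: only the timing
// claims (iat/nbf/exp) are used here, and the token was just received from the
// issuer over TLS, so nothing security sensitive is derived from it.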
fn decode_token_claims(token_name: &str, token: &str) -> ApiResult<BasicTokenClaims> {
let mut validation = jsonwebtoken::Validation::default();
validation.set_issuer(&[CONFIG.sso_authority()]);
validation.insecure_disable_signature_validation();
validation.validate_aud = false;
match jsonwebtoken::decode(token, &jsonwebtoken::DecodingKey::from_secret(&[]), &validation) {
Ok(btc) => Ok(btc.claims),
Err(err) => err_silent!(format!("Failed to decode basic token claims from {token_name}: {err}")),
}
}
#[rocket::async_trait]
trait CoreClientExt {
async fn _get_client() -> ApiResult<CoreClient>;
async fn cached() -> ApiResult<CoreClient>;
async fn user_info_async(&self, access_token: AccessToken) -> ApiResult<CoreUserInfoClaims>;
fn vw_id_token_verifier(&self) -> CoreIdTokenVerifier<'_>;
}
#[rocket::async_trait]
impl CoreClientExt for CoreClient {
// Call the OpenID discovery endpoint to retrieve the provider configuration
async fn _get_client() -> ApiResult<CoreClient> {
let client_id = ClientId::new(CONFIG.sso_client_id());
let client_secret = ClientSecret::new(CONFIG.sso_client_secret());
let issuer_url = CONFIG.sso_issuer_url()?;
let provider_metadata = match CoreProviderMetadata::discover_async(issuer_url, async_http_client).await {
Err(err) => err!(format!("Failed to discover OpenID provider: {err}")),
Ok(metadata) => metadata,
};
Ok(CoreClient::from_provider_metadata(provider_metadata, client_id, Some(client_secret))
.set_redirect_uri(CONFIG.sso_redirect_url()?))
}
// Simple cache to avoid calling the discovery endpoint on every request
async fn cached() -> ApiResult<CoreClient> {
if CONFIG.sso_client_cache_expiration() > 0 {
match CLIENT_CACHE.get(&*CLIENT_CACHE_KEY) {
Some(client) => Ok(client),
None => Self::_get_client().await.map(|client| {
debug!("Inserting new client in cache");
CLIENT_CACHE.insert(CLIENT_CACHE_KEY.clone(), client.clone());
client
}),
}
} else {
Self::_get_client().await
}
}
async fn user_info_async(&self, access_token: AccessToken) -> ApiResult<CoreUserInfoClaims> {
let endpoint = match self.user_info(access_token, None) {
Err(err) => err!(format!("No user_info endpoint: {err}")),
Ok(endpoint) => endpoint,
};
match endpoint.request_async(async_http_client).await {
Err(err) => err!(format!("Request to user_info endpoint failed: {err}")),
Ok(user_info) => Ok(user_info),
}
}
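// Build the id_token verifier; when SSO_AUDIENCE_TRUSTED is set, additionally
// accept audiences matching the regex (e.g. a hypothetical '^(app1|app2)$').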
fn vw_id_token_verifier(&self) -> CoreIdTokenVerifier<'_> {
let mut verifier = self.id_token_verifier();
if let Some(regex_str) = CONFIG.sso_audience_trusted() {
match Regex::new(&regex_str) {
Ok(regex) => {
verifier = verifier.set_other_audience_verifier_fn(move |aud| regex.is_match(aud));
}
Err(err) => {
error!("Failed to parse SSO_AUDIENCE_TRUSTED={regex_str} regex: {err}");
}
}
}
verifier
}
}
// The `nonce` allows protecting against replay attacks
// redirect_uri from: https://github.com/bitwarden/server/blob/main/src/Identity/IdentityServer/ApiClient.cs
pub async fn authorize_url(state: String, client_id: &str, raw_redirect_uri: &str, mut conn: DbConn) -> ApiResult<Url> {
let scopes = CONFIG.sso_scopes_vec().into_iter().map(Scope::new);
let redirect_uri = match client_id {
"web" | "browser" => format!("{}/sso-connector.html", CONFIG.domain()),
"desktop" | "mobile" => "bitwarden://sso-callback".to_string(),
"cli" => {
let port_regex = Regex::new(r"^http://localhost:([0-9]{4})$").unwrap();
match port_regex.captures(raw_redirect_uri).and_then(|captures| captures.get(1).map(|c| c.as_str())) {
Some(port) => format!("http://localhost:{}", port),
None => err!("Failed to extract port number"),
}
}
_ => err!(format!("Unsupported client {client_id}")),
};
let client = CoreClient::cached().await?;
let mut auth_req = client
.authorize_url(
AuthenticationFlow::<CoreResponseType>::AuthorizationCode,
|| CsrfToken::new(state),
Nonce::new_random,
)
.add_scopes(scopes)
.add_extra_params(CONFIG.sso_authorize_extra_params_vec());
let verifier = if CONFIG.sso_pkce() {
let (pkce_challenge, pkce_verifier) = PkceCodeChallenge::new_random_sha256();
auth_req = auth_req.set_pkce_challenge(pkce_challenge);
Some(pkce_verifier.secret().to_string())
} else {
None
};
let (auth_url, csrf_state, nonce) = auth_req.url();
let sso_nonce = SsoNonce::new(csrf_state.secret().to_string(), nonce.secret().to_string(), verifier, redirect_uri);
sso_nonce.save(&mut conn).await?;
Ok(auth_url)
}
#[derive(Clone, Debug)]
pub struct AuthenticatedUser {
pub state: String,
pub refresh_token: Option<String>,
pub access_token: String,
pub expires_in: Option<Duration>,
pub identifier: String,
pub email: String,
pub email_verified: Option<bool>,
pub user_name: Option<String>,
}
#[derive(Clone, Debug)]
pub struct UserInformation {
pub state: String,
pub identifier: String,
pub email: String,
pub email_verified: Option<bool>,
pub user_name: Option<String>,
}
async fn decode_code_claims(code: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {
match auth::decode_jwt::<OIDCCodeClaims>(code, SSO_JWT_ISSUER.to_string()) {
Ok(code_claims) => match code_claims.code {
OIDCCodeWrapper::Ok {
code,
state,
} => Ok((code, state)),
OIDCCodeWrapper::Error {
state,
error,
error_description,
} => {
if let Err(err) = SsoNonce::delete(&state, conn).await {
error!("Failed to delete database sso_nonce using {state}: {err}")
}
err!(format!(
"SSO authorization failed: {error}, {}",
error_description.as_ref().unwrap_or(&String::new())
))
}
},
Err(err) => err!(format!("Failed to decode code wrapper: {err}")),
}
}
// During the 2FA flow this is called twice:
// - the first time we retrieve the user information, only to discover that 2FA is required;
// - the second time we rely on the `AC_CACHE`, since the `code` has already been exchanged.
// The `nonce` ensures that the user is authorized only once.
// We return only the `UserInformation` to force calling `redeem` to obtain the `refresh_token`.
pub async fn exchange_code(wrapped_code: &str, conn: &mut DbConn) -> ApiResult<UserInformation> {
let (code, state) = decode_code_claims(wrapped_code, conn).await?;
if let Some(authenticated_user) = AC_CACHE.get(&state) {
return Ok(UserInformation {
state,
identifier: authenticated_user.identifier,
email: authenticated_user.email,
email_verified: authenticated_user.email_verified,
user_name: authenticated_user.user_name,
});
}
let oidc_code = AuthorizationCode::new(code.clone());
let client = CoreClient::cached().await?;
let nonce = match SsoNonce::find(&state, conn).await {
None => err!(format!("Invalid state cannot retrieve nonce")),
Some(nonce) => nonce,
};
let mut exchange = client.exchange_code(oidc_code);
if CONFIG.sso_pkce() {
match nonce.verifier {
None => err!(format!("Missing verifier in the DB nonce table")),
Some(secret) => exchange = exchange.set_pkce_verifier(PkceCodeVerifier::new(secret)),
}
}
match exchange.request_async(async_http_client).await {
Ok(token_response) => {
let user_info = client.user_info_async(token_response.access_token().to_owned()).await?;
let oidc_nonce = Nonce::new(nonce.nonce.clone());
let id_token = match token_response.extra_fields().id_token() {
None => err!("Token response did not contain an id_token"),
Some(token) => token,
};
if CONFIG.sso_debug_tokens() {
debug!("Id token: {}", id_token.to_string());
debug!("Access token: {}", token_response.access_token().secret().to_string());
debug!("Refresh token: {:?}", token_response.refresh_token().map(|t| t.secret().to_string()));
debug!("Expiration time: {:?}", token_response.expires_in());
}
let id_claims = match id_token.claims(&client.vw_id_token_verifier(), &oidc_nonce) {
Ok(claims) => claims,
Err(err) => {
if CONFIG.sso_client_cache_expiration() > 0 {
CLIENT_CACHE.invalidate(&*CLIENT_CACHE_KEY);
}
err!(format!("Could not read id_token claims, {err}"));
}
};
let email = match id_claims.email() {
Some(email) => email.to_string(),
None => match user_info.email() {
None => err!("Neither id token nor userinfo contained an email"),
Some(email) => email.to_owned().to_string(),
},
}
.to_lowercase();
let user_name = user_info.preferred_username().map(|un| un.to_string());
let refresh_token = token_response.refresh_token().map(|t| t.secret().to_string());
if refresh_token.is_none() && CONFIG.sso_scopes_vec().contains(&"offline_access".to_string()) {
error!("Scope offline_access is present but response contain no refresh_token");
}
let identifier = format!("{}/{}", **id_claims.issuer(), **id_claims.subject());
let authenticated_user = AuthenticatedUser {
state: nonce.state,
refresh_token,
access_token: token_response.access_token().secret().to_string(),
expires_in: token_response.expires_in(),
identifier: identifier.clone(),
email: email.clone(),
email_verified: id_claims.email_verified(),
user_name: user_name.clone(),
};
AC_CACHE.insert(state.clone(), authenticated_user.clone());
Ok(UserInformation {
state,
identifier,
email,
email_verified: id_claims.email_verified(),
user_name,
})
}
Err(err) => err!(format!("Failed to contact token endpoint: {err}")),
}
}
// Once the user has passed the 2FA flow we can delete the `nonce` and clear the cache.
pub async fn redeem(state: &String, conn: &mut DbConn) -> ApiResult<AuthenticatedUser> {
if let Err(err) = SsoNonce::delete(state, conn).await {
error!("Failed to delete database sso_nonce using {state}: {err}")
}
if let Some(au) = AC_CACHE.get(state) {
AC_CACHE.invalidate(state);
Ok(au)
} else {
err!("Failed to retrieve user info from sso cache")
}
}
// We always return a refresh_token (without one, some secrets are not displayed in the web frontend).
// If there is no SSO refresh_token, we keep the access_token so we can call user_info to check its validity.
pub fn create_auth_tokens(
device: &Device,
user: &User,
refresh_token: Option<String>,
access_token: &str,
expires_in: Option<Duration>,
) -> ApiResult<auth::AuthTokens> {
if !CONFIG.sso_auth_only_not_session() {
let now = Utc::now();
let (ap_nbf, ap_exp) = match (decode_token_claims("access_token", access_token), expires_in) {
(Ok(ap), _) => (ap.nbf(), ap.exp),
(Err(_), Some(exp)) => (now.timestamp(), (now + exp).timestamp()),
_ => err!("Non jwt access_token and empty expires_in"),
};
let access_claims =
auth::LoginJwtClaims::new(device, user, ap_nbf, ap_exp, auth::AuthMethod::Sso.scope_vec(), now);
_create_auth_tokens(device, refresh_token, access_claims, access_token)
} else {
Ok(AuthTokens::new(device, user, AuthMethod::Sso))
}
}
fn _create_auth_tokens(
device: &Device,
refresh_token: Option<String>,
access_claims: auth::LoginJwtClaims,
access_token: &str,
) -> ApiResult<auth::AuthTokens> {
let (nbf, exp, token) = if let Some(rt) = refresh_token.as_ref() {
match decode_token_claims("refresh_token", rt) {
Err(_) => {
let time_now = Utc::now();
let exp = (time_now + *DEFAULT_REFRESH_VALIDITY).timestamp();
debug!("Non jwt refresh_token (expiration set to {})", exp);
(time_now.timestamp(), exp, TokenWrapper::Refresh(rt.to_string()))
}
Ok(refresh_payload) => {
debug!("Refresh_payload: {:?}", refresh_payload);
(refresh_payload.nbf(), refresh_payload.exp, TokenWrapper::Refresh(rt.to_string()))
}
}
} else {
debug!("No refresh_token present");
(access_claims.nbf, access_claims.exp, TokenWrapper::Access(access_token.to_string()))
};
let refresh_claims = auth::RefreshJwtClaims {
nbf,
exp,
iss: auth::JWT_LOGIN_ISSUER.to_string(),
sub: auth::AuthMethod::Sso,
device_token: device.refresh_token.clone(),
token: Some(token),
};
Ok(auth::AuthTokens {
refresh_claims,
access_claims,
})
}
// This endpoint is called in two cases:
// - the session is close to expiration and we will try to extend it;
// - the user is about to perform an action and we check that the session is still valid.
pub async fn exchange_refresh_token(
device: &Device,
user: &User,
refresh_claims: &auth::RefreshJwtClaims,
) -> ApiResult<auth::AuthTokens> {
match &refresh_claims.token {
Some(TokenWrapper::Refresh(refresh_token)) => {
let rt = RefreshToken::new(refresh_token.to_string());
let client = CoreClient::cached().await?;
let token_response = match client.exchange_refresh_token(&rt).request_async(async_http_client).await {
Err(err) => err!(format!("Request to exchange_refresh_token endpoint failed: {:?}", err)),
Ok(token_response) => token_response,
};
// Use new refresh_token if returned
let rolled_refresh_token = token_response
.refresh_token()
.map(|token| token.secret().to_string())
.unwrap_or(refresh_token.to_string());
create_auth_tokens(
device,
user,
Some(rolled_refresh_token),
token_response.access_token().secret(),
token_response.expires_in(),
)
}
Some(TokenWrapper::Access(access_token)) => {
let now = Utc::now();
let exp_limit = (now + *BW_EXPIRATION).timestamp();
if refresh_claims.exp < exp_limit {
err_silent!("Access token is close to expiration but we have no refresh token")
}
let client = CoreClient::cached().await?;
match client.user_info_async(AccessToken::new(access_token.to_string())).await {
Err(err) => {
err_silent!(format!("Failed to retrieve user info, token has probably been invalidated: {err}"))
}
Ok(_) => {
let access_claims = auth::LoginJwtClaims::new(
device,
user,
now.timestamp(),
refresh_claims.exp,
auth::AuthMethod::Sso.scope_vec(),
now,
);
_create_auth_tokens(device, None, access_claims, access_token)
}
}
}
None => err!("No token present while in SSO"),
}
}
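
For orientation, here is a simplified sketch of how this module's public functions chain together during a login. The function below is illustrative (it is not a route from this change), reuses the module's imports, and elides the real handlers' error handling and 2FA branching:

// Sketch only: `wrapped_code` is the JWT-wrapped code posted back by the SSO
// callback; `conn_a`/`conn_b` stand in for pooled connections, and the state
// value is normally generated by the login handler.
async fn sketch_sso_login(
    device: &Device,
    user: &User,
    wrapped_code: &str,
    conn_a: DbConn,
    mut conn_b: DbConn,
) -> ApiResult<auth::AuthTokens> {
    // 1. Build the IdP authorization URL, persisting state/nonce/PKCE verifier.
    let _url = authorize_url("some-state".to_string(), "web", "", conn_a).await?;
    // ... the user authenticates at the IdP, which redirects back to Vaultwarden ...
    // 2. Exchange the code; the result is cached so a 2FA round trip can replay it.
    let info = exchange_code(wrapped_code, &mut conn_b).await?;
    // 3. Once 2FA (if any) has passed, consume the nonce and the cache entry.
    let authn = redeem(&info.state, &mut conn_b).await?;
    // 4. Mint Vaultwarden auth tokens backed by the IdP tokens.
    create_auth_tokens(device, user, authn.refresh_token, &authn.access_token, authn.expires_in)
}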

View File

@ -0,0 +1,6 @@
Master Password Has Been Changed
<!---------------->
The master password for {{user_name}} has been changed. If you did not initiate this request, please reach out to your administrator immediately.
===
{{> email/email_footer_text }}

View File

@ -0,0 +1,11 @@
Master Password Has Been Changed
<!---------------->
{{> email/email_header }}
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
The master password for <b style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">{{user_name}}</b> has been changed. If you did not initiate this request, please reach out to your administrator immediately.
</td>
</tr>
</table>
{{> email/email_footer }}

View File

@ -0,0 +1,4 @@
Your Email Changed
<!---------------->
Your email was changed in your SSO Provider. Please update your email in Account Settings ({{url}}).
{{> email/email_footer_text }}

View File

@ -0,0 +1,11 @@
Your Email Changed
<!---------------->
{{> email/email_header }}
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
Your email was changed in your SSO Provider. Please update your email in <a href="{{url}}/">Account Settings</a>.
</td>
</tr>
</table>
{{> email/email_footer }}

View File

@ -4,6 +4,7 @@
use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
use num_traits::ToPrimitive;
use once_cell::sync::Lazy;
use rocket::{
fairing::{Fairing, Info, Kind},
http::{ContentType, Header, HeaderMap, Method, Status},
@ -129,9 +130,12 @@ impl Cors {
// If a match exists, return it. Otherwise, return None.
fn get_allowed_origin(headers: &HeaderMap<'_>) -> Option<String> {
let origin = Cors::get_header(headers, "Origin");
let domain_origin = CONFIG.domain_origin();
let safari_extension_origin = "file://";
if origin == domain_origin || origin == safari_extension_origin {
if origin == CONFIG.domain_origin()
|| origin == safari_extension_origin
|| (CONFIG.sso_enabled() && origin == CONFIG.sso_authority())
{
Some(origin)
} else {
None
@ -701,14 +705,9 @@ where
use reqwest::{header, Client, ClientBuilder};
pub fn get_reqwest_client() -> Client {
match get_reqwest_client_builder().build() {
Ok(client) => client,
Err(e) => {
error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
get_reqwest_client_builder().hickory_dns(false).build().expect("Failed to build client")
}
}
pub fn get_reqwest_client() -> &'static Client {
static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
&INSTANCE
}
pub fn get_reqwest_client_builder() -> ClientBuilder {
@ -767,3 +766,248 @@ pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags
feature_states
}
mod dns_resolver {
use std::{
fmt,
net::{IpAddr, SocketAddr},
sync::Arc,
};
use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
use once_cell::sync::Lazy;
use reqwest::dns::{Name, Resolve, Resolving};
use crate::{util::is_global, CONFIG};
#[derive(Debug, Clone)]
pub enum CustomResolverError {
Blacklist {
domain: String,
},
NonGlobalIp {
domain: String,
ip: IpAddr,
},
}
impl CustomResolverError {
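// Walk the error `source()` chain looking for one of our custom errors,
// since reqwest wraps the resolver error in several layers.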
pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
let mut source = e.source();
while let Some(err) = source {
source = err.source();
if let Some(err) = err.downcast_ref::<CustomResolverError>() {
return Some(err);
}
}
None
}
}
impl fmt::Display for CustomResolverError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Blacklist {
domain,
} => write!(f, "Blacklisted domain: {domain} matched ICON_BLACKLIST_REGEX"),
Self::NonGlobalIp {
domain,
ip,
} => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
}
}
}
impl std::error::Error for CustomResolverError {}
#[derive(Debug, Clone)]
pub enum CustomDnsResolver {
Default(),
Hickory(Arc<TokioAsyncResolver>),
}
type BoxError = Box<dyn std::error::Error + Send + Sync>;
impl CustomDnsResolver {
pub fn instance() -> Arc<Self> {
static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
Arc::clone(&*INSTANCE)
}
fn new() -> Arc<Self> {
match read_system_conf() {
Ok((config, opts)) => {
let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
Arc::new(Self::Default())
}
}
}
// Note that we get an iterator of addresses, but we only grab the first one for convenience
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
pre_resolve(name)?;
let result = match self {
Self::Default() => tokio::net::lookup_host(name).await?.next(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
};
if let Some(addr) = &result {
post_resolve(name, addr.ip())?;
}
Ok(result)
}
}
fn pre_resolve(name: &str) -> Result<(), CustomResolverError> {
if crate::api::is_domain_blacklisted(name) {
return Err(CustomResolverError::Blacklist {
domain: name.to_string(),
});
}
Ok(())
}
fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomResolverError> {
if CONFIG.icon_blacklist_non_global_ips() && !is_global(ip) {
Err(CustomResolverError::NonGlobalIp {
domain: name.to_string(),
ip,
})
} else {
Ok(())
}
}
impl Resolve for CustomDnsResolver {
fn resolve(&self, name: Name) -> Resolving {
let this = self.clone();
Box::pin(async move {
let name = name.as_str();
let result = this.resolve_domain(name).await?;
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
})
}
}
}
pub use dns_resolver::{CustomDnsResolver, CustomResolverError};
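
The resolver plugs into reqwest through its `Resolve` trait. A minimal sketch of the builder hookup, assuming a reqwest version exposing `ClientBuilder::dns_resolver` (the PR's actual `get_reqwest_client_builder` body is outside this excerpt):

// Sketch: route every outbound DNS lookup of the shared client through the
// custom resolver, enforcing the blacklist / non-global-IP checks above.
fn sketch_client_builder() -> reqwest::ClientBuilder {
    reqwest::ClientBuilder::new()
        .timeout(std::time::Duration::from_secs(10))
        .dns_resolver(CustomDnsResolver::instance())
}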
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is stabilized
#[allow(clippy::nonminimal_bool)]
#[cfg(any(not(feature = "unstable"), test))]
pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
match ip {
std::net::IpAddr::V4(ip) => {
!(ip.octets()[0] == 0 // "This network"
|| ip.is_private()
|| (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
|| ip.is_loopback()
|| ip.is_link_local()
// addresses reserved for future protocols (`192.0.0.0/24`)
||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
|| ip.is_documentation()
|| (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
|| (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
|| ip.is_broadcast())
}
std::net::IpAddr::V6(ip) => {
!(ip.is_unspecified()
|| ip.is_loopback()
// IPv4-mapped Address (`::ffff:0:0/96`)
|| matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
// IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
|| matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
// Discard-Only Address Block (`100::/64`)
|| matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _])
// IETF Protocol Assignments (`2001::/23`)
|| (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
&& !(
// Port Control Protocol Anycast (`2001:1::1`)
u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
// Traversal Using Relays around NAT Anycast (`2001:1::2`)
|| u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
// AMT (`2001:3::/32`)
|| matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _])
// AS112-v6 (`2001:4:112::/48`)
|| matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
// ORCHIDv2 (`2001:20::/28`)
|| matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
))
|| ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
|| ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
|| ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
}
}
}
#[cfg(not(feature = "unstable"))]
pub use is_global_hardcoded as is_global;
#[cfg(feature = "unstable")]
#[inline(always)]
pub fn is_global(ip: std::net::IpAddr) -> bool {
ip.is_global()
}
/// These are some tests to check that the implementations match.
/// All IPv4 addresses can be checked in 30 seconds or so, and they are correct as of nightly 2023-07-17.
/// The IPv6 space can't be checked in a reasonable time, so we check over a hundred billion random addresses; so far they are correct.
/// Note that the is_global implementation is subject to change as new IP RFCs are created.
///
/// To run while showing progress output:
/// cargo +nightly test --release --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
use std::net::IpAddr;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..=u8::MAX {
println!("Iter: {}/255", a);
for b in 0..=u8::MAX {
for c in 0..=u8::MAX {
for d in 0..=u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {}", ip)
}
}
}
}
}
#[test]
#[ignore]
fn test_ipv6_global() {
use rand::Rng;
std::thread::scope(|s| {
for t in 0..16 {
let handle = s.spawn(move || {
let mut v = [0u8; 16];
let mut rng = rand::thread_rng();
for i in 0..20 {
println!("Thread {t} Iter: {i}/50");
for _ in 0..500_000_000 {
rng.fill(&mut v);
let ip = IpAddr::V6(std::net::Ipv6Addr::from(v));
assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {ip}");
}
}
});
}
});
}
}