Compare commits

5 commits: 741a26a7c5, 583cb924f1, 9286838d23, d1ebcfaf0b, e820551f62

19 changed files with 611 additions and 267 deletions
.forgejo/workflows/ci-checks.yml (new file, 175 lines)
@@ -0,0 +1,175 @@
+name: Checks / CI
+
+on:
+  push:
+  workflow_dispatch:
+
+# Cancel in-progress runs when a new push is made to the same branch
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  fast-checks:
+    name: Prek & Format
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install uv
+        uses: https://github.com/astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+          ignore-nothing-to-cache: true
+          cache-dependency-glob: ''
+
+      - name: Run prek (formerly prefligit)
+        run: uvx prek run --show-diff-on-failure --color=always -v --all-files --hook-stage manual
+
+      - name: Install rust nightly with rustfmt
+        run: |
+          uvx rustup override set nightly
+          uvx rustup component add rustfmt
+
+      - name: Check formatting
+        run: |
+          cargo +nightly fmt --all -- --check
+
+  clippy:
+    name: Clippy
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install uv
+        uses: https://github.com/astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+          ignore-nothing-to-cache: true
+          cache-dependency-glob: '' # Disable Python dependency tracking for Rust project
+
+      - name: Install Rust toolchain
+        run: |
+          # Install toolchain from rust-toolchain.toml
+          uvx rustup show # This will auto-install from rust-toolchain.toml
+
+      # cache-apt-pkgs-action requires apt lists to be initialised first
+      - name: Update APT package lists
+        run: sudo apt-get update
+
+      - name: Cache system packages
+        uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
+        with:
+          packages: clang liburing-dev
+          version: 1.0
+
+      - name: Cache Rust registry
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cargo/git
+            !~/.cargo/git/checkouts
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
+
+      - name: Run Clippy lints
+        run: |
+          cargo clippy \
+            --workspace \
+            --features full \
+            --locked \
+            --no-deps \
+            --profile test \
+            -- \
+            -D warnings
+
+  tests:
+    name: Tests
+    runs-on: ubuntu-latest
+    env:
+      SCCACHE_ENABLED: ${{ vars.GH_APP_ID != '' && secrets.GH_APP_PRIVATE_KEY != '' }}
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install uv
+        uses: https://github.com/astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+          ignore-nothing-to-cache: true
+          cache-dependency-glob: '' # Disable Python dependency tracking for Rust project
+
+      - name: Install Rust toolchain
+        run: |
+          # Install toolchain from rust-toolchain.toml
+          uvx rustup show # This will auto-install from rust-toolchain.toml
+
+      # cache-apt-pkgs-action requires apt lists to be initialised first
+      - name: Update APT package lists
+        run: sudo apt-get update
+
+      - name: Cache system packages
+        uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
+        with:
+          packages: clang liburing-dev
+          version: 1.0
+
+      - name: Cache Rust registry
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cargo/git
+            !~/.cargo/git/checkouts
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
+
+      - name: Create GitHub App token for sccache
+        if: env.SCCACHE_ENABLED == 'true'
+        uses: https://github.com/actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: ${{ vars.GH_APP_ID }}
+          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
+          github-api-url: https://api.github.com
+          owner: ${{ vars.GH_APP_OWNER }}
+          repositories: ""
+
+      - name: Setup sccache
+        if: env.SCCACHE_ENABLED == 'true'
+        uses: ./.forgejo/actions/sccache
+        with:
+          token: ${{ steps.app-token.outputs.token }}
+
+      - name: Setup Timelord
+        if: env.SCCACHE_ENABLED == 'true'
+        uses: ./.forgejo/actions/timelord
+        with:
+          key: sccache-v0
+          path: .
+
+      - name: Run Cargo tests
+        run: |
+          cargo test \
+            --workspace \
+            --features full \
+            --locked \
+            --profile test \
+            --all-targets \
+            --no-fail-fast
+
+      - name: Display sccache statistics
+        if: always() && env.SCCACHE_ENABLED == 'true'
+        run: sccache --show-stats
@@ -1,4 +1,4 @@
-name: Documentation
+name: Deploy / Documentation

 on:
   pull_request:
@@ -1,4 +1,4 @@
-name: Mirror Container Images
+name: Deploy / Mirror Images

 on:
   schedule:
@@ -1,22 +0,0 @@ (deleted file)
-name: Checks / Prefligit
-
-on:
-  push:
-  pull_request:
-permissions:
-  contents: read
-
-jobs:
-  prefligit:
-    runs-on: ubuntu-latest
-    env:
-      FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }}
-      TO_REF: ${{ github.sha }}
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: ./.forgejo/actions/prefligit
-        with:
-          extra_args: --all-files --hook-stage manual
@@ -1,6 +1,8 @@
-name: Release Docker Image
+name: Release / Builds
+# Cancel in-progress runs when a new push is made to the same branch
 concurrency:
-  group: "release-image-${{ github.ref }}"
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: false # Don't cancel release builds

 on:
   push:

@@ -17,11 +19,11 @@ on:
   workflow_dispatch:

 env:
-  BUILTIN_REGISTRY: forgejo.ellis.link
-  BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}"
+  BUILTIN_REGISTRY_ENABLED: "${{ vars.BUILTIN_REGISTRY != '' && ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}"

 jobs:
-  define-variables:
+  prepare:
+    name: Prepare Build Matrix
     runs-on: ubuntu-latest

     outputs:

@@ -30,7 +32,7 @@ jobs:
       build_matrix: ${{ steps.var.outputs.build_matrix }}

     steps:
-      - name: Setting variables
+      - name: Define build matrix and registries
         uses: https://github.com/actions/github-script@v7
         id: var
         with:

@@ -39,26 +41,39 @@
           const repoId = githubRepo.split('/')[1]

           core.setOutput('github_repository', githubRepo)
-          const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo
+          console.log('GitHub repository:', githubRepo)
+
+          const registry = '${{ vars.BUILTIN_REGISTRY }}'
+          console.log('Registry:', registry || '(not set)')
+
+          const builtinImage = registry ? `${registry}/${githubRepo}` : ''
+          console.log('Built-in image:', builtinImage || '(registry not configured)')

           let images = []
           if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
             images.push(builtinImage)
           }
+          console.log('Registry enabled:', process.env.BUILTIN_REGISTRY_ENABLED)
+          console.log('Images:', images.length > 0 ? images : '(none)')

           core.setOutput('images', images.join("\n"))
           core.setOutput('images_list', images.join(","))
           const platforms = ['linux/amd64', 'linux/arm64']
-          core.setOutput('build_matrix', JSON.stringify({
+          const buildMatrix = {
             platform: platforms,
             target_cpu: ['base'],
             include: platforms.map(platform => { return {
               platform,
               slug: platform.replace('/', '-')
             }})
-          }))
+          }
+          console.log('Build matrix:', JSON.stringify(buildMatrix, null, 2))
+          core.setOutput('build_matrix', JSON.stringify(buildMatrix))

-  build-image:
+  build:
+    name: Build Images & Binaries
     runs-on: dind
-    needs: define-variables
+    needs: prepare
     permissions:
       contents: read
       packages: write

@@ -78,16 +93,16 @@
       }

     steps:
-      - name: Echo strategy
-        run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}'
-      - name: Echo matrix
-        run: echo '${{ toJSON(matrix) }}'
+      - name: Display build matrix
+        run: |
+          echo "Strategy: ${{ toJSON(fromJSON(needs.prepare.outputs.build_matrix)) }}"
+          echo "Matrix: ${{ toJSON(matrix) }}"

       - name: Checkout repository
         uses: actions/checkout@v4
         with:
           persist-credentials: false
-      - name: Install rust
+      - name: Install Rust toolchain
+        id: rust-toolchain
         uses: ./.forgejo/actions/rust-toolchain

@@ -95,35 +110,34 @@
         uses: docker/setup-buildx-action@v3
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
-      # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
-      - name: Login to builtin registry
+      - name: Login to container registry
+        if: vars.BUILTIN_REGISTRY != ''
         uses: docker/login-action@v3
         with:
-          registry: ${{ env.BUILTIN_REGISTRY }}
+          registry: ${{ vars.BUILTIN_REGISTRY }}
           username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
           password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

-      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
-      - name: Extract metadata (labels, annotations) for Docker
+      - name: Extract Docker metadata
         id: meta
         uses: docker/metadata-action@v5
         with:
-          images: ${{needs.define-variables.outputs.images}}
+          images: ${{needs.prepare.outputs.images}}
           # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
         env:
           DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index

-      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
-      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
-      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
-      # It will not push images generated from a pull request
       - name: Get short git commit SHA
         id: sha
         run: |
           calculatedSha=$(git rev-parse --short ${{ github.sha }})
+          echo "Short SHA: $calculatedSha (from full SHA: ${{ github.sha }})"
           echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
-      - name: Get Git commit timestamps
-        run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
+      - name: Get commit timestamp
+        run: |
+          timestamp=$(git log -1 --pretty=%ct)
+          echo "Commit timestamp: $timestamp ($(date -d @$timestamp))"
+          echo "TIMESTAMP=$timestamp" >> $GITHUB_ENV

       - uses: ./.forgejo/actions/timelord
         with:

@@ -160,7 +174,7 @@
           path: |
             var-lib-apt-${{ matrix.slug }}
           key: var-lib-apt-${{ matrix.slug }}
-      - name: inject cache into docker
+      - name: Inject build cache
        uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
        with:
          cache-map: |

@@ -176,7 +190,7 @@
            }
          skip-extraction: ${{ steps.cache.outputs.cache-hit }}

-      - name: Build and push Docker image by digest
+      - name: Build Docker image
        id: build
        uses: docker/build-push-action@v6
        with:

@@ -191,28 +205,27 @@
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          cache-from: type=gha
-          # cache-to: type=gha,mode=max
+          cache-to: type=gha,mode=max
          sbom: true
-          outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
+          outputs: type=image,"name=${{ needs.prepare.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
        env:
          SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}

       # For publishing multi-platform manifests
-      - name: Export digest
+      - name: Export image digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

-      - name: Extract binary from container (image)
+      - name: Create container from image
        id: extract-binary-image
        run: |
          mkdir -p /tmp/binaries
          digest="${{ steps.build.outputs.digest }}"
-          echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.define-variables.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
-      - name: Extract binary from container (copy)
+          echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.prepare.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
+      - name: Extract binary from container
        run: docker cp ${{ steps.extract-binary-image.outputs.container_id }}:/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
-      - name: Extract binary from container (cleanup)
+      - name: Clean up container
        run: docker rm ${{ steps.extract-binary-image.outputs.container_id }}

       - name: Upload binary artifact

@@ -230,9 +243,10 @@
          if-no-files-found: error
          retention-days: 5

-  merge:
+  publish:
+    name: Publish Multi-platform Manifest
     runs-on: dind
-    needs: [define-variables, build-image]
+    needs: [prepare, build]
     steps:
       - name: Download digests
         uses: forgejo/download-artifact@v4

@@ -240,18 +254,18 @@
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
-      # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
-      - name: Login to builtin registry
+      - name: Login to container registry
+        if: vars.BUILTIN_REGISTRY != ''
        uses: docker/login-action@v3
        with:
-          registry: ${{ env.BUILTIN_REGISTRY }}
+          registry: ${{ vars.BUILTIN_REGISTRY }}
          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

-      - name: Extract metadata (tags) for Docker
+      - name: Extract Docker tags
        id: meta
        uses: docker/metadata-action@v5
        with:

@@ -263,15 +277,15 @@
            type=ref,event=pr
            type=sha,format=long
            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
-          images: ${{needs.define-variables.outputs.images}}
+          images: ${{needs.prepare.outputs.images}}
          # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index

-      - name: Create manifest list and push
+      - name: Create and push manifest
        working-directory: /tmp/digests
        env:
-          IMAGES: ${{needs.define-variables.outputs.images}}
+          IMAGES: ${{needs.prepare.outputs.images}}
        shell: bash
        run: |
          IFS=$'\n'

@@ -287,7 +301,7 @@

       - name: Inspect image
        env:
-          IMAGES: ${{needs.define-variables.outputs.images}}
+          IMAGES: ${{needs.prepare.outputs.images}}
        shell: bash
        run: |
          IMAGES_LIST=($IMAGES)
.forgejo/workflows/renovate.yml (new file, 60 lines)
@@ -0,0 +1,60 @@
+name: Maintenance / Renovate
+on:
+  schedule:
+    # Run at 2am UTC daily
+    - cron: '0 2 * * *'
+  workflow_dispatch:
+    inputs:
+      dryRun:
+        description: 'Dry run mode'
+        required: false
+        default: 'false'
+        type: choice
+        options:
+          - 'true'
+          - 'false'
+      logLevel:
+        description: 'Log level'
+        required: false
+        default: 'info'
+        type: choice
+        options:
+          - 'debug'
+          - 'info'
+          - 'warn'
+          - 'error'
+  push:
+    branches:
+      - main
+    paths:
+      - '.forgejo/workflows/renovate.yml'
+      - 'renovate.json'
+
+jobs:
+  renovate:
+    name: Renovate
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Run Renovate
+        uses: renovatebot/github-action@v40.1.0
+        with:
+          token: ${{ secrets.RENOVATE_TOKEN }}
+          configurationFile: renovate.json
+        env:
+          # Platform settings
+          RENOVATE_PLATFORM: gitea
+          RENOVATE_ENDPOINT: ${{ github.server_url }}/api/v1
+          RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
+
+          # Repository settings
+          RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'
+
+          # Behaviour settings
+          RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}
+          LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
+
+          # Forgejo/Gitea specific
+          RENOVATE_GIT_AUTHOR: '${{ vars.RENOVATE_AUTHOR }}'
@@ -1,144 +0,0 @@ (deleted file)
-name: Checks / Rust
-
-on:
-  push:
-
-jobs:
-  format:
-    name: Format
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Install rust
-        uses: ./.forgejo/actions/rust-toolchain
-        with:
-          toolchain: "nightly"
-          components: "rustfmt"
-
-      - name: Check formatting
-        run: |
-          cargo +nightly fmt --all -- --check
-
-  clippy:
-    name: Clippy
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Install rust
-        uses: ./.forgejo/actions/rust-toolchain
-
-      - uses: https://github.com/actions/create-github-app-token@v2
-        id: app-token
-        with:
-          app-id: ${{ vars.GH_APP_ID }}
-          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
-          github-api-url: https://api.github.com
-          owner: ${{ vars.GH_APP_OWNER }}
-          repositories: ""
-      - name: Install sccache
-        uses: ./.forgejo/actions/sccache
-        with:
-          token: ${{ steps.app-token.outputs.token }}
-      - run: sudo apt-get update
-      - name: Install system dependencies
-        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
-        with:
-          packages: clang liburing-dev
-          version: 1
-      - name: Cache Rust registry
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cargo/git
-            !~/.cargo/git/checkouts
-            ~/.cargo/registry
-            !~/.cargo/registry/src
-          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
-      - name: Timelord
-        uses: ./.forgejo/actions/timelord
-        with:
-          key: sccache-v0
-          path: .
-      - name: Clippy
-        run: |
-          cargo clippy \
-            --workspace \
-            --features full \
-            --locked \
-            --no-deps \
-            --profile test \
-            -- \
-            -D warnings
-
-      - name: Show sccache stats
-        if: always()
-        run: sccache --show-stats
-
-  cargo-test:
-    name: Cargo Test
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Install rust
-        uses: ./.forgejo/actions/rust-toolchain
-
-      - uses: https://github.com/actions/create-github-app-token@v2
-        id: app-token
-        with:
-          app-id: ${{ vars.GH_APP_ID }}
-          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
-          github-api-url: https://api.github.com
-          owner: ${{ vars.GH_APP_OWNER }}
-          repositories: ""
-      - name: Install sccache
-        uses: ./.forgejo/actions/sccache
-        with:
-          token: ${{ steps.app-token.outputs.token }}
-      - run: sudo apt-get update
-      - name: Install system dependencies
-        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
-        with:
-          packages: clang liburing-dev
-          version: 1
-      - name: Cache Rust registry
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cargo/git
-            !~/.cargo/git/checkouts
-            ~/.cargo/registry
-            !~/.cargo/registry/src
-          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
-      - name: Timelord
-        uses: ./.forgejo/actions/timelord
-        with:
-          key: sccache-v0
-          path: .
-      - name: Cargo Test
-        run: |
-          cargo test \
-            --workspace \
-            --features full \
-            --locked \
-            --profile test \
-            --all-targets \
-            --no-fail-fast
-
-      - name: Show sccache stats
-        if: always()
-        run: sccache --show-stats
@@ -9,7 +9,7 @@ repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v5.0.0
     hooks:
-      - id: check-byte-order-marker
+      - id: fix-byte-order-marker
       - id: check-case-conflict
       - id: check-symlinks
       - id: destroyed-symlinks
@@ -22,5 +22,24 @@
     "tikv-jemalloc-ctl",
     "opentelemetry-rust",
     "tracing-opentelemetry"
-  ]
+  ],
+  "github-actions": {
+    "enabled": true,
+    "fileMatch": [
+      "(^|/)\\.forgejo/workflows/[^/]+\\.ya?ml$",
+      "(^|/)\\.forgejo/actions/[^/]+/action\\.ya?ml$",
+      "(^|/)\\.github/workflows/[^/]+\\.ya?ml$",
+      "(^|/)\\.github/actions/[^/]+/action\\.ya?ml$"
+    ]
+  },
+  "packageRules": [
+    {
+      "description": "Group all non-major GitHub Actions updates",
+      "matchManagers": ["github-actions"],
+      "matchUpdateTypes": ["minor", "patch"],
+      "groupName": "github-actions-non-major"
+    }
+  ],
+  "prConcurrentLimit": 3,
+  "prHourlyLimit": 2
 }
@@ -8,7 +8,7 @@ use conduwuit::{
 	ref_at,
 	utils::{
 		IterStream, ReadyExt,
-		result::{FlatOk, LogErr},
+		result::LogErr,
 		stream::{BroadbandExt, TryIgnore, WidebandExt},
 	},
 };

@@ -35,6 +35,7 @@ use ruma::{
 };
 use tracing::warn;

+use super::utils::{count_to_token, parse_pagination_token as parse_token};
 use crate::Ruma;

 /// list of safe and common non-state events to ignore if the user is ignored

@@ -84,14 +85,14 @@ pub(crate) async fn get_message_events_route(
 	let from: PduCount = body
 		.from
 		.as_deref()
-		.map(str::parse)
+		.map(parse_token)
 		.transpose()?
 		.unwrap_or_else(|| match body.dir {
 			| Direction::Forward => PduCount::min(),
 			| Direction::Backward => PduCount::max(),
 		});

-	let to: Option<PduCount> = body.to.as_deref().map(str::parse).flat_ok();
+	let to: Option<PduCount> = body.to.as_deref().map(parse_token).transpose()?;

 	let limit: usize = body
 		.limit

@@ -180,8 +181,8 @@ pub(crate) async fn get_message_events_route(
 		.collect();

 	Ok(get_message_events::v3::Response {
-		start: from.to_string(),
-		end: next_token.as_ref().map(ToString::to_string),
+		start: count_to_token(from),
+		end: next_token.map(count_to_token),
 		chunk,
 		state,
 	})
@@ -36,6 +36,7 @@ pub(super) mod typing;
 pub(super) mod unstable;
 pub(super) mod unversioned;
 pub(super) mod user_directory;
+pub(super) mod utils;
 pub(super) mod voip;
 pub(super) mod well_known;
@@ -18,6 +18,7 @@ use ruma::{
 	events::{TimelineEventType, relation::RelationType},
 };

+use super::utils::{count_to_token, parse_pagination_token as parse_token};
 use crate::Ruma;

 /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`

@@ -110,14 +111,14 @@
 	dir: Direction,
 ) -> Result<get_relating_events::v1::Response> {
 	let start: PduCount = from
-		.map(str::parse)
+		.map(parse_token)
 		.transpose()?
 		.unwrap_or_else(|| match dir {
 			| Direction::Forward => PduCount::min(),
 			| Direction::Backward => PduCount::max(),
 		});

-	let to: Option<PduCount> = to.map(str::parse).flat_ok();
+	let to: Option<PduCount> = to.map(parse_token).transpose()?;

 	// Use limit or else 30, with maximum 100
 	let limit: usize = limit

@@ -129,6 +130,11 @@
 	// Spec (v1.10) recommends depth of at least 3
 	let depth: u8 = if recurse { 3 } else { 1 };

+	// Check if this is a thread request
+	let is_thread = filter_rel_type
+		.as_ref()
+		.is_some_and(|rel| *rel == RelationType::Thread);
+
 	let events: Vec<_> = services
 		.rooms
 		.pdu_metadata

@@ -152,23 +158,58 @@
 		.collect()
 		.await;

-	let next_batch = match dir {
-		| Direction::Forward => events.last(),
-		| Direction::Backward => events.first(),
-	}
-	.map(at!(0))
-	.as_ref()
-	.map(ToString::to_string);
+	// For threads, check if we should include the root event
+	let mut root_event = None;
+	if is_thread && dir == Direction::Backward {
+		// Check if we've reached the beginning of the thread
+		// (fewer events than requested means we've exhausted the thread)
+		if events.len() < limit {
+			// Try to get the thread root event
+			if let Ok(root_pdu) = services.rooms.timeline.get_pdu(target).await {
+				// Check visibility
+				if services
+					.rooms
+					.state_accessor
+					.user_can_see_event(sender_user, room_id, target)
+					.await
+				{
+					// Store the root event to add to the response
+					root_event = Some(root_pdu);
+				}
+			}
+		}
+	}
+
+	// Determine if there are more events to fetch
+	let has_more = if root_event.is_some() {
+		false // We've included the root, no more events
+	} else {
+		// Check if we got a full page of results (might be more)
+		events.len() >= limit
+	};
+
+	let next_batch = if has_more {
+		match dir {
+			| Direction::Forward => events.last(),
+			| Direction::Backward => events.first(),
+		}
+		.map(|(count, _)| count_to_token(*count))
+	} else {
+		None
+	};
+
+	// Build the response chunk with thread root if needed
+	let chunk: Vec<_> = root_event
+		.into_iter()
+		.map(Event::into_format)
+		.chain(events.into_iter().map(at!(1)).map(Event::into_format))
+		.collect();

 	Ok(get_relating_events::v1::Response {
 		next_batch,
 		prev_batch: from.map(Into::into),
 		recursion_depth: recurse.then_some(depth.into()),
-		chunk: events
-			.into_iter()
-			.map(at!(1))
-			.map(Event::into_format)
-			.collect(),
+		chunk,
 	})
 }
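The new next_batch logic above only emits a continuation token when a full page came back; a short page is taken to mean the relations are exhausted (and an included thread root forces the end). A self-contained sketch of that decision, with plain u64 values standing in for PduCount and a hypothetical helper name (forward direction only):

// Sketch: emit a continuation token only when the page was full and the
// thread root was not already included in the response.
fn next_batch_sketch(page: &[u64], limit: usize, included_root: bool) -> Option<u64> {
	if included_root || page.len() < limit {
		return None; // exhausted: no more events to fetch
	}
	page.last().copied() // Forward direction; Backward would use .first()
}

fn main() {
	assert_eq!(next_batch_sketch(&[1, 2, 3], 3, false), Some(3));
	assert_eq!(next_batch_sketch(&[1, 2], 3, false), None); // short page: done
	assert_eq!(next_batch_sketch(&[1, 2, 3], 3, true), None); // root included
}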
@@ -198,8 +198,8 @@ pub(crate) async fn login_route(
 		.clone()
 		.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());

-	// Generate a new token for the device
-	let token = utils::random_string(TOKEN_LENGTH);
+	// Generate a new token for the device (ensuring no collisions)
+	let token = services.users.generate_unique_token().await;

 	// Determine if device_id was provided and exists in the db for this user
 	let device_exists = if body.device_id.is_some() {
src/api/client/utils.rs (new file, 28 lines)
@@ -0,0 +1,28 @@
+use conduwuit::{
+	Result, err,
+	matrix::pdu::{PduCount, ShortEventId},
+};
+
+/// Parse a pagination token, trying ShortEventId first, then falling back to
+/// PduCount
+pub(crate) fn parse_pagination_token(token: &str) -> Result<PduCount> {
+	// Try parsing as ShortEventId first
+	if let Ok(shorteventid) = token.parse::<ShortEventId>() {
+		// ShortEventId maps directly to a PduCount in our database
+		Ok(PduCount::Normal(shorteventid))
+	} else if let Ok(count) = token.parse::<u64>() {
+		// Fallback to PduCount for backwards compatibility
+		Ok(PduCount::Normal(count))
+	} else if let Ok(count) = token.parse::<i64>() {
+		// Also handle negative counts for backfilled events
+		Ok(PduCount::from_signed(count))
+	} else {
+		Err(err!(Request(InvalidParam("Invalid pagination token"))))
+	}
+}
+
+/// Convert a PduCount to a token string (using the underlying ShortEventId)
+pub(crate) fn count_to_token(count: PduCount) -> String {
+	// The PduCount's unsigned value IS the ShortEventId
+	count.into_unsigned().to_string()
+}
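These helpers keep pagination tokens numeric on the wire. A minimal standalone sketch of the fallback chain, with plain integers standing in for conduwuit's PduCount/ShortEventId (assumption: PduCount::Normal wraps a u64, so the first two branches of the real function collapse into one here) and a hypothetical function name:

// Standalone sketch of the fallback chain in parse_pagination_token.
fn parse_token_sketch(token: &str) -> Result<i128, String> {
	if let Ok(short) = token.parse::<u64>() {
		Ok(i128::from(short)) // ShortEventId / PduCount::Normal path
	} else if let Ok(count) = token.parse::<i64>() {
		Ok(i128::from(count)) // negative counts for backfilled events
	} else {
		Err(format!("Invalid pagination token: {token}"))
	}
}

fn main() {
	assert_eq!(parse_token_sketch("42"), Ok(42)); // round-trips via count_to_token
	assert_eq!(parse_token_sketch("-7"), Ok(-7));
	assert!(parse_token_sketch("not-a-token").is_err());
}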
@@ -5,6 +5,14 @@ use axum_extra::{
 	typed_header::TypedHeaderRejectionReason,
 };
 use conduwuit::{Err, Error, Result, debug_error, err, warn};
+use futures::{
+	TryFutureExt,
+	future::{
+		Either::{Left, Right},
+		select_ok,
+	},
+	pin_mut,
+};
 use ruma::{
 	CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
 	api::{

@@ -54,17 +62,7 @@
 		| None => request.query.access_token.as_deref(),
 	};

-	let token = if let Some(token) = token {
-		match services.appservice.find_from_token(token).await {
-			| Some(reg_info) => Token::Appservice(Box::new(reg_info)),
-			| _ => match services.users.find_from_token(token).await {
-				| Ok((user_id, device_id)) => Token::User((user_id, device_id)),
-				| _ => Token::Invalid,
-			},
-		}
-	} else {
-		Token::None
-	};
+	let token = find_token(services, token).await?;

 	if metadata.authentication == AuthScheme::None {
 		match metadata {

@@ -342,3 +340,25 @@ async fn parse_x_matrix(request: &mut Request) -> Result<XMatrix> {

 	Ok(x_matrix)
 }
+
+async fn find_token(services: &Services, token: Option<&str>) -> Result<Token> {
+	let Some(token) = token else {
+		return Ok(Token::None);
+	};
+
+	let user_token = services.users.find_from_token(token).map_ok(Token::User);
+
+	let appservice_token = services
+		.appservice
+		.find_from_token(token)
+		.map_ok(Box::new)
+		.map_ok(Token::Appservice);
+
+	pin_mut!(user_token, appservice_token);
+	// Returns Ok if either token type succeeds, Err only if both fail
+	match select_ok([Left(user_token), Right(appservice_token)]).await {
+		| Err(e) if !e.is_not_found() => Err(e),
+		| Ok((token, _)) => Ok(token),
+		| _ => Ok(Token::Invalid),
+	}
+}
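The new find_token helper leans on futures::future::select_ok, which resolves to the first future that completes with Ok and only errors once every future has failed. A minimal sketch of that behaviour (assuming the futures and tokio crates; plain async blocks stand in for the user and appservice lookups):

use futures::FutureExt;
use futures::future::select_ok;

#[tokio::main]
async fn main() {
	// Two racing lookups; the strings stand in for Token values.
	let user = async { Err::<&str, String>("not a user token".into()) }.boxed();
	let appservice = async { Ok::<&str, String>("appservice") }.boxed();

	// select_ok yields the first Ok; an Err comes back only if both fail.
	match select_ok([user, appservice]).await {
		Ok((kind, _remaining)) => println!("matched: {kind}"),
		Err(e) => println!("both lookups failed: {e}"),
	}
}

In the real code the two lookups return different types, which is why they are wrapped in Either::Left/Right before being handed to select_ok: the wrappers give both futures a single common type.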
@@ -4,14 +4,14 @@ mod registration_info;
 use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc};

 use async_trait::async_trait;
-use conduwuit::{Result, err, utils::stream::IterStream};
+use conduwuit::{Err, Result, err, utils::stream::IterStream};
 use database::Map;
 use futures::{Future, FutureExt, Stream, TryStreamExt};
 use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration};
 use tokio::sync::{RwLock, RwLockReadGuard};

 pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo};
-use crate::{Dep, sending};
+use crate::{Dep, globals, sending, users};

 pub struct Service {
 	registration_info: RwLock<Registrations>,

@@ -20,7 +20,9 @@
 }

 struct Services {
+	globals: Dep<globals::Service>,
 	sending: Dep<sending::Service>,
+	users: Dep<users::Service>,
 }

 struct Data {

@@ -35,7 +37,9 @@
 		Ok(Arc::new(Self {
 			registration_info: RwLock::new(BTreeMap::new()),
 			services: Services {
+				globals: args.depend::<globals::Service>("globals"),
 				sending: args.depend::<sending::Service>("sending"),
+				users: args.depend::<users::Service>("users"),
 			},
 			db: Data {
 				id_appserviceregistrations: args.db["id_appserviceregistrations"].clone(),

@@ -44,23 +48,89 @@
 	}

 	async fn worker(self: Arc<Self>) -> Result {
-		// Inserting registrations into cache
-		self.iter_db_ids()
-			.try_for_each(async |appservice| {
-				self.registration_info
-					.write()
-					.await
-					.insert(appservice.0, appservice.1.try_into()?);
-
-				Ok(())
-			})
-			.await
+		// First, collect all appservices to check for token conflicts
+		let appservices: Vec<(String, Registration)> = self.iter_db_ids().try_collect().await?;
+
+		// Check for appservice-to-appservice token conflicts
+		for i in 0..appservices.len() {
+			for j in i.saturating_add(1)..appservices.len() {
+				if appservices[i].1.as_token == appservices[j].1.as_token {
+					return Err!(Database(error!(
+						"Token collision detected: Appservices '{}' and '{}' have the same token",
+						appservices[i].0, appservices[j].0
+					)));
+				}
+			}
+		}
+
+		// Process each appservice
+		for (id, registration) in appservices {
+			// During startup, resolve any token collisions in favour of appservices
+			// by logging out conflicting user devices
+			if let Ok((user_id, device_id)) = self
+				.services
+				.users
+				.find_from_token(&registration.as_token)
+				.await
+			{
+				conduwuit::warn!(
+					"Token collision detected during startup: Appservice '{}' token was also \
+					 used by user '{}' device '{}'. Logging out the user device to resolve \
+					 conflict.",
+					id,
+					user_id.localpart(),
+					device_id
+				);
+
+				self.services
+					.users
+					.remove_device(&user_id, &device_id)
+					.await;
+			}
+
+			self.start_appservice(id, registration).await?;
+		}
+
+		Ok(())
 	}

 	fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
 }

 impl Service {
+	/// Starts an appservice, ensuring its sender_localpart user exists and is
+	/// active. Creates the user if it doesn't exist, or reactivates it if it
+	/// was deactivated. Then registers the appservice in memory for request
+	/// handling.
+	async fn start_appservice(&self, id: String, registration: Registration) -> Result {
+		let appservice_user_id = UserId::parse_with_server_name(
+			registration.sender_localpart.as_str(),
+			self.services.globals.server_name(),
+		)?;
+
+		if !self.services.users.exists(&appservice_user_id).await {
+			self.services.users.create(&appservice_user_id, None)?;
+		} else if self
+			.services
+			.users
+			.is_deactivated(&appservice_user_id)
+			.await
+			.unwrap_or(false)
+		{
+			// Reactivate the appservice user if it was accidentally deactivated
+			self.services
+				.users
+				.set_password(&appservice_user_id, None)?;
+		}
+
+		self.registration_info
+			.write()
+			.await
+			.insert(id, registration.try_into()?);
+
+		Ok(())
+	}
+
 	/// Registers an appservice and returns the ID to the caller
 	pub async fn register_appservice(
 		&self,

@@ -68,15 +138,40 @@
 		appservice_config_body: &str,
 	) -> Result {
 		//TODO: Check for collisions between exclusive appservice namespaces
-		self.registration_info
-			.write()
-			.await
-			.insert(registration.id.clone(), registration.clone().try_into()?);
+
+		// Check for token collision with other appservices (allow re-registration of
+		// same appservice)
+		if let Ok(existing) = self.find_from_token(&registration.as_token).await {
+			if existing.registration.id != registration.id {
+				return Err(err!(Request(InvalidParam(
+					"Cannot register appservice: Token is already used by appservice '{}'. \
+					 Please generate a different token.",
+					existing.registration.id
+				))));
+			}
+		}
+
+		// Prevent token collision with existing user tokens
+		if self
+			.services
+			.users
+			.find_from_token(&registration.as_token)
+			.await
+			.is_ok()
+		{
+			return Err(err!(Request(InvalidParam(
+				"Cannot register appservice: The provided token is already in use by a user \
+				 device. Please generate a different token for the appservice."
+			))));
+		}

 		self.db
 			.id_appserviceregistrations
 			.insert(&registration.id, appservice_config_body);

+		self.start_appservice(registration.id.clone(), registration.clone())
+			.await?;
+
 		Ok(())
 	}

@@ -113,12 +208,14 @@
 			.map(|info| info.registration)
 	}

-	pub async fn find_from_token(&self, token: &str) -> Option<RegistrationInfo> {
+	/// Returns Result to match users::find_from_token for select_ok usage
+	pub async fn find_from_token(&self, token: &str) -> Result<RegistrationInfo> {
 		self.read()
 			.await
 			.values()
 			.find(|info| info.registration.as_token == token)
 			.cloned()
+			.ok_or_else(|| err!(Request(NotFound("Appservice token not found"))))
 	}

 	/// Checks if a given user id matches any exclusive appservice regex
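The startup scan in the worker compares every pair of registrations, which is quadratic in the number of appservices (harmless at typical counts). For illustration only, a linear-time variant of the same check using a HashMap keyed on token; the (String, String) pairs and the function name are hypothetical stand-ins for the (id, as_token) fields of the real Registration structs:

use std::collections::HashMap;

// Sketch: detect two appservices sharing a token in one pass.
fn find_token_collision(appservices: &[(String, String)]) -> Option<(&str, &str)> {
	let mut seen: HashMap<&str, &str> = HashMap::new();
	for (id, token) in appservices {
		// insert returns the previous id if this token was already seen
		if let Some(previous_id) = seen.insert(token.as_str(), id.as_str()) {
			return Some((previous_id, id.as_str()));
		}
	}
	None
}

fn main() {
	let regs = vec![
		("bridge-a".to_owned(), "token-1".to_owned()),
		("bridge-b".to_owned(), "token-2".to_owned()),
		("bridge-c".to_owned(), "token-1".to_owned()),
	];
	assert_eq!(find_token_collision(&regs), Some(("bridge-a", "bridge-c")));
}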
@@ -61,9 +61,12 @@
 		from: PduCount,
 		dir: Direction,
 	) -> impl Stream<Item = (PduCount, impl Event)> + Send + '_ {
+		// Query from exact position then filter excludes it (saturating_inc could skip
+		// events at min/max boundaries)
+		let from_unsigned = from.into_unsigned();
 		let mut current = ArrayVec::<u8, 16>::new();
 		current.extend(target.to_be_bytes());
-		current.extend(from.saturating_inc(dir).into_unsigned().to_be_bytes());
+		current.extend(from_unsigned.to_be_bytes());
 		let current = current.as_slice();
 		match dir {
 			| Direction::Forward => self.tofrom_relation.raw_keys_from(current).boxed(),

@@ -73,6 +76,17 @@
 		.ready_take_while(move |key| key.starts_with(&target.to_be_bytes()))
 		.map(|to_from| u64_from_u8(&to_from[8..16]))
 		.map(PduCount::from_unsigned)
+		.ready_filter(move |count| {
+			if from == PduCount::min() || from == PduCount::max() {
+				true
+			} else {
+				let count_unsigned = count.into_unsigned();
+				match dir {
+					| Direction::Forward => count_unsigned > from_unsigned,
+					| Direction::Backward => count_unsigned < from_unsigned,
+				}
+			}
+		})
 		.wide_filter_map(move |shorteventid| async move {
 			let pdu_id: RawPduId = PduId { shortroomid, shorteventid }.into();
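The change above stops pre-incrementing the start position (saturating_inc could silently skip events when `from` sits at a min/max boundary) and instead queries from the exact position and filters it out afterwards. A self-contained sketch of that filter predicate, with plain u64 values standing in for PduCount and 0/u64::MAX standing in for PduCount::min()/max() (an assumption of this sketch):

#[derive(Clone, Copy, PartialEq)]
enum Direction {
	Forward,
	Backward,
}

// Sketch of the ready_filter predicate: keep everything when paginating
// from the extremes, otherwise drop the exact `from` position and anything
// on the wrong side of it.
fn keep(count: u64, from: u64, dir: Direction) -> bool {
	if from == 0 || from == u64::MAX {
		return true;
	}
	match dir {
		Direction::Forward => count > from,
		Direction::Backward => count < from,
	}
}

fn main() {
	assert!(!keep(10, 10, Direction::Forward)); // exact start position excluded
	assert!(keep(11, 10, Direction::Forward));
	assert!(keep(9, 10, Direction::Backward));
	assert!(keep(123, 0, Direction::Forward)); // min boundary: keep all
}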
@@ -19,7 +19,7 @@
 use serde::{Deserialize, Serialize};
 use serde_json::json;

-use crate::{Dep, account_data, admin, globals, rooms};
+use crate::{Dep, account_data, admin, appservice, globals, rooms};

 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct UserSuspension {

@@ -40,6 +40,7 @@
 	server: Arc<Server>,
 	account_data: Dep<account_data::Service>,
 	admin: Dep<admin::Service>,
+	appservice: Dep<appservice::Service>,
 	globals: Dep<globals::Service>,
 	state_accessor: Dep<rooms::state_accessor::Service>,
 	state_cache: Dep<rooms::state_cache::Service>,

@@ -76,6 +77,7 @@
 				server: args.server.clone(),
 				account_data: args.depend::<account_data::Service>("account_data"),
 				admin: args.depend::<admin::Service>("admin"),
+				appservice: args.depend::<appservice::Service>("appservice"),
 				globals: args.depend::<globals::Service>("globals"),
 				state_accessor: args
 					.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),

@@ -391,6 +393,31 @@
 		self.db.userdeviceid_token.qry(&key).await.deserialized()
 	}

+	/// Generate a unique access token that doesn't collide with existing tokens
+	pub async fn generate_unique_token(&self) -> String {
+		loop {
+			let token = utils::random_string(32);
+
+			// Check for collision with appservice tokens
+			if self
+				.services
+				.appservice
+				.find_from_token(&token)
+				.await
+				.is_ok()
+			{
+				continue;
+			}
+
+			// Check for collision with user tokens
+			if self.db.token_userdeviceid.get(&token).await.is_ok() {
+				continue;
+			}
+
+			return token;
+		}
+	}
+
 	/// Replaces the access token of one device.
 	pub async fn set_token(
 		&self,

@@ -407,6 +434,19 @@
 			)));
 		}

+		// Check for token collision with appservices
+		if self
+			.services
+			.appservice
+			.find_from_token(token)
+			.await
+			.is_ok()
+		{
+			return Err!(Request(InvalidParam(
+				"Token conflicts with an existing appservice token"
+			)));
+		}
+
 		// Remove old token
 		if let Ok(old_token) = self.db.userdeviceid_token.qry(&key).await {
 			self.db.token_userdeviceid.remove(&old_token);