Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions database/migrations/0001_schema.up.sql
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,15 @@ CREATE TABLE IF NOT EXISTS `Servers` (
UNIQUE (`key`)
);

-- Tags categorizing a server; one row per (server, tag) pair.
CREATE TABLE IF NOT EXISTS `ServerTags` (
  -- INT8 so the auto-increment key cannot run out. The previous INT1
  -- (TINYINT) capped this table at 255 rows TOTAL across all servers.
  `id` INT8 UNSIGNED NOT NULL AUTO_INCREMENT,
  `server_id` INT2 UNSIGNED NOT NULL,
  `name` VARCHAR(255) NOT NULL,
  PRIMARY KEY (`id`),
  -- NOTE(review): consider `ON DELETE CASCADE` here, otherwise deleting a
  -- server will fail while it still has tags — confirm intended semantics.
  FOREIGN KEY (`server_id`) REFERENCES `Servers` (`id`),
  -- A server cannot carry the same tag twice.
  UNIQUE (`server_id`, `name`)
);

CREATE TABLE IF NOT EXISTS `Jumpstats` (
`id` INT8 UNSIGNED NOT NULL AUTO_INCREMENT,
`type` INT1 UNSIGNED NOT NULL,
Expand Down
5 changes: 5 additions & 0 deletions database/migrations/9999_data.up.sql
Original file line number Diff line number Diff line change
Expand Up @@ -33,3 +33,8 @@ VALUES
76561198282622073,
"a107320d-ad7e-40f5-98e5-aa0e15171bc0"
);

-- Seed data: mark the first test server as private.
INSERT INTO `ServerTags` (`server_id`, `name`)
VALUES (1, "private");
10 changes: 10 additions & 0 deletions src/services/maps/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,16 @@ impl MapService
#[tracing::instrument(level = "debug", err(Debug, level = "debug"))]
pub async fn fetch_maps(&self, req: FetchMapsRequest) -> Result<FetchMapsResponse>
{
let map_count = sqlx::query_scalar!("SELECT COUNT(id) FROM Maps")
.fetch_one(&self.database)
.await?
.try_conv::<u64>()
.expect("positive count");

if *req.offset >= map_count {
return Ok(FetchMapsResponse { maps: Vec::new(), total: map_count });
}

let map_chunks = sqlx::query_as::<_, FetchMapResponse>(&format!(
r"
{}
Expand Down
76 changes: 58 additions & 18 deletions src/services/servers/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,11 @@ use std::fmt;
use std::time::Duration;

use axum::extract::FromRef;
use itertools::Itertools;
use sqlx::{MySql, Pool, Row};
use tap::Pipe;
use tap::{Pipe, Tap, TryConv};

use crate::database::{SqlErrorExt, TransactionExt};
use crate::database::SqlErrorExt;
use crate::services::auth::{jwt, Jwt};
use crate::services::plugin::PluginVersionID;
use crate::services::AuthService;
Expand Down Expand Up @@ -73,36 +74,53 @@ impl ServerService
pub async fn fetch_server(&self, req: FetchServerRequest)
-> Result<Option<FetchServerResponse>>
{
let res = sqlx::query_as::<_, FetchServerResponse>(&format!(
let raw_servers = sqlx::query_as::<_, FetchServerResponse>(&format!(
r"
{}
WHERE
s.id = COALESCE(?, s.id)
AND s.name LIKE COALESCE(?, s.name)
LIMIT
1
",
queries::SELECT,
))
.bind(req.identifier.as_id())
.bind(req.identifier.as_name().map(|name| format!("%{name}%")))
.fetch_optional(&self.database)
.fetch_all(&self.database)
.await?;

Ok(res)
let Some(server_id) = raw_servers.first().map(|s| s.id) else {
return Ok(None);
};

let server = raw_servers
.into_iter()
.filter(|s| s.id == server_id)
.reduce(reduce_chunk)
.expect("we got the id we're filtering by from the original list");

Ok(Some(server))
}

/// Fetch information about servers.
#[tracing::instrument(level = "debug", err(Debug, level = "debug"))]
pub async fn fetch_servers(&self, req: FetchServersRequest) -> Result<FetchServersResponse>
{
let mut txn = self.database.begin().await?;
let owner_id = match req.owned_by {
None => None,
Some(player) => Some(player.resolve_id(txn.as_mut()).await?),
Some(player) => Some(player.resolve_id(&self.database).await?),
};

let servers = sqlx::query_as::<_, FetchServerResponse>(&format!(
let server_count = sqlx::query_scalar!("SELECT COUNT(id) FROM Servers")
.fetch_one(&self.database)
.await?
.try_conv::<u64>()
.expect("positive count");

if *req.offset >= server_count {
return Ok(FetchServersResponse { servers: Vec::new(), total: server_count });
}

let server_chunks = sqlx::query_as::<_, FetchServerResponse>(&format!(
r"
{}
WHERE
Expand All @@ -111,8 +129,6 @@ impl ServerService
AND s.owner_id = COALESCE(?, s.owner_id)
AND s.created_on > COALESCE(?, '1970-01-01 00:00:01')
AND s.created_on < COALESCE(?, '2038-01-19 03:14:07')
LIMIT
? OFFSET ?
",
queries::SELECT,
))
Expand All @@ -121,14 +137,23 @@ impl ServerService
.bind(owner_id)
.bind(req.created_after)
.bind(req.created_before)
.bind(*req.limit)
.bind(*req.offset)
.fetch_all(txn.as_mut())
.await?;
.fetch_all(&self.database)
.await?
.into_iter()
.chunk_by(|s| s.id);

let total = txn.total_rows().await?;
// Take into account how many servers we're gonna skip over
let mut total = *req.offset;

txn.commit().await?;
let servers = server_chunks
.into_iter()
.map(|(_, chunk)| chunk.reduce(reduce_chunk).expect("chunk can't be empty"))
.skip(*req.offset as usize)
.take(*req.limit as usize)
.collect_vec();

total += servers.len() as u64;
total += server_chunks.into_iter().count() as u64;

Ok(FetchServersResponse { servers, total })
}
Expand Down Expand Up @@ -337,6 +362,21 @@ impl ServerService
}
}

/// Merges two database rows that describe the same server but carry
/// different tags.
///
/// Since SQL has no array values, the tag join produces one row per
/// (server, tag) pair — i.e. "duplicate" servers differing only in their
/// tags. Rows are grouped by server ID elsewhere, and each group is folded
/// down with this function so the final response holds all tags at once.
fn reduce_chunk(acc: FetchServerResponse, curr: FetchServerResponse) -> FetchServerResponse
{
	assert_eq!(acc.id, curr.id, "merging two unrelated servers");

	let mut merged = acc;
	merged.tags.0.extend(curr.tags.0);
	merged
}

#[cfg(test)]
mod tests
{
Expand Down
37 changes: 37 additions & 0 deletions src/services/servers/models/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -65,10 +65,47 @@ pub struct FetchServerResponse
#[sqlx(flatten)]
pub owner: ServerOwner,

/// Tags categorizing the server.
#[sqlx(rename = "tag_name")]
pub tags: ServerTags,

/// When this server was approved.
pub created_on: DateTime<Utc>,
}

/// Tags categorizing a server.
///
/// Newtype over a list of tag names so sqlx traits can be implemented for
/// it; `#[serde(transparent)]` makes it serialize as a plain array of
/// strings.
#[derive(Debug, Serialize, utoipa::ToSchema)]
#[serde(transparent)]
pub struct ServerTags(pub Vec<String>);

// Delegate SQL type information to `str`: in the database a tag is stored as
// a single text value (`ServerTags.name`), so a `ServerTags` column is
// type-compatible with whatever the driver considers a string.
impl<DB> sqlx::Type<DB> for ServerTags
where
    DB: sqlx::Database,
    str: sqlx::Type<DB>,
{
    fn type_info() -> <DB as sqlx::Database>::TypeInfo
    {
        <str as sqlx::Type<DB>>::type_info()
    }

    fn compatible(ty: &<DB as sqlx::Database>::TypeInfo) -> bool
    {
        <str as sqlx::Type<DB>>::compatible(ty)
    }
}

// Decodes a single tag cell into a one-element tag list. Each fetched row
// carries at most one tag; rows for the same server are merged afterwards,
// concatenating these singleton vectors into the full tag list.
//
// NOTE(review): the query uses an inner `JOIN ServerTags`, so a NULL tag
// should never reach this decoder — but that also means servers with zero
// tags are dropped from results entirely. If the join becomes a LEFT JOIN to
// fix that, this decoder must handle NULL — confirm intended behavior.
impl<'r, DB> sqlx::Decode<'r, DB> for ServerTags
where
    DB: sqlx::Database,
    String: sqlx::Decode<'r, DB>,
{
    fn decode(value: <DB as sqlx::Database>::ValueRef<'r>)
        -> Result<Self, sqlx::error::BoxDynError>
    {
        String::decode(value).map(|s| vec![s]).map(Self)
    }
}

impl IntoResponse for FetchServerResponse
{
fn into_response(self) -> Response
Expand Down
2 changes: 2 additions & 0 deletions src/services/servers/queries.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,10 @@ pub const SELECT: &str = r#"
s.port,
o.name owner_name,
o.id owner_id,
t.name tag_name,
s.created_on
FROM
Servers s
JOIN Players o ON o.id = s.owner_id
JOIN ServerTags t ON t.server_id = s.id
"#;