feat: Rewrite the backend in Rust #50

Closed
wants to merge 22 commits
23 changes: 23 additions & 0 deletions backend-rs/.gitignore
@@ -0,0 +1,23 @@
# Created by https://www.toptal.com/developers/gitignore/api/rust
# Edit at https://www.toptal.com/developers/gitignore?templates=rust

### Rust ###
# Generated by Cargo
# will have compiled files and executables
debug/
target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock

# These are backup files generated by rustfmt
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

# End of https://www.toptal.com/developers/gitignore/api/rust

# dotenv
.env
22 changes: 22 additions & 0 deletions backend-rs/Cargo.toml
@@ -0,0 +1,22 @@
[package]
name = "backend-rs"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
axum = { version = "0.7", features = ["tracing"] }
dotenv = "0.15"
opensearch = "2.2"
serde = "1.0"
serde_json = "1.0"
sqlx = { version = "0.7", features = ["runtime-tokio", "postgres"] }
tokio = { version = "1.37", features = ["full"] }
tower-http = { version = "0.5", features = ["trace"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

[dev-dependencies]
http-body-util = "0.1"
tower = "0.4"
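
Since `main.rs` loads `.env` via `dotenv` and reads `DATABASE_URL`, and `tracing-subscriber`'s `env-filter` feature takes its directives from `RUST_LOG`, a local `.env` for development might look like the sketch below. The values are placeholders, not something this PR ships:

# .env: example values only; the file stays untracked (see backend-rs/.gitignore)
DATABASE_URL=postgres://localhost:5432/postgres
RUST_LOG=backend_rs=debug,tower_http=debug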
247 changes: 247 additions & 0 deletions backend-rs/src/main.rs
@@ -0,0 +1,247 @@
use std::{collections::HashMap, env, sync::Arc};

use axum::{
    extract::{Query, State},
    http::StatusCode,
    response::IntoResponse,
    routing::{get, post},
    Json, Router,
};
use opensearch::{indices::IndicesCreateParts, OpenSearch, SearchParts};
use serde_json::{json, Value};
use sqlx::postgres::{PgPool, PgPoolOptions};
use tower_http::trace::TraceLayer;
use tracing_subscriber::{fmt, EnvFilter};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

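/// Shared application state (OpenSearch client and Postgres pool),
/// handed to handlers through axum's `State` extractor.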
struct AppState {
    opensearch: OpenSearch,
    pool: PgPool,
}

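/// Errors a handler can surface; each variant wraps the underlying client error.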
enum AppError {
    OpenSearchError(opensearch::Error),
    SqlxError(sqlx::Error),
}

impl From<opensearch::Error> for AppError {
    fn from(value: opensearch::Error) -> Self {
        AppError::OpenSearchError(value)
    }
}

impl From<sqlx::Error> for AppError {
    fn from(value: sqlx::Error) -> Self {
        AppError::SqlxError(value)
    }
}

impl IntoResponse for AppError {
    fn into_response(self) -> axum::response::Response {
        let body = match self {
            AppError::OpenSearchError(error) => error.to_string(),
            AppError::SqlxError(error) => error.to_string(),
        };
        (StatusCode::INTERNAL_SERVER_ERROR, Json(body)).into_response()
    }
}

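/// Response body for `GET /api/flake`.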
#[derive(serde::Serialize)]
struct GetFlakeResponse {
    releases: Vec<FlakeRelease>,
    count: usize,
    query: Option<String>,
}

#[derive(serde::Serialize, sqlx::FromRow)]
struct FlakeRelease {
    #[serde(skip_serializing)]
    id: i64,
    owner: String,
    repo: String,
    version: String,
    description: String,
    // TODO: Change to DateTime?
    created_at: String,
}

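/// Loads `.env`, initializes tracing, connects to Postgres, attempts to
/// create the "flakes" index (errors are ignored), and serves on port 3000.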
#[tokio::main]
async fn main() {
    // TODO: read PG and OS host names from env variables
    // build our application with a single route
    dotenv::dotenv().ok();
    tracing_subscriber::registry()
        .with(fmt::layer())
        .with(EnvFilter::from_default_env())
        .init();
    let database_url = env::var("DATABASE_URL").unwrap();
    let pool = PgPoolOptions::new().connect(&database_url).await.unwrap();
    let state = Arc::new(AppState {
        opensearch: OpenSearch::default(),
        pool,
    });
    // TODO: check if index exists before creating one
    let _ = state
        .opensearch
        .indices()
        .create(IndicesCreateParts::Index("flakes"))
        .send()
        .await;
    // run our app with hyper, listening globally on port 3000
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app(state)).await.unwrap();
}

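/// Builds the router: `/flake` and `/publish` are nested under `/api`,
/// with request tracing applied to every route.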
fn app(state: Arc<AppState>) -> Router {
    let api = Router::new()
        .route("/flake", get(get_flake))
        .route("/publish", post(post_publish));
    Router::new()
        .nest("/api", api)
        .layer(TraceLayer::new_for_http())
        .with_state(state)
}

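/// `GET /api/flake`: with a `q` parameter, ranks releases by their OpenSearch
/// score; without one, returns the 100 most recent releases.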
async fn get_flake(
    State(state): State<Arc<AppState>>,
    Query(params): Query<HashMap<String, String>>,
) -> Result<Json<GetFlakeResponse>, AppError> {
    let query = params.get("q");
    let releases = if let Some(q) = query {
        let response = &state
            .opensearch
            .search(SearchParts::Index(&["flakes"]))
            .size(10)
            .body(json!({
                "query": {
                    "multi_match": {
                        "query": q,
                        "fuzziness": "AUTO",
                        "fields": [
                            "description^2",
                            "readme",
                            "outputs",
                            "repo^2",
                            "owner^2",
                        ],
                    }
                }
            }))
            .send()
            .await?
            .json::<Value>()
            .await?;
        // TODO: Remove this unwrap, use fold or map to create the HashMap
        // Note: OpenSearch returns `_id` as a string and `_score` as a float.
        let mut hits: HashMap<i64, f64> = HashMap::new();
        for hit in response["hits"]["hits"].as_array().unwrap() {
            // TODO: properly handle errors
            hits.insert(
                hit["_id"].as_str().unwrap().parse::<i64>().unwrap(),
                hit["_score"].as_f64().unwrap(),
            );
        }
        // TODO: This query is actually a join between different tables
        let mut releases = sqlx::query_as::<_, FlakeRelease>(
            "SELECT release.id AS id, \
            githubowner.name AS owner, \
            githubrepo.name AS repo, \
            release.version AS version, \
            release.description AS description, \
            release.created_at AS created_at \
            FROM release \
            INNER JOIN githubrepo ON githubrepo.id = release.repo_id \
            INNER JOIN githubowner ON githubowner.id = githubrepo.owner_id \
            WHERE release.id = ANY($1)",
        )
        // Postgres rejects a Vec bound to `IN ($1)`; `= ANY($1)` accepts an array.
        .bind(hits.keys().cloned().collect::<Vec<i64>>())
        .fetch_all(&state.pool)
        .await?;
        releases.sort_by(|a, b| hits[&b.id].total_cmp(&hits[&a.id]));
        releases
    } else {
        sqlx::query_as::<_, FlakeRelease>(
            "SELECT release.id AS id, \
            githubowner.name AS owner, \
            githubrepo.name AS repo, \
            release.version AS version, \
            release.description AS description, \
            release.created_at AS created_at \
            FROM release \
            INNER JOIN githubrepo ON githubrepo.id = release.repo_id \
            INNER JOIN githubowner ON githubowner.id = githubrepo.owner_id \
            ORDER BY release.created_at DESC LIMIT 100",
        )
        .fetch_all(&state.pool)
        .await?
    };
    let count = releases.len();
    Ok(Json(GetFlakeResponse {
        releases,
        count,
        // TODO: Try to avoid using cloned()
        query: query.cloned(),
    }))
}

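/// `POST /api/publish`: placeholder handler.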
async fn post_publish() -> &'static str {
    "Publish"
}

#[cfg(test)]
mod tests {
    use std::env;

    use super::*;
    use axum::body::Body;
    use axum::http::{Request, StatusCode};
    use http_body_util::BodyExt;
    use sqlx::postgres::PgConnectOptions;
    use tower::ServiceExt;

    #[tokio::test]
    async fn test_get_flake_with_params() {
        let host = env::var("PGHOST").unwrap().to_string();
        let opts = PgConnectOptions::new().host(&host);
        let pool = PgPoolOptions::new().connect_with(opts).await.unwrap();
        let state = Arc::new(AppState {
            opensearch: OpenSearch::default(),
            pool,
        });
        let app = app(state);
        let response = app
            .oneshot(
                Request::builder()
                    .uri("/api/flake?q=search")
                    .body(Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        let body = response.into_body().collect().await.unwrap().to_bytes();
        let body: Value = serde_json::from_slice(&body).unwrap();
        println!("#{body}");
        // assert_eq!(response.status(), StatusCode::OK);
    }

    #[tokio::test]
    async fn test_get_flake_without_params() {
        let host = env::var("PGHOST").unwrap().to_string();
        let opts = PgConnectOptions::new().host(&host);
        let pool = PgPoolOptions::new().connect_with(opts).await.unwrap();
        let state = Arc::new(AppState {
            opensearch: OpenSearch::default(),
            pool,
        });
        let app = app(state);
        let response = app
            .oneshot(
                Request::builder()
                    .uri("/api/flake")
                    .body(Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(response.status(), StatusCode::OK);
    }
}
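
The hits loop in `get_flake` carries a TODO about building the map with `fold` or `map` instead of unwrapping. A minimal sketch of that idea (not part of the PR; the helper name is illustrative, and it assumes `_id` is a numeric string and `_score` a float, as in stock OpenSearch responses):

use std::collections::HashMap;

use serde_json::Value;

// Collect id → score pairs from a search response, returning None on the
// first malformed hit instead of panicking; `collect` short-circuits.
fn scores_from_hits(response: &Value) -> Option<HashMap<i64, f64>> {
    response["hits"]["hits"]
        .as_array()?
        .iter()
        .map(|hit| {
            let id = hit["_id"].as_str()?.parse::<i64>().ok()?;
            let score = hit["_score"].as_f64()?;
            Some((id, score))
        })
        .collect()
}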
40 changes: 40 additions & 0 deletions devenv.lock
@@ -17,6 +17,28 @@
        "type": "github"
      }
    },
    "fenix": {
      "inputs": {
        "nixpkgs": [
          "nixpkgs"
        ],
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
        "lastModified": 1714397108,
        "owner": "sandydoo",
        "repo": "fenix",
        "rev": "111b2aea9d2ac261c6f0a4aac4bde6c0e8c5bb3d",
        "treeHash": "1bd45fad5c728d297601fbfc51771f56488e2beb",
        "type": "github"
      },
      "original": {
        "owner": "sandydoo",
        "ref": "patch-rust-analyzer-preview",
        "repo": "fenix",
        "type": "github"
      }
    },
    "flake-compat": {
      "flake": false,
      "locked": {
@@ -182,12 +204,30 @@
    "root": {
      "inputs": {
        "devenv": "devenv",
        "fenix": "fenix",
        "mk-shell-bin": "mk-shell-bin",
        "nix2container": "nix2container",
        "nixpkgs": "nixpkgs",
        "pre-commit-hooks": "pre-commit-hooks"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
        "lastModified": 1713628977,
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "55d9a533b309119c8acd13061581b43ae8840823",
        "treeHash": "435e9c646978f38b8389e483b4d4fb15ba932cad",
        "type": "github"
      },
      "original": {
        "owner": "rust-lang",
        "ref": "nightly",
        "repo": "rust-analyzer",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
11 changes: 11 additions & 0 deletions devenv.nix
@@ -40,6 +40,11 @@ in
    pkgs.cloudflared
    pkgs.openapi-generator-cli
    pkgs.nodePackages.pyright
  ] ++ lib.optionals pkgs.stdenv.isDarwin [
    pkgs.darwin.CF
    pkgs.darwin.Security
    pkgs.darwin.configd
    pkgs.darwin.dyld
  ];

  # https://github.com/cachix/devenv/pull/745
@@ -59,6 +64,12 @@

  languages.elm.enable = true;

  languages.rust = {
    enable = true;
    # https://github.com/launchbadge/sqlx/blob/main/FAQ.md#what-versions-of-rust-does-sqlx-support-what-is-sqlxs-msrv
    channel = "stable";
  };

  services.opensearch.enable = !config.container.isBuilding;
  services.postgres.enable = !config.container.isBuilding;
  services.caddy.enable = true;
5 changes: 5 additions & 0 deletions devenv.yaml
@@ -8,3 +8,8 @@ inputs:
    follows: nixpkgs
  mk-shell-bin:
    url: github:rrbutani/nix-mk-shell-bin
  fenix:
    url: github:sandydoo/fenix/patch-rust-analyzer-preview
    inputs:
      nixpkgs:
        follows: nixpkgs