diff --git a/backend-rs/.gitignore b/backend-rs/.gitignore new file mode 100644 index 0000000..976e7ba --- /dev/null +++ b/backend-rs/.gitignore @@ -0,0 +1,23 @@ +# Created by https://www.toptal.com/developers/gitignore/api/rust +# Edit at https://www.toptal.com/developers/gitignore?templates=rust + +### Rust ### +# Generated by Cargo +# will have compiled files and executables +debug/ +target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +# MSVC Windows builds of rustc generate these, which store debugging information +*.pdb + +# End of https://www.toptal.com/developers/gitignore/api/rust + +# dotenv +.env diff --git a/backend-rs/Cargo.toml b/backend-rs/Cargo.toml new file mode 100644 index 0000000..aaf25f2 --- /dev/null +++ b/backend-rs/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "backend-rs" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +axum = { version = "0.7", features = ["tracing"] } +dotenv = "0.15" +opensearch = "2.2" +serde = "1.0" +serde_json = "1.0" +sqlx = { version = "0.7", features = ["runtime-tokio", "postgres"] } +tokio = { version = "1.37", features = ["full"] } +tower-http = { version = "0.5", features = ["trace"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[dev-dependencies] +http-body-util = "0.1" +tower = "0.4" diff --git a/backend-rs/src/main.rs b/backend-rs/src/main.rs new file mode 100644 index 0000000..730c759 --- /dev/null +++ b/backend-rs/src/main.rs @@ -0,0 +1,247 @@ +use std::{collections::HashMap, env, sync::Arc}; + +use axum::{ + extract::{Query, State}, + http::StatusCode, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use 
opensearch::{indices::IndicesCreateParts, OpenSearch, SearchParts};
use serde_json::{json, Value};
use sqlx::postgres::{PgPool, PgPoolOptions};
use tower_http::trace::TraceLayer;
use tracing_subscriber::{fmt, EnvFilter};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

/// Shared state handed to every request handler: the OpenSearch client
/// and the Postgres connection pool.
struct AppState {
    opensearch: OpenSearch,
    pool: PgPool,
}

/// Application-level error wrapping both backend error types so handlers
/// can propagate either one with `?`.
enum AppError {
    OpenSearchError(opensearch::Error),
    SqlxError(sqlx::Error),
}

impl From<opensearch::Error> for AppError {
    fn from(value: opensearch::Error) -> Self {
        AppError::OpenSearchError(value)
    }
}

impl From<sqlx::Error> for AppError {
    fn from(value: sqlx::Error) -> Self {
        AppError::SqlxError(value)
    }
}

impl IntoResponse for AppError {
    // Every backend failure is reported as a 500 with the error text as a
    // JSON string body.
    fn into_response(self) -> axum::response::Response {
        let body = match self {
            AppError::OpenSearchError(error) => error.to_string(),
            AppError::SqlxError(error) => error.to_string(),
        };
        (StatusCode::INTERNAL_SERVER_ERROR, Json(body)).into_response()
    }
}

/// Payload returned by `GET /api/flake`.
#[derive(serde::Serialize)]
struct GetFlakeResponse {
    releases: Vec<FlakeRelease>,
    count: usize,
    query: Option<String>,
}

/// One flake release row, as selected by the queries in `get_flake`.
#[derive(serde::Serialize, sqlx::FromRow)]
struct FlakeRelease {
    // Internal database id; used only to join DB rows against search scores,
    // never serialized to clients.
    #[serde(skip_serializing)]
    id: i64,
    owner: String,
    repo: String,
    version: String,
    description: String,
    // TODO: Change to DateTime?
    created_at: String,
}

#[tokio::main]
async fn main() {
    // TODO: read PG and OS host names from env variables
    dotenv::dotenv().ok();
    tracing_subscriber::registry()
        .with(fmt::layer())
        .with(EnvFilter::from_default_env())
        .init();
    // Fail fast with a clear message instead of a bare unwrap panic.
    let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    let pool = PgPoolOptions::new()
        .connect(&database_url)
        .await
        .expect("failed to connect to Postgres");
    let state = Arc::new(AppState {
        opensearch: OpenSearch::default(),
        pool,
    });
    // TODO: check if index exists before creating one; the result is
    // deliberately ignored because the index usually already exists.
    let _ = state
        .opensearch
        .indices()
        .create(IndicesCreateParts::Index("flakes"))
        .send()
        .await;
    // Run the app with hyper, listening globally on port 3000.
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app(state)).await.unwrap();
}

/// Builds the router; split out of `main` so tests can drive it directly.
fn app(state: Arc<AppState>) -> Router {
    let api = Router::new()
        .route("/flake", get(get_flake))
        .route("/publish", post(post_publish));
    Router::new()
        .nest("/api", api)
        .layer(TraceLayer::new_for_http())
        .with_state(state)
}

/// `GET /api/flake` — with `?q=` runs a fuzzy OpenSearch multi-match query
/// and returns matching releases ordered by relevance; without it, returns
/// the 100 most recently created releases.
async fn get_flake(
    State(state): State<Arc<AppState>>,
    Query(params): Query<HashMap<String, String>>,
) -> Result<Json<GetFlakeResponse>, AppError> {
    let query = params.get("q");
    let releases = if let Some(q) = query {
        let response = &state
            .opensearch
            .search(SearchParts::Index(&["flakes"]))
            .size(10)
            .body(json!({
                "query": {
                    "multi_match": {
                        "query": q,
                        "fuzziness": "AUTO",
                        "fields": [
                            "description^2",
                            "readme",
                            "outputs",
                            "repo^2",
                            "owner^2",
                        ],
                    }
                }
            }))
            .send()
            .await?
            .json::<Value>()
            .await?;
        // Map release id -> relevance score.
        // NOTE: OpenSearch returns `_id` as a JSON string and `_score` as a
        // float, so reading either with `as_i64()` always yields `None` and
        // the unwrap panics on the very first hit.
        // TODO: properly handle errors instead of unwrapping
        let mut hits: HashMap<i64, f64> = HashMap::new();
        for hit in response["hits"]["hits"].as_array().unwrap() {
            let id = hit["_id"]
                .as_str()
                .and_then(|s| s.parse::<i64>().ok())
                .unwrap();
            let score = hit["_score"].as_f64().unwrap();
            hits.insert(id, score);
        }
        // TODO: This query is actually a join between different tables
        // `IN ($1)` cannot expand a bound Vec in Postgres; `= ANY($1)` is
        // the supported way to bind a list parameter with sqlx.
        let mut releases = sqlx::query_as::<_, FlakeRelease>(
            "SELECT release.id AS id, \
                githubowner.name AS owner, \
                githubrepo.name AS repo, \
                release.version AS version, \
                release.description AS description, \
                release.created_at AS created_at \
                FROM release \
                INNER JOIN githubrepo ON githubrepo.id = release.repo_id \
                INNER JOIN githubowner ON githubowner.id = githubrepo.owner_id \
                WHERE release.id = ANY($1)",
        )
        .bind(hits.keys().copied().collect::<Vec<i64>>())
        .fetch_all(&state.pool)
        .await?;
        // Highest score first; `total_cmp` provides a total order over f64
        // (plain `cmp` does not exist for floats).
        releases.sort_by(|a, b| hits[&b.id].total_cmp(&hits[&a.id]));
        releases
    } else {
        sqlx::query_as::<_, FlakeRelease>(
            "SELECT release.id AS id, \
                githubowner.name AS owner, \
                githubrepo.name AS repo, \
                release.version AS version, \
                release.description AS description, \
                release.created_at AS created_at \
                FROM release \
                INNER JOIN githubrepo ON githubrepo.id = release.repo_id \
                INNER JOIN githubowner ON githubowner.id = githubrepo.owner_id \
                ORDER BY release.created_at DESC LIMIT 100",
        )
        .fetch_all(&state.pool)
        .await?
+ }; + let count = releases.len(); + return Ok(Json(GetFlakeResponse { + releases, + count, + // TODO: Try to avoid using cloned() + query: query.cloned(), + })); +} + +async fn post_publish() -> &'static str { + "Publish" +} + +#[cfg(test)] +mod tests { + use std::env; + + use super::*; + use axum::body::Body; + use axum::http::{Request, StatusCode}; + use http_body_util::BodyExt; + use sqlx::postgres::PgConnectOptions; + use tower::ServiceExt; + + #[tokio::test] + async fn test_get_flake_with_params() { + let host = env::var("PGHOST").unwrap().to_string(); + let opts = PgConnectOptions::new().host(&host); + let pool = PgPoolOptions::new().connect_with(opts).await.unwrap(); + let state = Arc::new(AppState { + opensearch: OpenSearch::default(), + pool, + }); + let app = app(state); + let response = app + .oneshot( + Request::builder() + .uri("/api/flake?q=search") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let body = response.into_body().collect().await.unwrap().to_bytes(); + let body: Value = serde_json::from_slice(&body).unwrap(); + println!("#{body}"); + // assert_eq!(response.status(), StatusCode::OK); + } + + #[tokio::test] + async fn test_get_flake_without_params() { + let host = env::var("PGHOST").unwrap().to_string(); + let opts = PgConnectOptions::new().host(&host); + let pool = PgPoolOptions::new().connect_with(opts).await.unwrap(); + let state = Arc::new(AppState { + opensearch: OpenSearch::default(), + pool, + }); + let app = app(state); + let response = app + .oneshot( + Request::builder() + .uri("/api/flake") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + } +} diff --git a/devenv.lock b/devenv.lock index a8e423b..938e814 100644 --- a/devenv.lock +++ b/devenv.lock @@ -17,6 +17,28 @@ "type": "github" } }, + "fenix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "rust-analyzer-src": "rust-analyzer-src" + }, + "locked": { + "lastModified": 1714397108, + "owner": 
"sandydoo", + "repo": "fenix", + "rev": "111b2aea9d2ac261c6f0a4aac4bde6c0e8c5bb3d", + "treeHash": "1bd45fad5c728d297601fbfc51771f56488e2beb", + "type": "github" + }, + "original": { + "owner": "sandydoo", + "ref": "patch-rust-analyzer-preview", + "repo": "fenix", + "type": "github" + } + }, "flake-compat": { "flake": false, "locked": { @@ -182,12 +204,30 @@ "root": { "inputs": { "devenv": "devenv", + "fenix": "fenix", "mk-shell-bin": "mk-shell-bin", "nix2container": "nix2container", "nixpkgs": "nixpkgs", "pre-commit-hooks": "pre-commit-hooks" } }, + "rust-analyzer-src": { + "flake": false, + "locked": { + "lastModified": 1713628977, + "owner": "rust-lang", + "repo": "rust-analyzer", + "rev": "55d9a533b309119c8acd13061581b43ae8840823", + "treeHash": "435e9c646978f38b8389e483b4d4fb15ba932cad", + "type": "github" + }, + "original": { + "owner": "rust-lang", + "ref": "nightly", + "repo": "rust-analyzer", + "type": "github" + } + }, "systems": { "locked": { "lastModified": 1681028828, diff --git a/devenv.nix b/devenv.nix index d247908..1d0d219 100644 --- a/devenv.nix +++ b/devenv.nix @@ -40,6 +40,11 @@ in pkgs.cloudflared pkgs.openapi-generator-cli pkgs.nodePackages.pyright + ] ++ lib.optionals pkgs.stdenv.isDarwin [ + pkgs.darwin.CF + pkgs.darwin.Security + pkgs.darwin.configd + pkgs.darwin.dyld ]; # https://github.com/cachix/devenv/pull/745 @@ -59,6 +64,12 @@ in languages.elm.enable = true; + languages.rust = { + enable = true; + # https://github.com/launchbadge/sqlx/blob/main/FAQ.md#what-versions-of-rust-does-sqlx-support-what-is-sqlxs-msrv + channel = "stable"; + }; + services.opensearch.enable = !config.container.isBuilding; services.postgres.enable = !config.container.isBuilding; services.caddy.enable = true; diff --git a/devenv.yaml b/devenv.yaml index 8ca8144..f652952 100644 --- a/devenv.yaml +++ b/devenv.yaml @@ -8,3 +8,8 @@ inputs: follows: nixpkgs mk-shell-bin: url: github:rrbutani/nix-mk-shell-bin + fenix: + url: 
github:sandydoo/fenix/patch-rust-analyzer-preview + inputs: + nixpkgs: + follows: nixpkgs