Skip to content
Open
Show file tree
Hide file tree
Changes from 52 commits
Commits
Show all changes
58 commits
Select commit Hold shift + click to select a range
0172394
fix: remove /distribute endpoint
Karrq Oct 6, 2025
fd46bcf
fix: remove `storage` from msp service
Karrq Oct 6, 2025
ece041d
refactor: file list without empty children
Karrq Oct 6, 2025
9240715
chore: cleanup changes
Karrq Oct 6, 2025
1bec86d
feat(download): authenticate user
Karrq Oct 7, 2025
5fd6128
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 7, 2025
a3c9f01
feat(download): authenticate user
Karrq Oct 7, 2025
e8e1223
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 7, 2025
7bda7f3
chore: cleanup debug lines
Karrq Oct 7, 2025
8ba5df4
fix: update filelist typings
Karrq Oct 7, 2025
a549a9e
feat(auth): configurable jwt expiry
Karrq Oct 8, 2025
c4670ed
fix: remove dead code
Karrq Oct 8, 2025
da65b10
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 9, 2025
90bccb4
docs: extra todo
Karrq Oct 9, 2025
17c03f7
feat(auth): config nonce duration and siwe domain
Karrq Oct 9, 2025
fe82a37
feat(config): msp, rpc and upload retries
Karrq Oct 10, 2025
b6d16f7
feat(indexer): bucket total size, file count and value prop
Karrq Oct 10, 2025
50f4c98
perf(backend): cache msp id
Karrq Oct 10, 2025
e11d32a
perf(backend:db): request items by page
Karrq Oct 10, 2025
50af8f1
refactor: use typed `Address` instead of strings
Karrq Oct 10, 2025
044b227
fix(backend): cleanup health service
Karrq Oct 10, 2025
09adb12
chore: fix compilation
Karrq Oct 10, 2025
6df048e
fix(backend): mock repository relations
Karrq Oct 20, 2025
a305be3
fix(db): bucket value prop id
Karrq Oct 20, 2025
e5be386
chore: lints
Karrq Oct 20, 2025
e29e329
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 20, 2025
dd1e4f9
fix(config): remove rpc retry config items
Karrq Oct 20, 2025
f247b66
chore: fmt
Karrq Oct 21, 2025
d58da99
fix(test): update expected error code
Karrq Oct 21, 2025
8bcfa67
fix(test): 2 more 0xstrings expected
Karrq Oct 21, 2025
92314cd
fix: one more 0x string
Karrq Oct 21, 2025
1f4494e
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 21, 2025
6f91640
Merge branch 'main' into backend-cleanups
TDemeco Oct 21, 2025
434ec4d
fix(test): sdk mock e2e upload test data
Karrq Oct 22, 2025
49a2ed4
fix: proper bucket1_file3_key
Karrq Oct 22, 2025
ed64637
fix: allow download with mocks
Karrq Oct 23, 2025
ca913e6
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 23, 2025
a758c87
Merge branch 'main' into backend-cleanups
Karrq Oct 23, 2025
1041bad
fix: amend PR comments
Karrq Oct 24, 2025
5f0ca17
feat(backend): pagination query parameters
Karrq Oct 27, 2025
9de48e0
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 27, 2025
62bccf6
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 28, 2025
b4e5ab9
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 28, 2025
72b19af
chore: lints
Karrq Oct 28, 2025
1667d0a
fix: profile with checksummed address
Karrq Oct 29, 2025
af8c477
fix(backend): db not found leads to 404
Karrq Oct 29, 2025
2ee6617
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 29, 2025
efa1b53
fix(test): adjust rust test assert
Karrq Oct 29, 2025
fed6549
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Oct 30, 2025
045a779
refactor(msp): get_file w/o auth
Karrq Oct 30, 2025
4dfb7d7
fix: compilation
Karrq Oct 30, 2025
5ce9cad
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Nov 3, 2025
be7b5bb
refactor(health): attempt rpc call before conn
Karrq Nov 4, 2025
be564c6
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Nov 4, 2025
a7f90b6
feat: optional authentication for query endpoints
Karrq Nov 4, 2025
ee674a5
chore: fix grammar
Karrq Nov 5, 2025
6838c4d
Merge remote-tracking branch 'origin/main' into backend-cleanups
Karrq Nov 5, 2025
b7d45ad
chore: cleanup dead method
Karrq Nov 6, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 11 additions & 3 deletions backend/backend_config.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,6 @@ jwt_secret = "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
# WebSocket RPC endpoint for the StorageHub node
# Use ws:// for unencrypted or wss:// for TLS-encrypted connections
rpc_url = "ws://localhost:9944"
# URL for node to communicate with the backend (e.g for uploading a file)
# e.g: if running an msp in a container: http://host.docker.internal:8080
msp_callback_url = "http://localhost:8080"
# Request timeout in seconds (optional, default: 30)
# Increase this value for slower networks or heavy operations
timeout_secs = 30
Expand All @@ -42,6 +39,17 @@ verify_tls = true
# When true, uses mock RPC connections instead of real network calls
# mock_mode = false

# MSP-specific configuration
[msp]
# URL for the node to communicate with the backend (e.g. for uploading a file)
# e.g.: if running an MSP in a container: http://host.docker.internal:8080
callback_url = "http://localhost:8080"
# Number of retry attempts for file upload operations (default: 3)
# Increase for unreliable network conditions
upload_retry_attempts = 3
# Delay in seconds between file upload retry attempts (default: 1)
upload_retry_delay_secs = 1

# Database configuration
[database]
# PostgreSQL connection URL
Expand Down
4 changes: 2 additions & 2 deletions backend/bin/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ async fn main() -> Result<()> {
);
debug!(target: "main", database_url = %config.database.url, "Database configuration");
debug!(target: "main", rpc_url = %config.storage_hub.rpc_url, "RPC configuration");
debug!(target: "main", msp_callback_url = %config.storage_hub.msp_callback_url, "MSP callback configuration");
debug!(target: "main", msp_callback_url = %config.msp.callback_url, "MSP callback configuration");

let memory_storage = InMemoryStorage::new();
let storage = Arc::new(BoxedStorageWrapper::new(memory_storage));
Expand Down Expand Up @@ -135,7 +135,7 @@ fn load_config() -> Result<Config> {
config.storage_hub.rpc_url = rpc_url;
}
if let Some(msp_callback_url) = args.msp_callback_url {
config.storage_hub.msp_callback_url = msp_callback_url;
config.msp.callback_url = msp_callback_url;
}

Ok(config)
Expand Down
2 changes: 1 addition & 1 deletion backend/lib/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ chrono = { workspace = true, features = ["serde"] }
diesel = { workspace = true }
diesel-async = { workspace = true }
futures = { workspace = true }
alloy-core = "0.8"
alloy-core = { version = "0.8", features = ["serde"] }
alloy-signer = "0.8"
headers = { workspace = true }
hex = { workspace = true }
Expand Down
27 changes: 12 additions & 15 deletions backend/lib/src/api/handlers/auth.rs
Original file line number Diff line number Diff line change
Expand Up @@ -93,15 +93,15 @@ mod tests {

// Step 1: Get nonce challenge
let nonce_request = NonceRequest {
address: address.clone(),
address,
chain_id: 1,
};

let response = server.post(AUTH_NONCE_ENDPOINT).json(&nonce_request).await;

assert_eq!(response.status_code(), StatusCode::OK);
let nonce_response: NonceResponse = response.json();
assert!(nonce_response.message.contains(&address));
assert!(nonce_response.message.contains(&address.to_string()));

// Step 2: Sign the message and login
let signature = sign_message(&signing_key, &nonce_response.message);
Expand Down Expand Up @@ -163,17 +163,14 @@ mod tests {
let app = mock_app().await;
let server = TestServer::new(app).unwrap();

let invalid_request = NonceRequest {
address: "not_an_eth_address".to_string(),
chain_id: 1,
};
let invalid_json = serde_json::json!({
"address": "not_an_eth_address",
"chainId": 1
});

let response = server
.post(AUTH_NONCE_ENDPOINT)
.json(&invalid_request)
.await;
let response = server.post(AUTH_NONCE_ENDPOINT).json(&invalid_json).await;

assert_eq!(response.status_code(), StatusCode::BAD_REQUEST);
assert_eq!(response.status_code(), StatusCode::UNPROCESSABLE_ENTITY);
}

#[tokio::test]
Expand Down Expand Up @@ -299,7 +296,7 @@ mod tests {
// Unfortunately we can't easily advance system time
// so instead we create an "old" token
let old_claims = JwtClaims {
address: MOCK_ADDRESS.to_string(),
address: MOCK_ADDRESS,
iat: Utc::now().timestamp() - 10, // issued 10 seconds ago
exp: Utc::now().timestamp() + 10, // expires in 10 seconds
};
Expand Down Expand Up @@ -408,7 +405,7 @@ mod tests {

// Login with first wallet
let nonce_request1 = NonceRequest {
address: address1.clone(),
address: address1,
chain_id: 1,
};

Expand All @@ -428,7 +425,7 @@ mod tests {

// Login with second wallet
let nonce_request2 = NonceRequest {
address: address2.clone(),
address: address2,
chain_id: 1,
};

Expand Down Expand Up @@ -485,7 +482,7 @@ mod tests {
// until the token is expired
// so we create a token that's already expired
let expired_claims = JwtClaims {
address: MOCK_ADDRESS.to_string(),
address: MOCK_ADDRESS,
exp: Utc::now().timestamp() - 3600, // 1 hour ago
iat: Utc::now().timestamp() - 7200, // 2 hours ago
};
Expand Down
13 changes: 8 additions & 5 deletions backend/lib/src/api/handlers/buckets.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,18 +9,19 @@ use serde::Deserialize;
use tracing::debug;

use crate::{
error::Error, models::files::FileListResponse, services::auth::AuthenticatedUser,
services::Services,
api::handlers::pagination::Pagination, error::Error, models::files::FileListResponse,
services::auth::AuthenticatedUser, services::Services,
};

pub async fn list_buckets(
State(services): State<Services>,
AuthenticatedUser { address }: AuthenticatedUser,
Pagination { limit, offset }: Pagination,
) -> Result<impl IntoResponse, Error> {
debug!(user = %address, "GET list buckets");
let response = services
.msp
.list_user_buckets(&address)
.list_user_buckets(&address, offset, limit)
.await?
.collect::<Vec<_>>();
Ok(Json(response))
Expand All @@ -47,6 +48,7 @@ pub async fn get_files(
AuthenticatedUser { address }: AuthenticatedUser,
Path(bucket_id): Path<String>,
Query(query): Query<FilesQuery>,
Pagination { limit, offset }: Pagination,
) -> Result<impl IntoResponse, Error> {
let path = query.path.as_deref().unwrap_or("/");
debug!(
Expand All @@ -55,14 +57,15 @@ pub async fn get_files(
user = %address,
"GET bucket files"
);

let file_tree = services
.msp
.get_file_tree(&bucket_id, &address, path)
.get_file_tree(&bucket_id, &address, path, offset, limit)
.await?;

let response = FileListResponse {
bucket_id: bucket_id.clone(),
files: vec![file_tree],
tree: file_tree,
};

Ok(Json(response))
Expand Down
55 changes: 15 additions & 40 deletions backend/lib/src/api/handlers/files.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
//! This module contains the handlers for the file management endpoints
//!
//! TODO: move the rest of the endpoints as they are implemented

use axum::{
body::{Body, Bytes},
Expand Down Expand Up @@ -30,32 +28,29 @@ use crate::{
pub async fn get_file_info(
State(services): State<Services>,
AuthenticatedUser { address }: AuthenticatedUser,
Path((bucket_id, file_key)): Path<(String, String)>,
Path((_bucket_id, file_key)): Path<(String, String)>,
) -> Result<impl IntoResponse, Error> {
debug!(
bucket_id = %bucket_id,
file_key = %file_key,
user = %address,
"GET file info"
);
let response = services
.msp
.get_file_info(&bucket_id, &address, &file_key)
.await?;
let response = services.msp.get_file_info(&address, &file_key).await?;
Ok(Json(response))
}

// Internal endpoint used by the MSP RPC to upload a file to the backend
// This function streams the file chunks via a channel to the thread running download_by_key.
/// Internal endpoint used by the MSP RPC to upload a file to the backend
///
/// This function streams the file chunks via a channel to the thread running download_by_key.
// TODO(AUTH): Add MSP Node authentication
// Currently this internal endpoint doesn't authenticate that
// the client connecting to it is the MSP Node
pub async fn internal_upload_by_key(
State(services): State<Services>,
Path((session_id, file_key)): Path<(String, String)>,
body: Body,
) -> (StatusCode, impl IntoResponse) {
debug!(file_key = %file_key, "PUT internal upload");
// TODO: re-add auth
// FIXME: make this only callable by the rpc itself
// let _auth = extract_bearer_token(&auth)?;

// Validate file_key is a hex string
let key = file_key.trim_start_matches("0x");
Expand Down Expand Up @@ -113,7 +108,7 @@ pub async fn download_by_key(
Path(file_key): Path<String>,
) -> Result<impl IntoResponse, Error> {
debug!(file_key = %file_key, user = %address, "GET download file");
// TODO(AUTH): verify that user has permissions to access this file

// Validate file_key is a hex string
let key = file_key.trim_start_matches("0x");
if hex::decode(key).is_err() {
Expand All @@ -123,6 +118,9 @@ pub async fn download_by_key(
// Check if file exists in MSP storage
let file_metadata = services.msp.check_file_status(&file_key).await?;

// Verify user has access to the requested file
let file_info = services.msp.get_file_info(&address, &file_key).await?;

// Generate a unique session ID for the download session
let session_id = Uuid::now_v7().to_string();

Expand All @@ -138,13 +136,9 @@ pub async fn download_by_key(
.add_session(&session_id, tx)
.map_err(|e| Error::BadRequest(e.to_string()))?;

let file_key_clone = file_key.clone();
tokio::spawn(async move {
// We trigger the download process via RPC call
let _ = services
.msp
.get_file_from_key(&session_id, &file_key_clone)
.await;
_ = services.msp.get_file(&session_id, file_info).await;
});

// Extract filename from location or use file_key as fallback
Expand Down Expand Up @@ -182,16 +176,14 @@ pub async fn download_by_key(
pub async fn upload_file(
State(services): State<Services>,
AuthenticatedUser { address }: AuthenticatedUser,
Path((bucket_id, file_key)): Path<(String, String)>,
Path((_bucket_id, file_key)): Path<(String, String)>,
mut multipart: Multipart,
) -> Result<impl IntoResponse, Error> {
debug!(
bucket_id = %bucket_id,
file_key = %file_key,
user = %address,
"PUT upload file"
);
// TODO(AUTH): verify that user has permissions to access this file

// Pre-check with MSP whether this file key is expected before doing heavy processing
let is_expected = services
Expand Down Expand Up @@ -252,25 +244,8 @@ pub async fn upload_file(
// Process and upload the file using the MSP service
let response = services
.msp
.process_and_upload_file(&bucket_id, &file_key, file_data_stream, file_metadata)
.process_and_upload_file(&address, &file_key, file_data_stream, file_metadata)
.await?;

Ok((StatusCode::CREATED, Json(response)))
}

pub async fn distribute_file(
State(services): State<Services>,
AuthenticatedUser { address }: AuthenticatedUser,
Path((bucket_id, file_key)): Path<(String, String)>,
) -> Result<impl IntoResponse, Error> {
debug!(
bucket_id = %bucket_id,
file_key = %file_key,
user = %address,
"POST distribute file"
);
// TODO(AUTH): verify that user has permissions to access this file

let response = services.msp.distribute_file(&bucket_id, &file_key).await?;
Ok(Json(response))
}
2 changes: 2 additions & 0 deletions backend/lib/src/api/handlers/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ pub mod auth;
pub mod buckets;
pub mod files;

mod pagination;

// ==================== MSP Info Handlers ====================

pub async fn info(State(services): State<Services>) -> Result<impl IntoResponse, Error> {
Expand Down
61 changes: 61 additions & 0 deletions backend/lib/src/api/handlers/pagination.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
use axum::{
extract::{rejection::QueryRejection, FromRequestParts, Query},
http::request::Parts,
};
use serde::Deserialize;

use crate::constants::database::{DEFAULT_PAGE_LIMIT, MAX_PAGE_LIMIT};

// NOTE: we use i64 because the db uses i64
/// Resolved pagination parameters for the given endpoint
///
/// Built from [`PaginationQuery`] by the `FromRequestParts` extractor below;
/// the query parameters used for the requests are the ones described in [`PaginationQuery`]
///
/// Values:
/// * `limit`: the maximum amount of items to respond with
/// * `offset`: the number of items to skip for this request (`page * limit`)
#[derive(Debug)]
pub struct Pagination {
    pub limit: i64,
    pub offset: i64,
}

impl From<PaginationQuery> for Pagination {
    /// Resolve the raw query parameters into concrete `limit`/`offset` values.
    ///
    /// `limit` falls back to [`DEFAULT_PAGE_LIMIT`] and is clamped to
    /// `[0, MAX_PAGE_LIMIT]`; `page` is clamped to be non-negative so a
    /// client-supplied negative page can never produce a negative offset.
    fn from(value: PaginationQuery) -> Self {
        let limit = value.limit.unwrap_or(DEFAULT_PAGE_LIMIT).clamp(
            0,
            MAX_PAGE_LIMIT
                .try_into()
                .expect("MAX_PAGE_LIMIT to be representable as i64"),
        );

        // Treat negative page indices as page 0, and use a saturating
        // multiply so an absurdly large `page` cannot overflow i64
        // (which would panic in debug builds).
        let page = value.page.unwrap_or(0).max(0);
        let offset = limit.saturating_mul(page);

        Self { offset, limit }
    }
}

// NOTE: we use i64 because the db uses i64
/// Pagination query parameters for the given endpoint
///
/// Parameters:
/// * `limit`: the maximum amount of items to respond with (defaults to [`DEFAULT_PAGE_LIMIT`], capped at [`MAX_PAGE_LIMIT`])
/// * `page`: the zero-based page index to return (defaults to 0); the number of items skipped is `page * limit`
#[derive(Debug, Deserialize)]
pub struct PaginationQuery {
    page: Option<i64>,
    limit: Option<i64>,
}

impl<S> FromRequestParts<S> for Pagination
where
S: Send + Sync,
{
type Rejection = QueryRejection;

async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
let query = Query::<PaginationQuery>::from_request_parts(parts, state).await?;

Ok(query.0.into())
}
}
4 changes: 0 additions & 4 deletions backend/lib/src/api/routes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,6 @@ pub fn routes(services: Services) -> Router {
)
.merge(file_upload)
.merge(internal_file_upload)
.route(
"/buckets/{bucket_id}/distribute/{file_key}",
post(handlers::files::distribute_file),
)
.route(
"/download/{file_key}",
get(handlers::files::download_by_key),
Expand Down
Loading
Loading