// rpfm_server/main.rs
//---------------------------------------------------------------------------//
// Copyright (c) 2017-2026 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//

//! # `rpfm_server`
//!
//! Backend process for [Rusted PackFile Manager][rpfm]. Hosts the heavy work
//! that the Qt6 UI ([`rpfm_ui`][ui]) and AI / MCP clients drive remotely:
//! Pack I/O, schema decoding, diagnostics, search, dependencies, optimisation
//! and so on.
//!
//! [rpfm]: https://github.com/Frodo45127/rpfm
//! [ui]: https://crates.io/crates/rpfm_ui
//!
//! ## Architecture
//!
//! The server is built on [`axum`] (HTTP + WebSocket) and [`tokio`]. It binds
//! to `127.0.0.1:45127` by default and exposes three endpoints:
//!
//! | Endpoint    | Method | Purpose                                                                          |
//! |-------------|--------|----------------------------------------------------------------------------------|
//! | `/ws`       | GET    | WebSocket upgrade. Carries the [`rpfm_ipc`] command/response protocol.           |
//! | `/sessions` | GET    | REST: list every active session (used by the UI session picker).                 |
//! | `/mcp`      | *      | MCP `StreamableHttpService` exposing the same surface to AI / MCP clients.       |
//!
//! Every client connection is wrapped in a [`session::Session`] managed by a
//! [`session::SessionManager`]. Each session owns a dedicated background
//! thread (see [`background_thread`]) that processes commands serially against
//! its own in-memory state (open packs, dependency cache, settings cache),
//! so multiple concurrent clients can't step on each other.
//!
//! ## Modules
//!
//! - [`background_thread`] — central command dispatcher; one async loop per session.
//! - [`comms`] — generic mpsc-based request/response abstraction used to talk
//!   to the background thread.
//! - [`server_websocket`] — `/ws` upgrade handler and message multiplexer.
//! - [`server_mcp`] — `/mcp` endpoint: tools, prompts, resources for MCP clients.
//! - [`session`] — `SessionManager`, `Session`, lifecycle and timeout handling.
//! - [`settings`] — JSON-backed settings store with batch-write optimisation.
//! - [`updater`] — self-update checks against GitHub releases.
//!
//! ## Telemetry
//!
//! Logging, panic capture and action telemetry are wired through
//! [`rpfm_telemetry`]. The Sentry guard returned by [`Logger::init`] is held
//! for the process lifetime in [`main`].

// Third-party crates: HTTP/WebSocket framework, MCP transport, async runtime, logging.
use axum::{extract::State, routing::get, Json, Router};
use rmcp::transport::streamable_http_server::{session::local::LocalSessionManager, StreamableHttpService};
use tokio::net::TcpListener;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, Layer};

// Standard library.
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;

// Workspace crates: IPC protocol types, settings keys and telemetry plumbing.
use rpfm_ipc::helpers::SessionInfo;
use rpfm_ipc::messages::{Command, Response};
use rpfm_ipc::settings_keys::{ENABLE_CRASH_REPORTS, ENABLE_USAGE_TELEMETRY};

use rpfm_telemetry::{Logger, SentryLayer, SENTRY_DSN, error, info, release_name};

// Local modules, declared below.
use crate::server_mcp::McpServer;
use crate::session::SessionManager;
use crate::settings::{error_path, init_config_path, Settings};
use crate::server_websocket::ws_handler;

pub mod background_thread;
pub mod comms;
pub mod server_mcp;
pub mod server_websocket;
pub mod session;
pub mod settings;
pub mod updater;
#[cfg(test)] mod updater_test;

use mimalloc::MiMalloc;

// Install mimalloc as the process-wide allocator: every heap allocation in
// this binary goes through it instead of the system default.
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;

//-------------------------------------------------------------------------------//
//                                  Constants
//-------------------------------------------------------------------------------//

/// Sentry DSN used for crash reports and action telemetry.
///
/// Baked in at *compile time* from the `RPFM_SERVER_SENTRY_DSN` environment
/// variable; falls back to an empty string when the variable is unset at
/// build time. (`match` is used because `Option::unwrap_or` is not usable
/// in a `const` context.)
const SENTRY_DSN_KEY: &str = match option_env!("RPFM_SERVER_SENTRY_DSN") {
    Some(dsn) => dsn,
    None => "",
};

/// Default IP address the HTTP server binds to (`127.0.0.1` / loopback).
const DEFAULT_ADDRESS: [u8; 4] = [127, 0, 0, 1];

/// Default TCP port the HTTP server listens on.
const DEFAULT_PORT: u16 = 45127;

/// Organisation domain used to derive the OS-specific config directory
/// (mirrors `QCoreApplication::organizationDomain` on the UI side).
const ORG_DOMAIN: &str = "com";

/// Organisation name used to derive the OS-specific config directory.
const ORG_NAME: &str = "FrodoWazEre";

/// Application name used to derive the OS-specific config directory.
const APP_NAME: &str = "rpfm";

115//-------------------------------------------------------------------------------//
116//                                  Functions
117//-------------------------------------------------------------------------------//
118
119/// Process entry point.
120///
121/// Initialises the Sentry/telemetry guard, primes the telemetry toggles from
122/// persisted settings, builds the [`session::SessionManager`], wires the
123/// `axum` router (`/ws`, `/sessions`, `/mcp`) and starts the listener on
124/// [`DEFAULT_ADDRESS`]:[`DEFAULT_PORT`].
125///
126/// Returns when the listener stops accepting (typically after every session
127/// has been cleaned up — the cleanup task in [`session::SessionManager`]
128/// terminates the process when the session set drains).
129#[tokio::main]
130async fn main() {
131
132    // Sentry client guard, so we can reuse it later on and keep it in scope for the entire duration of the program.
133    // Must be initialized before the tracing subscriber so the SentryLayer can capture spans.
134    *SENTRY_DSN.write().unwrap() = SENTRY_DSN_KEY.to_owned();
135    let guard = Logger::init(&{
136        init_config_path().expect("Error while trying to initialize config path. We're fucked.");
137        error_path().unwrap_or_else(|_| PathBuf::from("."))
138    }, true, false, release_name!()).expect("Failed to initialize logging system.");
139
140    // Setup tracing subscriber for logging, redirecting to stderr to avoid interfering with MCP.
141    // The SentryLayer captures tracing spans/events as Sentry breadcrumbs and performance spans.
142    tracing_subscriber::registry()
143        .with(tracing_subscriber::fmt::layer()
144            .with_writer(std::io::stderr)
145            .with_filter(tracing_subscriber::filter::LevelFilter::INFO))
146        .with(SentryLayer::default())
147        .init();
148
149    if guard.is_enabled() {
150        info!("Sentry logging support for RPFM SERVER enabled. Starting...");
151    } else {
152        info!("Sentry logging support for RPFM SERVER disabled. Starting...");
153    }
154
155    // Read telemetry settings from disk before any sessions spin up so early commands
156    // are counted and crash reports respect the user's choice. Background threads will
157    // refresh these whenever the settings change.
158    if let Ok(settings) = Settings::init(false) {
159        rpfm_telemetry::set_usage_telemetry_enabled(settings.bool(ENABLE_USAGE_TELEMETRY));
160        rpfm_telemetry::set_crash_reports_enabled(settings.bool(ENABLE_CRASH_REPORTS));
161    }
162
163    // Create the session manager to handle per-client sessions,
164    // and start the background cleanup task for expired sessions.
165    let session_manager: Arc<SessionManager> = Arc::new(SessionManager::default());
166    SessionManager::start_cleanup_task(session_manager.clone());
167
168    // Create an MCP service with its own session for MCP clients.
169    let sm = session_manager.clone();
170    let http_service = StreamableHttpService::new(
171        move || {
172            let session = sm.create_session();
173            Ok(McpServer::new(session))
174        },
175        LocalSessionManager::default().into(),
176        Default::default(),
177    );
178
179    // Setup the endpoints for the server.
180    let app = Router::new()
181        .route("/ws", get(ws_handler))
182        .route("/sessions", get(sessions_handler))
183        .nest_service("/mcp", http_service)
184        .with_state(session_manager);
185
186    let addr = SocketAddr::from((DEFAULT_ADDRESS, DEFAULT_PORT));
187    match TcpListener::bind(addr).await {
188        Ok(listener) => {
189            info!("Listening on {}", addr);
190            axum::serve(listener, app).await.unwrap();
191        }
192        Err(err) => {
193            error!("Failed to bind to address {}: {}\n\nThis usually means you got another copy of the server running. Either use that one, or stop it and try again.", addr, err);
194        }
195    }
196}
197
198/// REST endpoint to get information about all active sessions.
199///
200/// Returns a JSON array of [`SessionInfo`] objects containing:
201/// - `session_id`: Unique session identifier
202/// - `connection_count`: Number of active WebSocket connections
203/// - `timeout_remaining_secs`: Seconds until session cleanup (if disconnected)
204/// - `is_shutting_down`: Whether session is marked for shutdown
205///
206/// This endpoint is used by the UI's session management dialog to display
207/// available sessions and allow users to connect to specific ones.
208async fn sessions_handler(State(session_manager): State<Arc<SessionManager>>) -> Json<Vec<SessionInfo>> {
209    let sessions = session_manager.get_sessions_info();
210    info!("Sessions endpoint queried: {} active session(s)", sessions.len());
211    Json(sessions)
212}