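//! Admin API handlers: dashboard status, manual pipeline/index/tag/organize
//! triggers, watchlist and artist-monitor management, scheduler skips,
//! runtime configuration, and the local MusicBrainz database.
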
use actix_session::Session;
use actix_web::{HttpResponse, web};
use serde::Deserialize;

use shanty_db::entities::download_queue::DownloadStatus;
use shanty_db::entities::work_queue::WorkTaskType;
use shanty_db::queries;

use crate::auth;
use crate::config::AppConfig;
use crate::error::ApiError;
use crate::state::AppState;

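/// Mounts the admin API routes: status, manual job triggers, task lookup,
/// watchlist, monitor, scheduler skips, MusicBrainz tools, and config.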
pub fn configure(cfg: &mut web::ServiceConfig) {
    cfg.service(web::resource("/status").route(web::get().to(get_status)))
        .service(web::resource("/pipeline").route(web::post().to(trigger_pipeline)))
        .service(web::resource("/index").route(web::post().to(trigger_index)))
        .service(web::resource("/tag").route(web::post().to(trigger_tag)))
        .service(web::resource("/organize").route(web::post().to(trigger_organize)))
        .service(web::resource("/tasks/{id}").route(web::get().to(get_task)))
        .service(web::resource("/watchlist").route(web::get().to(list_watchlist)))
        .service(web::resource("/watchlist/{id}").route(web::delete().to(remove_watchlist)))
        .service(web::resource("/monitor/check").route(web::post().to(trigger_monitor_check)))
        .service(web::resource("/monitor/status").route(web::get().to(get_monitor_status)))
        .service(web::resource("/scheduler/skip-pipeline").route(web::post().to(skip_pipeline)))
        .service(web::resource("/scheduler/skip-monitor").route(web::post().to(skip_monitor)))
        .service(web::resource("/mb-status").route(web::get().to(get_mb_status)))
        .service(web::resource("/mb-import").route(web::post().to(trigger_mb_import)))
        .service(
            web::resource("/config")
                .route(web::get().to(get_config))
                .route(web::put().to(save_config)),
        );
}

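/// GET /status: aggregates the dashboard payload in one round trip: library
/// summary, download-queue counts and items, tracks awaiting tags, in-flight
/// tasks, work-queue counts, and per-job scheduler state.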
async fn get_status(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let conn = state.db.conn();

    let summary = shanty_watch::library_summary(conn).await?;
    let pending_items = queries::downloads::list(conn, Some(DownloadStatus::Pending)).await?;
    let downloading_items =
        queries::downloads::list(conn, Some(DownloadStatus::Downloading)).await?;
    let failed_items = queries::downloads::list(conn, Some(DownloadStatus::Failed)).await?;
    let tasks = state.tasks.list();

    let mut queue_items = Vec::new();
    queue_items.extend(downloading_items.iter().cloned());
    queue_items.extend(pending_items.iter().cloned());
    queue_items.extend(failed_items.iter().take(5).cloned());

    let needs_tagging = queries::tracks::get_needing_metadata(conn).await?;

    // Work queue counts
    let work_queue = queries::work_queue::counts_all(conn).await.ok();

    // Scheduler state from DB
    let scheduler_jobs = queries::scheduler_state::list_all(conn).await.unwrap_or_default();
    let scheduler_json: serde_json::Value = scheduler_jobs
        .iter()
        .map(|j| {
            (
                j.job_name.clone(),
                serde_json::json!({
                    "last_run": j.last_run_at,
                    "next_run": j.next_run_at,
                    "last_result": j.last_result,
                    "enabled": j.enabled,
                }),
            )
        })
        .collect::<serde_json::Map<String, serde_json::Value>>()
        .into();

    // Backward-compatible scheduled field (from scheduler_state DB)
    let next_pipeline = scheduler_jobs
        .iter()
        .find(|j| j.job_name == "pipeline")
        .and_then(|j| j.next_run_at);
    let next_monitor = scheduler_jobs
        .iter()
        .find(|j| j.job_name == "monitor")
        .and_then(|j| j.next_run_at);

    Ok(HttpResponse::Ok().json(serde_json::json!({
        "library": summary,
        "queue": {
            "pending": pending_items.len(),
            "downloading": downloading_items.len(),
            "failed": failed_items.len(),
            "items": queue_items,
        },
        "tagging": {
            "needs_tagging": needs_tagging.len(),
            "items": needs_tagging.iter().take(20).collect::<Vec<_>>(),
        },
        "tasks": tasks,
        "scheduled": {
            "next_pipeline": next_pipeline,
            "next_monitor": next_monitor,
        },
        "work_queue": work_queue,
        "scheduler": scheduler_json,
    })))
}

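/// POST /index: enqueues a full library scan on the work queue and wakes the
/// index workers; responds 202 with the work item id.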
async fn trigger_index(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let payload = serde_json::json!({"scan_all": true});
    let item = queries::work_queue::enqueue(
        state.db.conn(),
        WorkTaskType::Index,
        &payload.to_string(),
        None,
    )
    .await?;
    state.workers.notify(WorkTaskType::Index);
    Ok(HttpResponse::Accepted().json(serde_json::json!({ "work_item_id": item.id })))
}

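/// POST /tag: enqueues one tag task per track still missing metadata and
/// wakes the tag workers; responds 202 with the enqueued count.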
async fn trigger_tag(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let conn = state.db.conn();
    let untagged = queries::tracks::get_needing_metadata(conn).await?;
    let mut count = 0;
    for track in &untagged {
        let payload = serde_json::json!({"track_id": track.id});
        queries::work_queue::enqueue(conn, WorkTaskType::Tag, &payload.to_string(), None).await?;
        count += 1;
    }
    if count > 0 {
        state.workers.notify(WorkTaskType::Tag);
    }
    Ok(HttpResponse::Accepted().json(serde_json::json!({ "enqueued": count })))
}

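/// POST /organize: enqueues an organize task for every track, paging through
/// the library 500 rows at a time; responds 202 with the enqueued count.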
async fn trigger_organize(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let conn = state.db.conn();
    let mut count = 0u64;
    let mut offset = 0u64;
    loop {
        let tracks = queries::tracks::list(conn, 500, offset).await?;
        if tracks.is_empty() {
            break;
        }
        for track in &tracks {
            let payload = serde_json::json!({"track_id": track.id});
            queries::work_queue::enqueue(
                conn,
                WorkTaskType::Organize,
                &payload.to_string(),
                None,
            )
            .await?;
            count += 1;
        }
        offset += 500;
    }
    if count > 0 {
        state.workers.notify(WorkTaskType::Organize);
    }
    Ok(HttpResponse::Accepted().json(serde_json::json!({ "enqueued": count })))
}

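/// POST /pipeline: starts a full pipeline run and responds 202 with its id.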
async fn trigger_pipeline(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let pipeline_id = crate::pipeline::trigger_pipeline(&state).await?;
    Ok(HttpResponse::Accepted().json(serde_json::json!({ "pipeline_id": pipeline_id })))
}

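/// GET /tasks/{id}: returns the in-memory task record, or 404 if unknown.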
async fn get_task(
    state: web::Data<AppState>,
    session: Session,
    path: web::Path<String>,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let id = path.into_inner();
    match state.tasks.get(&id) {
        Some(task) => Ok(HttpResponse::Ok().json(task)),
        None => Err(ApiError::NotFound(format!("task {id}"))),
    }
}

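/// GET /watchlist: lists the authenticated user's watchlist items.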
async fn list_watchlist(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    let (user_id, _, _) = auth::require_auth(&session)?;
    let items = shanty_watch::list_items(state.db.conn(), None, None, Some(user_id)).await?;
    Ok(HttpResponse::Ok().json(items))
}

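/// DELETE /watchlist/{id}: removes a watchlist item and responds 204.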
async fn remove_watchlist(
    state: web::Data<AppState>,
    session: Session,
    path: web::Path<i32>,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let id = path.into_inner();
    shanty_watch::remove_item(state.db.conn(), id).await?;
    Ok(HttpResponse::NoContent().finish())
}

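/// POST /monitor/check (admin): spawns a background check of all monitored
/// artists, reporting progress and outcome through the task registry.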
async fn trigger_monitor_check(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_admin(&session)?;
    let state = state.clone();
    let task_id = state.tasks.register("monitor_check");
    let tid = task_id.clone();
    tokio::spawn(async move {
        state
            .tasks
            .update_progress(&tid, 0, 0, "Checking monitored artists...");
        match crate::monitor::check_monitored_artists(&state).await {
            Ok(stats) => state.tasks.complete(
                &tid,
                format!(
                    "{} artists checked, {} new releases, {} tracks added",
                    stats.artists_checked, stats.new_releases_found, stats.tracks_added
                ),
            ),
            Err(e) => state.tasks.fail(&tid, e.to_string()),
        }
    });
    Ok(HttpResponse::Accepted().json(serde_json::json!({ "task_id": task_id })))
}

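/// GET /monitor/status: lists monitored artists with their MusicBrainz ids
/// and last-checked timestamps.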
async fn get_monitor_status(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let monitored = queries::artists::list_monitored(state.db.conn()).await?;
    let items: Vec<serde_json::Value> = monitored
        .iter()
        .map(|a| {
            serde_json::json!({
                "id": a.id,
                "name": a.name,
                "musicbrainz_id": a.musicbrainz_id,
                "monitored": a.monitored,
                "last_checked_at": a.last_checked_at,
            })
        })
        .collect();
    Ok(HttpResponse::Ok().json(items))
}

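/// GET /config: returns the current in-memory configuration.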
async fn get_config(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let config = state.config.read().await;
    Ok(HttpResponse::Ok().json(&*config))
}

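/// Body of PUT /config; `#[serde(flatten)]` lets clients send the
/// `AppConfig` fields as a top-level JSON object.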
#[derive(Deserialize)]
struct SaveConfigRequest {
    #[serde(flatten)]
    config: AppConfig,
}

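/// PUT /config (admin): persists the new configuration to disk first, then
/// swaps it into the shared in-memory state, so a failed save changes nothing.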
async fn save_config(
    state: web::Data<AppState>,
    session: Session,
    body: web::Json<SaveConfigRequest>,
) -> Result<HttpResponse, ApiError> {
    auth::require_admin(&session)?;
    let new_config = body.into_inner().config;

    // Persist to YAML
    new_config
        .save(state.config_path.as_deref())
        .map_err(ApiError::Internal)?;

    // Update in-memory config
    let mut config = state.config.write().await;
    *config = new_config.clone();

    tracing::info!("config updated via API");
    Ok(HttpResponse::Ok().json(&new_config))
}

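/// POST /scheduler/skip-pipeline (admin): pushes the pipeline job's
/// next_run_at forward by one configured interval (minimum one hour).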
async fn skip_pipeline(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_admin(&session)?;
    // Push next_run_at forward by one interval
    let cfg = state.config.read().await;
    let hours = cfg.scheduling.pipeline_interval_hours.max(1);
    drop(cfg);
    let next = chrono::Utc::now().naive_utc() + chrono::Duration::hours(i64::from(hours));
    queries::scheduler_state::update_next_run(state.db.conn(), "pipeline", Some(next)).await?;
    Ok(HttpResponse::Ok().json(serde_json::json!({"status": "skipped", "next_run": next})))
}

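/// POST /scheduler/skip-monitor (admin): pushes the monitor job's
/// next_run_at forward by one configured interval (minimum one hour).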
async fn skip_monitor(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_admin(&session)?;
    let cfg = state.config.read().await;
    let hours = cfg.scheduling.monitor_interval_hours.max(1);
    drop(cfg);
    let next = chrono::Utc::now().naive_utc() + chrono::Duration::hours(i64::from(hours));
    queries::scheduler_state::update_next_run(state.db.conn(), "monitor", Some(next)).await?;
    Ok(HttpResponse::Ok().json(serde_json::json!({"status": "skipped", "next_run": next})))
}

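/// GET /mb-status: reports whether a local MusicBrainz database is available,
/// along with its stats.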
async fn get_mb_status(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_auth(&session)?;
    let has_local = state.mb_client.has_local_db();
    let stats = state.mb_client.local_stats();
    Ok(HttpResponse::Ok().json(serde_json::json!({
        "has_local_db": has_local,
        "stats": stats,
    })))
}

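/// POST /mb-import (admin): spawns a background task that downloads the
/// latest MusicBrainz dumps and imports them into the local database,
/// reporting progress through the task registry; responds 202 with the task id.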
async fn trigger_mb_import(
    state: web::Data<AppState>,
    session: Session,
) -> Result<HttpResponse, ApiError> {
    auth::require_admin(&session)?;
    let task_id = state.tasks.register("mb_import");
    let tid = task_id.clone();
    let config = state.config.read().await.clone();

    tokio::spawn(async move {
        state
            .tasks
            .update_progress(&tid, 0, 0, "Starting MusicBrainz import...");

        let data_dir = shanty_config::data_dir().join("mb-dumps");
        let db_path = config
            .musicbrainz
            .local_db_path
            .clone()
            .unwrap_or_else(|| shanty_config::data_dir().join("shanty-mb.db"));

        // Download dumps
        state
            .tasks
            .update_progress(&tid, 0, 4, "Downloading dumps...");
        if let Err(e) = std::fs::create_dir_all(&data_dir) {
            state
                .tasks
                .fail(&tid, format!("Failed to create data dir: {e}"));
            return;
        }

        let timestamp = match shanty_data::mb_import::discover_latest_dump_folder().await {
            Ok(t) => t,
            Err(e) => {
                state
                    .tasks
                    .fail(&tid, format!("Failed to discover latest dump: {e}"));
                return;
            }
        };

        for (i, filename) in shanty_data::mb_import::DUMP_FILES.iter().enumerate() {
            state.tasks.update_progress(
                &tid,
                i as u64,
                4 + 4,
                &format!("Downloading {filename}..."),
            );
            if let Err(e) =
                shanty_data::mb_import::download_dump(filename, &timestamp, &data_dir, |msg| {
                    tracing::info!("{msg}");
                    state.tasks.update_progress(&tid, i as u64, 8, msg);
                })
                .await
            {
                state
                    .tasks
                    .fail(&tid, format!("Failed to download {filename}: {e}"));
                return;
            }
        }

        // Run import
        state
            .tasks
            .update_progress(&tid, 4, 8, "Importing into database...");

        let tid_clone = tid.clone();
        let state_clone = state.clone();
        let result = tokio::task::spawn_blocking(move || {
            shanty_data::mb_import::run_import_at_path(&db_path, &data_dir, |msg| {
                tracing::info!("{msg}");
                state_clone.tasks.update_progress(&tid_clone, 4, 8, msg);
            })
        })
        .await;

        match result {
            Ok(Ok(stats)) => {
                tracing::info!(%stats, "MusicBrainz import complete");
                state.tasks.complete(&tid, format!("{stats}"));
            }
            Ok(Err(e)) => {
                state.tasks.fail(&tid, format!("Import failed: {e}"));
            }
            Err(e) => {
                state.tasks.fail(&tid, format!("Import task panicked: {e}"));
            }
        }
    });

    Ok(HttpResponse::Accepted().json(serde_json::json!({ "task_id": task_id })))
}