From 7c30f288cdb41fc321a6e39a85b3a56d51f2e884 Mon Sep 17 00:00:00 2001
From: Connor Johnstone
Date: Tue, 24 Mar 2026 11:38:07 -0400
Subject: [PATCH] Handle featured-artist releases and rework the scheduled
 jobs display

Split each artist's MusicBrainz release groups into primary credits and
featured appearances, and show featured releases in collapsed sections on
the artist page. Pass the album artist through the album detail endpoint
and the track-watch call. Replace the dashboard's scheduled task rows with
a dedicated Scheduled Jobs table driven by scheduler state, and
pre-populate next_run_at when scheduler state rows are first created.

---
 frontend/src/api.rs             |   8 +-
 frontend/src/pages/album.rs     |   4 +-
 frontend/src/pages/artist.rs    |  52 +++++++++-
 frontend/src/pages/dashboard.rs | 173 +++++++++++++++++++-------------
 frontend/src/types.rs           |   4 +
 src/cookie_refresh.rs           |   5 +-
 src/routes/albums.rs            |  26 +++--
 src/routes/artists.rs           |  34 ++++++-
 src/routes/system.rs            |  13 +--
 src/scheduler.rs                |  31 ++++--
 src/workers.rs                  |  15 +--
 11 files changed, 258 insertions(+), 107 deletions(-)

diff --git a/frontend/src/api.rs b/frontend/src/api.rs
index 195726e..3b2b749 100644
--- a/frontend/src/api.rs
+++ b/frontend/src/api.rs
@@ -176,8 +176,12 @@ pub async fn add_album(
     post_json(&format!("{BASE}/albums"), &body).await
 }
 
-pub async fn watch_track(title: &str, mbid: &str) -> Result {
-    let body = serde_json::json!({"title": title, "mbid": mbid}).to_string();
+pub async fn watch_track(
+    artist: Option<&str>,
+    title: &str,
+    mbid: &str,
+) -> Result {
+    let body = serde_json::json!({"artist": artist, "title": title, "mbid": mbid}).to_string();
     post_json(&format!("{BASE}/tracks/watch"), &body).await
 }
 
diff --git a/frontend/src/pages/album.rs b/frontend/src/pages/album.rs
index b98e1cc..5852cb6 100644
--- a/frontend/src/pages/album.rs
+++ b/frontend/src/pages/album.rs
@@ -122,13 +122,15 @@ pub fn album_page(props: &Props) -> Html {
             let detail = detail.clone();
             let title = t.title.clone();
             let mbid = t.recording_mbid.clone();
+            let artist = d.artist.clone();
             Callback::from(move |_: MouseEvent| {
                 let detail = detail.clone();
                 let title = title.clone();
                 let mbid = mbid.clone();
+                let artist = artist.clone();
                 let idx = idx;
                 wasm_bindgen_futures::spawn_local(async move {
-                    if let Ok(resp) = api::watch_track(&title, &mbid).await {
+                    if let Ok(resp) = api::watch_track(artist.as_deref(), &title, &mbid).await {
                         if let Some(ref d) = *detail {
                             let mut updated = d.clone();
                             if let Some(track) = updated.tracks.get_mut(idx) {
diff --git a/frontend/src/pages/artist.rs b/frontend/src/pages/artist.rs
index f921258..eb922d7 100644
--- a/frontend/src/pages/artist.rs
+++ b/frontend/src/pages/artist.rs
@@ -279,7 +279,7 @@ pub fn artist_page(props: &Props) -> Html {

{ "No releases found on MusicBrainz." }

                 }
-                // Group albums by type
+                // Group albums by type (primary credit only)
                 { for ["Album", "EP", "Single"].iter().map(|release_type| {
                     let type_albums: Vec<_> = d.albums.iter()
                         .filter(|a| a.release_type.as_deref().unwrap_or("Album") == *release_type)
                         .collect();
@@ -397,6 +397,56 @@ pub fn artist_page(props: &Props) -> Html {
                     }
                 })}
+
+                // Featured releases (collapsible, pre-collapsed)
+                { for ["Album", "EP", "Single"].iter().map(|release_type| {
+                    let featured: Vec<_> = d.featured_albums.iter()
+                        .filter(|a| a.release_type.as_deref().unwrap_or("Album") == *release_type)
+                        .collect();
+                    if featured.is_empty() {
+                        return html! {};
+                    }
+                    html! {
+                        { format!("Featured {}s ({})", release_type, featured.len()) }
+                        { "Title" }{ "Date" }
+                        { for featured.iter().map(|album| {
+                            let cover_url = format!("https://coverartarchive.org/release/{}/front-250", album.mbid);
+                            html! {
+                                () {
+                                    el.set_attribute("style", "display:none").ok();
+                                } })} />
+                                to={Route::Album { mbid: album.mbid.clone() }}>
+                                    { &album.title }
+                                >
+                                { album.date.as_deref().unwrap_or("") }
+                            }
+                        })}
+ } + })} } } diff --git a/frontend/src/pages/dashboard.rs b/frontend/src/pages/dashboard.rs index 874658c..5c91e57 100644 --- a/frontend/src/pages/dashboard.rs +++ b/frontend/src/pages/dashboard.rs @@ -240,72 +240,107 @@ pub fn dashboard() -> Html { .iter() .any(|t| t.status == "Pending" || t.status == "Running"); - // Pre-compute scheduled task rows - let scheduled_rows = { - let mut rows = Vec::new(); - if let Some(ref sched) = s.scheduled { - if let Some(ref next) = sched.next_pipeline { - let on_skip = { - let message = message.clone(); - let error = error.clone(); - let fetch = fetch_status.clone(); - Callback::from(move |_: MouseEvent| { - let message = message.clone(); - let error = error.clone(); - let fetch = fetch.clone(); - wasm_bindgen_futures::spawn_local(async move { - match api::skip_scheduled_pipeline().await { - Ok(_) => { - message.set(Some("Next pipeline run skipped".into())); - fetch.emit(()); - } - Err(e) => error.set(Some(e.0)), - } - }); - }) - }; - rows.push(html! { - - { "Auto Pipeline" } - { "Scheduled" } - { format!("Next run: {}", format_next_run(next)) } - - - }); - } - if let Some(ref next) = sched.next_monitor { - let on_skip = { - let message = message.clone(); - let error = error.clone(); - let fetch = fetch_status.clone(); - Callback::from(move |_: MouseEvent| { - let message = message.clone(); - let error = error.clone(); - let fetch = fetch.clone(); - wasm_bindgen_futures::spawn_local(async move { - match api::skip_scheduled_monitor().await { - Ok(_) => { - message.set(Some("Next monitor check skipped".into())); - fetch.emit(()); - } - Err(e) => error.set(Some(e.0)), - } - }); - }) - }; - rows.push(html! { - - { "Monitor Check" } - { "Scheduled" } - { format!("Next run: {}", format_next_run(next)) } - - - }); - } - } - rows + // Skip callbacks for scheduler + let on_skip_pipeline = { + let message = message.clone(); + let error = error.clone(); + let fetch = fetch_status.clone(); + Callback::from(move |_: MouseEvent| { + let message = message.clone(); + let error = error.clone(); + let fetch = fetch.clone(); + wasm_bindgen_futures::spawn_local(async move { + match api::skip_scheduled_pipeline().await { + Ok(_) => { + message.set(Some("Next pipeline run skipped".into())); + fetch.emit(()); + } + Err(e) => error.set(Some(e.0)), + } + }); + }) + }; + let on_skip_monitor = { + let message = message.clone(); + let error = error.clone(); + let fetch = fetch_status.clone(); + Callback::from(move |_: MouseEvent| { + let message = message.clone(); + let error = error.clone(); + let fetch = fetch.clone(); + wasm_bindgen_futures::spawn_local(async move { + match api::skip_scheduled_monitor().await { + Ok(_) => { + message.set(Some("Next monitor check skipped".into())); + fetch.emit(()); + } + Err(e) => error.set(Some(e.0)), + } + }); + }) + }; + + let scheduled_jobs_html = { + let next_pipeline = s.scheduled.as_ref().and_then(|sc| sc.next_pipeline.as_ref()); + let next_monitor = s.scheduled.as_ref().and_then(|sc| sc.next_monitor.as_ref()); + let pipeline_next_str = next_pipeline.map(|n| format_next_run(n)).unwrap_or_default(); + let monitor_next_str = next_monitor.map(|n| format_next_run(n)).unwrap_or_default(); + let pipeline_last = s.scheduler.as_ref() + .and_then(|sc| sc.get("pipeline")) + .and_then(|j| j.get("last_result")) + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + let monitor_last = s.scheduler.as_ref() + .and_then(|sc| sc.get("monitor")) + .and_then(|j| j.get("last_result")) + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + 
+        html! {
+            { "Scheduled Jobs" }
+            { "Job" }{ "Status" }{ "Next Run" }{ "Last Result" }
+            { "Auto Pipeline" }
+            { if next_pipeline.is_some() {
+                html! { { "Scheduled" } }
+            } else {
+                html! { { "Idle" } }
+            }}
+            { pipeline_next_str }
+            { pipeline_last }
+            { if next_pipeline.is_some() {
+                html! { }
+            } else {
+                html! {}
+            }}
+            { "Monitor Check" }
+            { if next_monitor.is_some() {
+                html! { { "Scheduled" } }
+            } else {
+                html! { { "Idle" } }
+            }}
+            { monitor_next_str }
+            { monitor_last }
+            { if next_monitor.is_some() {
+                html! { }
+            } else {
+                html! {}
+            }}
+        }
+    };
 
-    let has_scheduled = !scheduled_rows.is_empty();
 
     html! {
@@ -394,8 +429,11 @@ pub fn dashboard() -> Html {
                 }
             }
 
-            // Background Tasks (always show if there are tasks or scheduled items)
-            if !s.tasks.is_empty() || has_scheduled {
+            // Scheduled Jobs (always visible)
+            { scheduled_jobs_html }
+
+            // Background Tasks (one-off tasks like MB import)
+            if !s.tasks.is_empty() {

{ "Background Tasks" }

@@ -403,7 +441,6 @@
                             { "Type" }{ "Status" }{ "Progress" }{ "Result" }
 
-                            { for scheduled_rows.into_iter() }
                             { for s.tasks.iter().map(|t| {
                                 let progress_html = if let Some(ref p) = t.progress {
                                     if p.total > 0 {
diff --git a/frontend/src/types.rs b/frontend/src/types.rs
index 816831d..4c03535 100644
--- a/frontend/src/types.rs
+++ b/frontend/src/types.rs
@@ -54,6 +54,8 @@ pub struct FullAlbumInfo {
 pub struct FullArtistDetail {
     pub artist: Artist,
     pub albums: Vec,
+    #[serde(default)]
+    pub featured_albums: Vec,
     pub artist_status: String,
     #[serde(default)]
     pub total_available_tracks: u32,
@@ -120,6 +122,8 @@ pub struct Track {
 #[derive(Debug, Clone, PartialEq, Deserialize)]
 pub struct MbAlbumDetail {
     pub mbid: String,
+    #[serde(default)]
+    pub artist: Option,
     pub tracks: Vec,
 }
 
diff --git a/src/cookie_refresh.rs b/src/cookie_refresh.rs
index 28296f2..00d6cb3 100644
--- a/src/cookie_refresh.rs
+++ b/src/cookie_refresh.rs
@@ -11,10 +11,7 @@ pub async fn run_refresh() -> Result {
     let cookies_path = shanty_config::data_dir().join("cookies.txt");
 
     if !profile_dir.exists() {
-        return Err(format!(
-            "no Firefox profile at {}",
-            profile_dir.display()
-        ));
+        return Err(format!("no Firefox profile at {}", profile_dir.display()));
     }
 
     let script = find_script()?;
diff --git a/src/routes/albums.rs b/src/routes/albums.rs
index c1a6ffb..d542778 100644
--- a/src/routes/albums.rs
+++ b/src/routes/albums.rs
@@ -69,19 +69,32 @@ async fn get_album(
     let mbid = path.into_inner();
 
     // Try fetching as a release first
-    let mb_tracks = match state.mb_client.get_release_tracks(&mbid).await {
-        Ok(tracks) => tracks,
+    let (mb_tracks, _release_mbid) = match state.mb_client.get_release_tracks(&mbid).await {
+        Ok(tracks) => (tracks, mbid.clone()),
         Err(_) => {
             // Probably a release-group MBID. Browse releases for this group.
-            let release_mbid = resolve_release_from_group(&state, &mbid).await?;
-            state
+            let resolved = resolve_release_from_group(&state, &mbid).await?;
+            let tracks = state
                 .mb_client
-                .get_release_tracks(&release_mbid)
+                .get_release_tracks(&resolved)
                 .await
-                .map_err(|e| ApiError::Internal(format!("MusicBrainz error: {e}")))?
+                .map_err(|e| ApiError::Internal(format!("MusicBrainz error: {e}")))?;
+            (tracks, resolved)
         }
     };
 
+    // Get the album artist from the release's recording credits
+    let album_artist = if let Some(first_track) = mb_tracks.first() {
+        state
+            .mb_client
+            .get_recording(&first_track.recording_mbid)
+            .await
+            .ok()
+            .map(|r| r.artist)
+    } else {
+        None
+    };
+
     // Get all wanted items to check local status
     let all_wanted = queries::wanted::list(state.db.conn(), None, None).await?;
 
@@ -112,6 +125,7 @@ async fn get_album(
     Ok(HttpResponse::Ok().json(serde_json::json!({
         "mbid": mbid,
+        "artist": album_artist,
         "tracks": tracks,
     })))
 }
diff --git a/src/routes/artists.rs b/src/routes/artists.rs
index 7b0cabc..7d38e22 100644
--- a/src/routes/artists.rs
+++ b/src/routes/artists.rs
@@ -376,14 +376,18 @@ pub async fn enrich_artist(
         .await;
     tracing::debug!(mbid = %mbid, has_photo = artist_photo.is_some(), has_bio = artist_bio.is_some(), has_banner = artist_banner.is_some(), "artist enrichment data");
 
-    // Fetch release groups and filter by allowed secondary types
+    // Fetch release groups and split into primary vs featured
     let all_release_groups = state
         .search
         .get_release_groups(&mbid)
         .await
         .map_err(|e| ApiError::Internal(e.to_string()))?;
     let allowed = state.config.read().await.allowed_secondary_types.clone();
-    let release_groups: Vec<_> = all_release_groups
+
+    let (primary_rgs, featured_rgs): (Vec<_>, Vec<_>) =
+        all_release_groups.into_iter().partition(|rg| !rg.featured);
+
+    let release_groups: Vec<_> = primary_rgs
         .into_iter()
         .filter(|rg| {
             if rg.secondary_types.is_empty() {
@@ -395,6 +399,31 @@ pub async fn enrich_artist(
         })
         .collect();
 
+    // Featured release groups — just pass through with type filtering
+    let featured_albums: Vec = featured_rgs
+        .iter()
+        .filter(|rg| {
+            if rg.secondary_types.is_empty() {
+                true
+            } else {
+                rg.secondary_types.iter().all(|st| allowed.contains(st))
+            }
+        })
+        .map(|rg| FullAlbumInfo {
+            mbid: rg.first_release_id.clone().unwrap_or_else(|| rg.id.clone()),
+            title: rg.title.clone(),
+            release_type: rg.primary_type.clone(),
+            date: rg.first_release_date.clone(),
+            track_count: 0,
+            local_album_id: None,
+            watched_tracks: 0,
+            owned_tracks: 0,
+            downloaded_tracks: 0,
+            total_local_tracks: 0,
+            status: "featured".to_string(),
+        })
+        .collect();
+
     // Get all wanted items for this artist
     let all_wanted = queries::wanted::list(state.db.conn(), None, None).await?;
     let artist_wanted: Vec<_> = all_wanted
@@ -609,6 +638,7 @@ pub async fn enrich_artist(
     Ok(serde_json::json!({
         "artist": artist,
         "albums": albums,
+        "featured_albums": featured_albums,
         "artist_status": artist_status,
         "total_available_tracks": total_available_tracks,
         "total_watched_tracks": total_artist_watched,
diff --git a/src/routes/system.rs b/src/routes/system.rs
index 5e951f4..34601b1 100644
--- a/src/routes/system.rs
+++ b/src/routes/system.rs
@@ -58,7 +58,9 @@ async fn get_status(
     let work_queue = queries::work_queue::counts_all(conn).await.ok();
 
     // Scheduler state from DB
-    let scheduler_jobs = queries::scheduler_state::list_all(conn).await.unwrap_or_default();
+    let scheduler_jobs = queries::scheduler_state::list_all(conn)
+        .await
+        .unwrap_or_default();
     let scheduler_json: serde_json::Value = scheduler_jobs
         .iter()
         .map(|j| {
@@ -158,13 +160,8 @@ async fn trigger_organize(
         }
         for track in &tracks {
             let payload = serde_json::json!({"track_id": track.id});
-            queries::work_queue::enqueue(
-                conn,
-                WorkTaskType::Organize,
-                &payload.to_string(),
-                None,
-            )
-            .await?;
+            queries::work_queue::enqueue(conn, WorkTaskType::Organize, &payload.to_string(), None)
+                .await?;
             count += 1;
         }
         offset += 500;
diff --git a/src/scheduler.rs b/src/scheduler.rs
index 138af59..f8af0a6 100644
--- a/src/scheduler.rs
+++ b/src/scheduler.rs
@@ -15,12 +15,27 @@ use crate::state::AppState;
 /// Spawn the unified scheduler background loop.
 pub fn spawn(state: web::Data) {
     tokio::spawn(async move {
-        // Initialize scheduler state rows in DB
+        // Initialize scheduler state rows in DB with next_run_at pre-populated
         for job_name in ["pipeline", "monitor", "cookie_refresh"] {
-            if let Err(e) =
-                queries::scheduler_state::get_or_create(state.db.conn(), job_name).await
-            {
-                tracing::error!(job = job_name, error = %e, "failed to init scheduler state");
+            match queries::scheduler_state::get_or_create(state.db.conn(), job_name).await {
+                Ok(job) => {
+                    if job.next_run_at.is_none() {
+                        let (enabled, interval_secs) = read_job_config(&state, job_name).await;
+                        if enabled {
+                            let next =
+                                Utc::now().naive_utc() + chrono::Duration::seconds(interval_secs);
+                            let _ = queries::scheduler_state::update_next_run(
+                                state.db.conn(),
+                                job_name,
+                                Some(next),
+                            )
+                            .await;
+                        }
+                    }
+                }
+                Err(e) => {
+                    tracing::error!(job = job_name, error = %e, "failed to init scheduler state");
+                }
             }
         }
 
@@ -55,8 +70,7 @@ where
     // If config says disabled, ensure DB state reflects it
     if !config_enabled {
         if job.enabled {
-            let _ =
-                queries::scheduler_state::set_enabled(state.db.conn(), job_name, false).await;
+            let _ = queries::scheduler_state::set_enabled(state.db.conn(), job_name, false).await;
             let _ =
                 queries::scheduler_state::update_next_run(state.db.conn(), job_name, None).await;
         }
@@ -103,8 +117,7 @@ where
     // Update last run and schedule next
     let _ =
         queries::scheduler_state::update_last_run(state.db.conn(), job_name, &result_str).await;
     let next = Utc::now().naive_utc() + chrono::Duration::seconds(interval_secs);
-    let _ =
-        queries::scheduler_state::update_next_run(state.db.conn(), job_name, Some(next)).await;
+    let _ = queries::scheduler_state::update_next_run(state.db.conn(), job_name, Some(next)).await;
 }
 
 async fn read_job_config(state: &web::Data, job_name: &str) -> (bool, i64) {
diff --git a/src/workers.rs b/src/workers.rs
index d9d95fb..f406a0d 100644
--- a/src/workers.rs
+++ b/src/workers.rs
@@ -64,7 +64,8 @@ impl WorkerManager {
             tokio::spawn(async move {
                 loop {
                     tokio::time::sleep(std::time::Duration::from_secs(6 * 3600)).await;
-                    let _ = queries::work_queue::cleanup_completed(cleanup_state.db.conn(), 7).await;
+                    let _ =
+                        queries::work_queue::cleanup_completed(cleanup_state.db.conn(), 7).await;
                 }
             });
         });
@@ -316,7 +317,11 @@ async fn process_index(
     let cfg = state.config.read().await.clone();
     let mut downstream = Vec::new();
 
-    if payload.get("scan_all").and_then(|v| v.as_bool()).unwrap_or(false) {
+    if payload
+        .get("scan_all")
+        .and_then(|v| v.as_bool())
+        .unwrap_or(false)
+    {
         // Full library scan
         let scan_config = shanty_index::ScanConfig {
             root: cfg.library_path.clone(),
@@ -360,8 +365,7 @@ async fn process_tag(
     let track_id = payload
         .get("track_id")
         .and_then(|v| v.as_i64())
-        .ok_or("missing track_id in payload")?
-        as i32;
+        .ok_or("missing track_id in payload")? as i32;
 
     let conn = state.db.conn();
     let cfg = state.config.read().await.clone();
@@ -394,8 +398,7 @@ async fn process_organize(
     let track_id = payload
         .get("track_id")
         .and_then(|v| v.as_i64())
-        .ok_or("missing track_id in payload")?
-        as i32;
+        .ok_or("missing track_id in payload")? as i32;
 
     let conn = state.db.conn();
     let cfg = state.config.read().await.clone();