Compare commits

...

11 Commits

Author SHA1 Message Date
Connor Johnstone 181f736f25 fleshed out subsonic more 2026-04-01 19:36:24 -04:00
Connor Johnstone 8045dadc57 added the scroll bar 2026-03-31 13:18:18 -04:00
Connor Johnstone dcf4993f68 Unified the track logic. Seems to work much better 2026-03-26 17:38:16 -04:00
Connor Johnstone cf5a38a376 Attempt to fix playlist gen dropdown 2026-03-26 13:20:20 -04:00
Connor Johnstone e4947191d0 I **think** I've at least 99% fixed the top songs mismatch 2026-03-25 21:50:09 -04:00
Connor Johnstone b4e0756a90 proper fix plus delete artist actually removes files 2026-03-25 14:32:17 -04:00
Connor Johnstone c425402857 added the cleanup, which was missing. also artist lookup first rather than search on import 2026-03-25 14:04:24 -04:00
Connor Johnstone 64e20136f0 hopefully a fix for the enrich pipeline sometimes not running 2026-03-25 13:37:29 -04:00
Connor Johnstone 820d37262d format,test,blah 2026-03-24 20:47:14 -04:00
Connor Johnstone d9c6a7759e fixed some unwatch cleanup stuff 2026-03-24 20:41:13 -04:00
Connor Johnstone cc42f8ecbb Added the import/cleanup functionality 2026-03-24 15:58:14 -04:00
10 changed files with 529 additions and 13 deletions
+1
View File
@@ -43,6 +43,7 @@ pub struct Model {
pub file_mtime: Option<chrono::NaiveDateTime>,
pub added_at: chrono::NaiveDateTime,
pub updated_at: chrono::NaiveDateTime,
pub tagged: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+3
View File
@@ -14,6 +14,8 @@ pub enum WorkTaskType {
Tag,
#[sea_orm(string_value = "organize")]
Organize,
#[sea_orm(string_value = "enrich")]
Enrich,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, EnumIter, DeriveActiveEnum)]
@@ -36,6 +38,7 @@ impl std::fmt::Display for WorkTaskType {
Self::Index => write!(f, "index"),
Self::Tag => write!(f, "tag"),
Self::Organize => write!(f, "organize"),
Self::Enrich => write!(f, "enrich"),
}
}
}
@@ -0,0 +1,40 @@
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Add a non-null `tagged` flag to `tracks`; every existing row
    /// defaults to `false` (i.e. "not yet processed by the tagger").
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        let stmt = Table::alter()
            .table(Tracks::Table)
            .add_column(
                ColumnDef::new(Tracks::Tagged)
                    .boolean()
                    .not_null()
                    .default(false),
            )
            .to_owned();
        manager.alter_table(stmt).await
    }

    /// Revert by dropping the `tagged` column again.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        let stmt = Table::alter()
            .table(Tracks::Table)
            .drop_column(Tracks::Tagged)
            .to_owned();
        manager.alter_table(stmt).await
    }
}

#[derive(DeriveIden)]
enum Tracks {
    Table,
    Tagged,
}
@@ -0,0 +1,59 @@
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Drop the unique index on artist name — different artists can share a
    /// name (e.g., "Clara" the Italian singer and "Clara" the Brazilian
    /// singer) — and replace it with a plain index for lookup performance.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        let drop_unique = Index::drop()
            .name("idx_artists_name_unique")
            .table(Artists::Table)
            .to_owned();
        manager.drop_index(drop_unique).await?;

        // Replace with a non-unique index so name lookups stay fast.
        let create_plain = Index::create()
            .name("idx_artists_name")
            .table(Artists::Table)
            .col(Artists::Name)
            .to_owned();
        manager.create_index(create_plain).await
    }

    /// Revert: restore the unique index. NOTE(review): this will fail if
    /// duplicate artist names were created in the meantime.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        let drop_plain = Index::drop()
            .name("idx_artists_name")
            .table(Artists::Table)
            .to_owned();
        manager.drop_index(drop_plain).await?;

        let create_unique = Index::create()
            .name("idx_artists_name_unique")
            .table(Artists::Table)
            .col(Artists::Name)
            .unique()
            .to_owned();
        manager.create_index(create_unique).await
    }
}

#[derive(DeriveIden)]
enum Artists {
    Table,
    Name,
}
+4
View File
@@ -16,6 +16,8 @@ mod m20260320_000014_create_playlists;
mod m20260320_000015_add_subsonic_password;
mod m20260323_000016_remove_orphaned_artists;
mod m20260323_000017_create_work_queue_and_scheduler;
mod m20260324_000018_add_track_tagged;
mod m20260325_000019_allow_duplicate_artist_names;
pub struct Migrator;
@@ -39,6 +41,8 @@ impl MigratorTrait for Migrator {
Box::new(m20260320_000015_add_subsonic_password::Migration),
Box::new(m20260323_000016_remove_orphaned_artists::Migration),
Box::new(m20260323_000017_create_work_queue_and_scheduler::Migration),
Box::new(m20260324_000018_add_track_tagged::Migration),
Box::new(m20260325_000019_allow_duplicate_artist_names::Migration),
]
}
}
+152
View File
@@ -1,3 +1,4 @@
use sea_orm::sea_query::Expr;
use sea_orm::*;
use crate::entities::album::{self, ActiveModel, Entity as Albums, Model as Album};
@@ -84,6 +85,131 @@ pub async fn get_by_artist(db: &DatabaseConnection, artist_id: i32) -> DbResult<
.await?)
}
/// Fetch up to `count` albums in random order.
pub async fn get_random(db: &DatabaseConnection, count: u64) -> DbResult<Vec<Album>> {
    // Let the database shuffle via RANDOM() instead of loading everything.
    let rows = Albums::find()
        .order_by(Expr::cust("RANDOM()"), Order::Asc)
        .limit(count)
        .all(db)
        .await?;
    Ok(rows)
}
/// Page through albums newest-first. Albums without a year sort last
/// (COALESCE(year, 0)); ties are broken alphabetically by name.
pub async fn list_newest(db: &DatabaseConnection, limit: u64, offset: u64) -> DbResult<Vec<Album>> {
    let query = Albums::find()
        .order_by_desc(Expr::cust("COALESCE(year, 0)"))
        .order_by_asc(album::Column::Name)
        .limit(limit)
        .offset(offset);
    Ok(query.all(db).await?)
}
/// Page through albums whose year falls within `[from, to]` (inclusive).
/// The bounds may be supplied in either order.
pub async fn list_by_year_range(
    db: &DatabaseConnection,
    from: i32,
    to: i32,
    limit: u64,
    offset: u64,
) -> DbResult<Vec<Album>> {
    // Normalize so lo <= hi regardless of argument order.
    let lo = from.min(to);
    let hi = from.max(to);
    Ok(Albums::find()
        .filter(album::Column::Year.gte(lo))
        .filter(album::Column::Year.lte(hi))
        .order_by_asc(album::Column::Year)
        .limit(limit)
        .offset(offset)
        .all(db)
        .await?)
}
/// Page through albums that contain at least one track whose genre tag
/// matches `genre` (case-insensitive substring), ordered by album name.
pub async fn list_by_genre(
    db: &DatabaseConnection,
    genre: &str,
    limit: u64,
    offset: u64,
) -> DbResult<Vec<Album>> {
    use crate::entities::track;

    // Find album IDs that have tracks matching this genre.
    let pattern = format!("%{genre}%");
    let matching_tracks = track::Entity::find()
        .filter(Expr::cust_with_values(
            "LOWER(genre) LIKE LOWER(?)",
            [pattern],
        ))
        .filter(track::Column::AlbumId.is_not_null())
        .all(db)
        .await?;

    // Deduplicate album IDs through a set.
    let mut unique_ids = std::collections::HashSet::new();
    for t in matching_tracks {
        if let Some(id) = t.album_id {
            unique_ids.insert(id);
        }
    }
    if unique_ids.is_empty() {
        return Ok(vec![]);
    }

    let ids: Vec<i32> = unique_ids.into_iter().collect();
    Ok(Albums::find()
        .filter(album::Column::Id.is_in(ids))
        .order_by_asc(album::Column::Name)
        .limit(limit)
        .offset(offset)
        .all(db)
        .await?)
}
/// Page through albums sorted case-insensitively by album artist, then name.
pub async fn list_alphabetical_by_artist(
    db: &DatabaseConnection,
    limit: u64,
    offset: u64,
) -> DbResult<Vec<Album>> {
    let query = Albums::find()
        .order_by_asc(Expr::cust("LOWER(album_artist)"))
        .order_by_asc(Expr::cust("LOWER(name)"))
        .limit(limit)
        .offset(offset);
    Ok(query.all(db).await?)
}
/// Page through albums ordered by the `added_at` of their most recently
/// added track (newest first).
///
/// NOTE(review): this loads every track row to derive the ordering. Fine at
/// small library sizes, but a GROUP BY / MAX(added_at) query would scale
/// better — confirm before the track table grows large.
pub async fn list_recent(db: &DatabaseConnection, limit: u64, offset: u64) -> DbResult<Vec<Album>> {
    use crate::entities::track;
    // Find albums ordered by their most recently added track
    let tracks = track::Entity::find()
        .filter(track::Column::AlbumId.is_not_null())
        .order_by_desc(track::Column::AddedAt)
        .all(db)
        .await?;
    // Collect unique album IDs in order of most recent track. Pagination is
    // applied to the deduplicated album list, not the raw track list.
    let mut seen = std::collections::HashSet::new();
    let album_ids: Vec<i32> = tracks
        .into_iter()
        .filter_map(|t| t.album_id)
        .filter(|id| seen.insert(*id))
        .skip(offset as usize)
        .take(limit as usize)
        .collect();
    if album_ids.is_empty() {
        return Ok(vec![]);
    }
    // Fetch albums and preserve the ordering: IN-queries return rows in
    // arbitrary order, so rebuild the recency order via an id-keyed map.
    let albums = Albums::find()
        .filter(album::Column::Id.is_in(album_ids.clone()))
        .all(db)
        .await?;
    let album_map: std::collections::HashMap<i32, Album> =
        albums.into_iter().map(|a| (a.id, a)).collect();
    Ok(album_ids
        .into_iter()
        .filter_map(|id| album_map.get(&id).cloned())
        .collect())
}
pub async fn update(db: &DatabaseConnection, id: i32, model: ActiveModel) -> DbResult<Album> {
let mut active = model;
active.id = Set(id);
@@ -94,3 +220,29 @@ pub async fn delete(db: &DatabaseConnection, id: i32) -> DbResult<()> {
Albums::delete_by_id(id).exec(db).await?;
Ok(())
}
/// Remove every album belonging to `artist_id`; returns the rows deleted.
pub async fn delete_by_artist(db: &DatabaseConnection, artist_id: i32) -> DbResult<u64> {
    let outcome = Albums::delete_many()
        .filter(album::Column::ArtistId.eq(artist_id))
        .exec(db)
        .await?;
    Ok(outcome.rows_affected)
}
/// Delete albums that have no tracks referencing them.
///
/// NOTE(review): issues one COUNT query per album (N+1). Acceptable as an
/// occasional cleanup task; a single `DELETE … WHERE id NOT IN (SELECT
/// album_id FROM tracks …)` would be cheaper if this lands on a hot path.
pub async fn delete_empty(db: &DatabaseConnection) -> DbResult<u64> {
    let all = Albums::find().all(db).await?;
    let mut deleted = 0u64;
    for a in all {
        // An album is "empty" when no track row points at it.
        let has_tracks = crate::entities::track::Entity::find()
            .filter(crate::entities::track::Column::AlbumId.eq(a.id))
            .count(db)
            .await?
            > 0;
        if !has_tracks {
            Albums::delete_by_id(a.id).exec(db).await?;
            deleted += 1;
        }
    }
    Ok(deleted)
}
+64 -4
View File
@@ -1,4 +1,5 @@
use chrono::Utc;
use sea_orm::sea_query::Expr;
use sea_orm::*;
use crate::entities::artist::{self, ActiveModel, Entity as Artists, Model as Artist};
@@ -20,13 +21,18 @@ pub async fn upsert(
}
if let Some(existing) = find_by_name(db, name).await? {
// Update musicbrainz_id if we have one now and didn't before
if musicbrainz_id.is_some() && existing.musicbrainz_id.is_none() {
// We have an MBID now and the existing record doesn't — update it
let mut active: ActiveModel = existing.into();
active.musicbrainz_id = Set(musicbrainz_id.map(String::from));
return Ok(active.update(db).await?);
}
return Ok(existing);
if musicbrainz_id.is_none() || existing.musicbrainz_id.as_deref() == musicbrainz_id {
// No MBID provided, or MBIDs match — return existing
return Ok(existing);
}
// MBIDs differ — this is a different artist with the same name.
// Fall through to insert a new record.
}
// Try to insert — if we race with another task, catch the unique constraint
@@ -47,7 +53,15 @@ pub async fn upsert(
Err(DbErr::Exec(RuntimeErr::SqlxError(sqlx_err)))
if sqlx_err.to_string().contains("UNIQUE constraint failed") =>
{
// Lost the race — another task inserted first, just look it up
// Lost the race on MBID unique constraint — look up by MBID first, then name
if let Some(mbid) = musicbrainz_id
&& let Some(existing) = Artists::find()
.filter(artist::Column::MusicbrainzId.eq(mbid))
.one(db)
.await?
{
return Ok(existing);
}
find_by_name(db, name)
.await?
.ok_or_else(|| DbError::Other(format!("artist '{name}' vanished after conflict")))
@@ -72,13 +86,20 @@ pub async fn find_by_name(db: &DatabaseConnection, name: &str) -> DbResult<Optio
pub async fn list(db: &DatabaseConnection, limit: u64, offset: u64) -> DbResult<Vec<Artist>> {
Ok(Artists::find()
.order_by_asc(artist::Column::Name)
.order_by_asc(Expr::cust("LOWER(name)"))
.limit(limit)
.offset(offset)
.all(db)
.await?)
}
/// List every artist, ordered case-insensitively by name.
pub async fn list_all(db: &DatabaseConnection) -> DbResult<Vec<Artist>> {
    let query = Artists::find().order_by_asc(Expr::cust("LOWER(name)"));
    Ok(query.all(db).await?)
}
pub async fn update(db: &DatabaseConnection, id: i32, model: ActiveModel) -> DbResult<Artist> {
let mut active = model;
active.id = Set(id);
@@ -127,6 +148,45 @@ pub async fn list_monitored(db: &DatabaseConnection) -> DbResult<Vec<Artist>> {
.await?)
}
/// Delete artists that have no tracks, no wanted items, no albums, and are not monitored.
pub async fn delete_unused(db: &DatabaseConnection) -> DbResult<u64> {
let conn = db;
let all = Artists::find().all(conn).await?;
let mut deleted = 0u64;
for a in all {
if a.monitored {
continue;
}
let has_tracks = crate::entities::track::Entity::find()
.filter(crate::entities::track::Column::ArtistId.eq(a.id))
.count(conn)
.await?
> 0;
if has_tracks {
continue;
}
let has_wanted = crate::entities::wanted_item::Entity::find()
.filter(crate::entities::wanted_item::Column::ArtistId.eq(a.id))
.count(conn)
.await?
> 0;
if has_wanted {
continue;
}
let has_albums = crate::entities::album::Entity::find()
.filter(crate::entities::album::Column::ArtistId.eq(a.id))
.count(conn)
.await?
> 0;
if has_albums {
continue;
}
Artists::delete_by_id(a.id).exec(conn).await?;
deleted += 1;
}
Ok(deleted)
}
pub async fn update_last_checked(db: &DatabaseConnection, id: i32) -> DbResult<Artist> {
let existing = get_by_id(db, id).await?;
let mut active: ActiveModel = existing.into();
+116 -3
View File
@@ -45,6 +45,13 @@ pub async fn get_by_path(db: &DatabaseConnection, file_path: &str) -> DbResult<O
.await?)
}
/// Return every track carrying the given MusicBrainz ID.
pub async fn get_by_mbid(db: &DatabaseConnection, mbid: &str) -> DbResult<Vec<Track>> {
    let found = Tracks::find()
        .filter(track::Column::MusicbrainzId.eq(mbid))
        .all(db)
        .await?;
    Ok(found)
}
pub async fn list(db: &DatabaseConnection, limit: u64, offset: u64) -> DbResult<Vec<Track>> {
Ok(Tracks::find()
.order_by_asc(track::Column::Artist)
@@ -87,10 +94,10 @@ pub async fn get_by_artist(db: &DatabaseConnection, artist_id: i32) -> DbResult<
.await?)
}
pub async fn get_untagged(db: &DatabaseConnection) -> DbResult<Vec<Track>> {
pub async fn count_by_artist(db: &DatabaseConnection, artist_id: i32) -> DbResult<u64> {
Ok(Tracks::find()
.filter(track::Column::MusicbrainzId.is_null())
.all(db)
.filter(track::Column::ArtistId.eq(artist_id))
.count(db)
.await?)
}
@@ -164,6 +171,54 @@ pub async fn get_random(db: &DatabaseConnection, count: u64) -> DbResult<Vec<Tra
.await?)
}
/// Get random tracks with optional genre and year range filters.
pub async fn get_random_filtered(
    db: &DatabaseConnection,
    count: u64,
    genre: Option<&str>,
    from_year: Option<i32>,
    to_year: Option<i32>,
) -> DbResult<Vec<Track>> {
    let mut query = Tracks::find();
    // Genre matches case-insensitively as a substring of the genre tag.
    if let Some(genre) = genre {
        let pattern = format!("%{genre}%");
        query = query.filter(Expr::cust_with_values(
            "LOWER(genre) LIKE LOWER(?)",
            [pattern],
        ));
    }
    // Year bounds are each optional and inclusive.
    if let Some(lo) = from_year {
        query = query.filter(track::Column::Year.gte(lo));
    }
    if let Some(hi) = to_year {
        query = query.filter(track::Column::Year.lte(hi));
    }
    let picked = query
        .order_by(Expr::cust("RANDOM()"), Order::Asc)
        .limit(count)
        .all(db)
        .await?;
    Ok(picked)
}
/// Get tracks matching a genre with pagination.
pub async fn get_by_genre_paginated(
db: &DatabaseConnection,
genre: &str,
limit: u64,
offset: u64,
) -> DbResult<Vec<Track>> {
let pattern = format!("%{genre}%");
Ok(Tracks::find()
.filter(Expr::cust_with_values(
"LOWER(genre) LIKE LOWER(?)",
[pattern],
))
.limit(limit)
.offset(offset)
.all(db)
.await?)
}
/// Get tracks added within the last N days.
pub async fn get_recent(db: &DatabaseConnection, days: u32, limit: u64) -> DbResult<Vec<Track>> {
let cutoff = Utc::now().naive_utc() - chrono::Duration::days(i64::from(days));
@@ -175,6 +230,64 @@ pub async fn get_recent(db: &DatabaseConnection, days: u32, limit: u64) -> DbRes
.await?)
}
/// Get tracks that haven't been processed by the tagger yet.
pub async fn get_untagged(db: &DatabaseConnection) -> DbResult<Vec<Track>> {
    let untagged = Tracks::find()
        .filter(track::Column::Tagged.eq(false))
        .all(db)
        .await?;
    Ok(untagged)
}
/// Delete all tracks owned by `artist_id`, returning the number removed.
pub async fn delete_by_artist(db: &DatabaseConnection, artist_id: i32) -> DbResult<u64> {
    let outcome = Tracks::delete_many()
        .filter(track::Column::ArtistId.eq(artist_id))
        .exec(db)
        .await?;
    Ok(outcome.rows_affected)
}
/// Get tracks that have been tagged but have no corresponding wanted_item.
/// These are files that went through the pipeline but aren't part of any watched content.
///
/// A track counts as "wanted" when a wanted_item links to it either by
/// `track_id` or by a shared MusicBrainz ID.
///
/// NOTE(review): loads every tagged track and every wanted item into memory;
/// fine at typical library sizes, revisit if either table grows very large.
pub async fn get_unwanted(db: &DatabaseConnection) -> DbResult<Vec<Track>> {
    let all_tagged = Tracks::find()
        .filter(track::Column::Tagged.eq(true))
        .all(db)
        .await?;
    let all_wanted = crate::entities::wanted_item::Entity::find().all(db).await?;
    // Index the wanted items both ways so each track needs only set lookups.
    let wanted_mbids: std::collections::HashSet<&str> = all_wanted
        .iter()
        .filter_map(|w| w.musicbrainz_id.as_deref())
        .collect();
    let wanted_track_ids: std::collections::HashSet<i32> =
        all_wanted.iter().filter_map(|w| w.track_id).collect();
    Ok(all_tagged
        .into_iter()
        .filter(|t| {
            // Not linked by track_id
            !wanted_track_ids.contains(&t.id)
            // Not linked by MBID
            && !t.musicbrainz_id.as_deref().is_some_and(|mbid| wanted_mbids.contains(mbid))
        })
        .collect())
}
/// Delete tracks whose files no longer exist on disk.
pub async fn delete_orphaned(db: &DatabaseConnection) -> DbResult<u64> {
    let mut deleted = 0u64;
    for track in Tracks::find().all(db).await? {
        // Keep any track whose file is still present.
        if std::path::Path::new(&track.file_path).exists() {
            continue;
        }
        Tracks::delete_by_id(track.id).exec(db).await?;
        deleted += 1;
    }
    Ok(deleted)
}
/// Get tracks by artist name (case-insensitive match).
pub async fn get_by_artist_name(db: &DatabaseConnection, name: &str) -> DbResult<Vec<Track>> {
Ok(Tracks::find()
+40
View File
@@ -69,6 +69,30 @@ pub async fn update_status(
Ok(active.update(db).await?)
}
/// Set the MusicBrainz ID on a wanted item and bump `updated_at`.
pub async fn update_mbid(
    db: &DatabaseConnection,
    id: i32,
    musicbrainz_id: &str,
) -> DbResult<WantedItem> {
    let mut active: ActiveModel = get_by_id(db, id).await?.into();
    active.musicbrainz_id = Set(Some(musicbrainz_id.to_string()));
    active.updated_at = Set(Utc::now().naive_utc());
    Ok(active.update(db).await?)
}
/// Link a wanted item to a library track and bump `updated_at`.
pub async fn update_track_id(
    db: &DatabaseConnection,
    id: i32,
    track_id: i32,
) -> DbResult<WantedItem> {
    let mut active: ActiveModel = get_by_id(db, id).await?.into();
    active.track_id = Set(Some(track_id));
    active.updated_at = Set(Utc::now().naive_utc());
    Ok(active.update(db).await?)
}
pub async fn find_by_mbid(
db: &DatabaseConnection,
musicbrainz_id: &str,
@@ -84,6 +108,22 @@ pub async fn remove(db: &DatabaseConnection, id: i32) -> DbResult<()> {
Ok(())
}
/// Delete every wanted item tied to `artist_id`; returns rows removed.
pub async fn remove_by_artist(db: &DatabaseConnection, artist_id: i32) -> DbResult<u64> {
    let outcome = WantedItems::delete_many()
        .filter(wanted_item::Column::ArtistId.eq(artist_id))
        .exec(db)
        .await?;
    Ok(outcome.rows_affected)
}
/// Delete every wanted item carrying the given MusicBrainz ID; returns rows removed.
pub async fn remove_by_mbid(db: &DatabaseConnection, musicbrainz_id: &str) -> DbResult<u64> {
    let outcome = WantedItems::delete_many()
        .filter(wanted_item::Column::MusicbrainzId.eq(musicbrainz_id))
        .exec(db)
        .await?;
    Ok(outcome.rows_affected)
}
/// Promote all Downloaded items to Owned status. Returns the count updated.
pub async fn promote_downloaded_to_owned(db: &DatabaseConnection) -> DbResult<u64> {
let now = Utc::now().naive_utc();
+50 -6
View File
@@ -199,6 +199,12 @@ pub async fn counts_all(db: &DatabaseConnection) -> DbResult<AllCounts> {
completed: 0,
failed: 0,
},
enrich: TypeCounts {
pending: 0,
running: 0,
completed: 0,
failed: 0,
},
};
for item in items {
@@ -207,6 +213,7 @@ pub async fn counts_all(db: &DatabaseConnection) -> DbResult<AllCounts> {
WorkTaskType::Index => &mut result.index,
WorkTaskType::Tag => &mut result.tag,
WorkTaskType::Organize => &mut result.organize,
WorkTaskType::Enrich => &mut result.enrich,
};
match item.status {
WorkQueueStatus::Pending => counts.pending += 1,
@@ -224,16 +231,24 @@ pub struct AllCounts {
pub index: TypeCounts,
pub tag: TypeCounts,
pub organize: TypeCounts,
pub enrich: TypeCounts,
}
/// Check if all items for a pipeline are completed.
pub async fn pipeline_is_complete(db: &DatabaseConnection, pipeline_id: &str) -> DbResult<bool> {
let incomplete = WorkQueue::find()
/// Check if all items for a pipeline are completed (excluding a specific item ID,
/// typically the currently-running item that hasn't been marked complete yet).
pub async fn pipeline_is_complete(
db: &DatabaseConnection,
pipeline_id: &str,
exclude_id: Option<i32>,
) -> DbResult<bool> {
let mut query = WorkQueue::find()
.filter(work_queue::Column::PipelineId.eq(pipeline_id))
.filter(work_queue::Column::Status.ne(WorkQueueStatus::Completed))
.filter(work_queue::Column::Status.ne(WorkQueueStatus::Failed))
.count(db)
.await?;
.filter(work_queue::Column::Status.ne(WorkQueueStatus::Failed));
if let Some(id) = exclude_id {
query = query.filter(work_queue::Column::Id.ne(id));
}
let incomplete = query.count(db).await?;
Ok(incomplete == 0)
}
@@ -248,6 +263,35 @@ pub async fn cleanup_completed(db: &DatabaseConnection, older_than_days: i64) ->
Ok(result.rows_affected)
}
/// Delete all completed and failed items for a specific pipeline.
pub async fn clear_pipeline(db: &DatabaseConnection, pipeline_id: &str) -> DbResult<u64> {
    // "Finished" means a terminal state: completed or failed.
    let finished = work_queue::Column::Status
        .eq(WorkQueueStatus::Completed)
        .or(work_queue::Column::Status.eq(WorkQueueStatus::Failed));
    let outcome = WorkQueue::delete_many()
        .filter(work_queue::Column::PipelineId.eq(pipeline_id))
        .filter(finished)
        .exec(db)
        .await?;
    Ok(outcome.rows_affected)
}
/// Delete all completed/failed items that belong to a pipeline (have a pipeline_id).
/// Does not affect standalone (non-pipeline) work items.
pub async fn clear_all_pipelines(db: &DatabaseConnection) -> DbResult<u64> {
    // "Finished" means a terminal state: completed or failed.
    let finished = work_queue::Column::Status
        .eq(WorkQueueStatus::Completed)
        .or(work_queue::Column::Status.eq(WorkQueueStatus::Failed));
    let outcome = WorkQueue::delete_many()
        .filter(work_queue::Column::PipelineId.is_not_null())
        .filter(finished)
        .exec(db)
        .await?;
    Ok(outcome.rows_affected)
}
/// Check if there are any pending or running items.
pub async fn has_active_work(db: &DatabaseConnection) -> DbResult<bool> {
let count = WorkQueue::find()