test_musehub_analysis.py
python
"""Tests for Muse Hub Analysis endpoints.
| 2 | |
| 3 | Covers all acceptance criteria: |
| 4 | - GET /musehub/repos/{repo_id}/analysis/{ref}/{dimension} returns structured JSON |
| 5 | - All 13 dimensions return valid typed data |
| 6 | - Aggregate endpoint returns all 13 dimensions |
| 7 | - Track and section query param filters are applied |
| 8 | - Unknown dimension returns 404 |
| 9 | - Unknown repo_id returns 404 |
| 10 | - ETag header is present in all responses |
| 11 | - Service layer: compute_dimension raises ValueError for unknown dimension |
| 12 | - Service layer: each dimension returns the correct model type |
| 13 | |
| 14 | Covers (emotion map): |
| 15 | - test_compute_emotion_map_returns_correct_type — service returns EmotionMapResponse |
| 16 | - test_emotion_map_evolution_has_beat_samples — evolution list is non-empty with valid vectors |
| 17 | - test_emotion_map_trajectory_ordered — trajectory is oldest-first with head last |
| 18 | - test_emotion_map_drift_count — drift has len(trajectory)-1 entries |
| 19 | - test_emotion_map_narrative_nonempty — narrative is a non-empty string |
| 20 | - test_emotion_map_is_deterministic — same ref always returns same summary_vector |
| 21 | - test_emotion_map_endpoint_200 — HTTP GET returns 200 with required fields |
| 22 | - test_emotion_map_endpoint_requires_auth — endpoint returns 401 without auth |
| 23 | - test_emotion_map_endpoint_unknown_repo_404 — unknown repo returns 404 |
| 24 | - test_emotion_map_endpoint_etag — ETag header is present |
| 25 | |
| 26 | Covers (emotion diff): |
| 27 | - test_compute_emotion_diff_returns_correct_type — service returns EmotionDiffResponse |
| 28 | - test_emotion_diff_base_emotion_axes_in_range — base vector axes are all in [0, 1] |
| 29 | - test_emotion_diff_head_emotion_axes_in_range — head vector axes are all in [0, 1] |
| 30 | - test_emotion_diff_delta_axes_in_range — delta axes are all in [-1, 1] |
| 31 | - test_emotion_diff_delta_equals_head_minus_base — delta = head - base per axis |
| 32 | - test_emotion_diff_interpretation_nonempty — interpretation string is non-empty |
| 33 | - test_emotion_diff_is_deterministic — same refs always return same delta |
| 34 | - test_emotion_diff_different_refs_differ — distinct refs produce distinct vectors |
| 35 | - test_emotion_diff_endpoint_200 — HTTP GET returns 200 with required fields |
| 36 | - test_emotion_diff_endpoint_requires_auth — endpoint returns 401 without auth |
| 37 | - test_emotion_diff_endpoint_unknown_repo_404 — unknown repo returns 404 |
| 38 | - test_emotion_diff_endpoint_etag — ETag header is present |
| 39 | |
| 40 | Covers (recall / semantic search): |
| 41 | - test_compute_recall_returns_correct_type — service returns RecallResponse |
| 42 | - test_compute_recall_scores_descending — matches are sorted best-first |
| 43 | - test_compute_recall_scores_in_range — all scores are in [0, 1] |
| 44 | - test_compute_recall_limit_respected — limit caps the result count |
| 45 | - test_compute_recall_is_deterministic — same (ref, q) always returns same matches |
| 46 | - test_compute_recall_differs_by_query — different queries produce different results |
| 47 | - test_compute_recall_match_dimensions_nonempty — every match has at least one matched dimension |
| 48 | - test_recall_endpoint_200 — HTTP GET returns 200 with required fields |
| 49 | - test_recall_endpoint_requires_auth — endpoint returns 401 without auth |
| 50 | - test_recall_endpoint_unknown_repo_404 — unknown repo returns 404 |
| 51 | - test_recall_endpoint_etag_header — ETag header is present |
| 52 | - test_recall_endpoint_limit_param — ?limit=3 caps results to 3 |
| 53 | - test_recall_endpoint_missing_q_422 — missing ?q returns 422 |
| 54 | """ |
| 55 | from __future__ import annotations |
| 56 | |
| 57 | import pytest |
| 58 | from httpx import AsyncClient |
| 59 | from sqlalchemy.ext.asyncio import AsyncSession |
| 60 | |
| 61 | from musehub.models.musehub_analysis import ( |
| 62 | ALL_DIMENSIONS, |
| 63 | AggregateAnalysisResponse, |
| 64 | AnalysisResponse, |
| 65 | ChordMapData, |
| 66 | CommitEmotionSnapshot, |
| 67 | ContourData, |
| 68 | DivergenceData, |
| 69 | DynamicsData, |
| 70 | EmotionData, |
| 71 | EmotionDelta8D, |
| 72 | EmotionDiffResponse, |
| 73 | EmotionDrift, |
| 74 | EmotionMapResponse, |
| 75 | EmotionVector, |
| 76 | EmotionVector8D, |
| 77 | FormData, |
| 78 | GrooveData, |
| 79 | HarmonyData, |
| 80 | KeyData, |
| 81 | MeterData, |
| 82 | MotifEntry, |
| 83 | MotifsData, |
| 84 | RecallMatch, |
| 85 | RecallResponse, |
| 86 | RefSimilarityResponse, |
| 87 | SimilarityData, |
| 88 | TempoData, |
| 89 | ) |
| 90 | from musehub.services.musehub_analysis import ( |
| 91 | compute_aggregate_analysis, |
| 92 | compute_analysis_response, |
| 93 | compute_dimension, |
| 94 | compute_emotion_diff, |
| 95 | compute_emotion_map, |
| 96 | compute_recall, |
| 97 | compute_ref_similarity, |
| 98 | ) |
| 99 | |
| 100 | |
| 101 | # --------------------------------------------------------------------------- |
| 102 | # Helpers |
| 103 | # --------------------------------------------------------------------------- |
| 104 | |
| 105 | |
async def _create_repo(client: AsyncClient, auth_headers: dict[str, str]) -> str:
    """Provision a throwaway private repo via the API and hand back its id."""
    payload = {"name": "analysis-test-repo", "owner": "testuser", "visibility": "private"}
    response = await client.post(
        "/api/v1/musehub/repos",
        json=payload,
        headers=auth_headers,
    )
    assert response.status_code == 201
    return str(response.json()["repoId"])
| 115 | |
| 116 | |
| 117 | # --------------------------------------------------------------------------- |
| 118 | # Service unit tests — no HTTP |
| 119 | # --------------------------------------------------------------------------- |
| 120 | |
| 121 | |
def test_compute_dimension_harmony_returns_harmony_data() -> None:
    """compute_dimension('harmony', ...) yields a populated HarmonyData."""
    data = compute_dimension("harmony", "main")
    assert isinstance(data, HarmonyData)
    assert data.tonic != ""
    assert data.mode != ""
    assert 0.0 <= data.key_confidence <= 1.0
    assert len(data.chord_progression) > 0
    assert data.total_beats > 0
| 131 | |
| 132 | |
def test_compute_dimension_dynamics_returns_dynamics_data() -> None:
    """Dynamics dimension returns DynamicsData with a self-consistent range."""
    data = compute_dimension("dynamics", "main")
    assert isinstance(data, DynamicsData)
    assert len(data.velocity_curve) > 0
    assert 0 <= data.min_velocity <= data.peak_velocity <= 127
    assert data.dynamic_range == data.peak_velocity - data.min_velocity
| 139 | |
| 140 | |
def test_compute_dimension_motifs_returns_motifs_data() -> None:
    """Motifs dimension returns MotifsData whose counts agree with its lists."""
    data = compute_dimension("motifs", "main")
    assert isinstance(data, MotifsData)
    assert len(data.motifs) == data.total_motifs
    for entry in data.motifs:
        assert len(entry.occurrences) == entry.occurrence_count
| 147 | |
| 148 | |
def test_motifs_data_has_extended_fields() -> None:
    """MotifsData exposes non-empty sections and all_tracks lists for the grid."""
    data = compute_dimension("motifs", "main")
    assert isinstance(data, MotifsData)
    for roster in (data.sections, data.all_tracks):
        assert isinstance(roster, list)
        assert len(roster) > 0
| 157 | |
| 158 | |
def test_motif_entry_has_contour_label() -> None:
    """Every MotifEntry carries one of the known melodic contour labels."""
    data = compute_dimension("motifs", "main")
    assert isinstance(data, MotifsData)
    known_labels = {
        "ascending-step",
        "descending-step",
        "arch",
        "valley",
        "oscillating",
        "static",
    }
    for entry in data.motifs:
        assert isinstance(entry, MotifEntry)
        assert entry.contour_label in known_labels, (
            f"Unknown contour label: {entry.contour_label!r}"
        )
| 176 | |
| 177 | |
def test_motif_entry_has_transformations() -> None:
    """Each MotifEntry lists at least one transformation of a known type."""
    data = compute_dimension("motifs", "main")
    assert isinstance(data, MotifsData)
    allowed_types = {
        "inversion",
        "retrograde",
        "retrograde-inversion",
        "transposition",
    }
    for entry in data.motifs:
        assert isinstance(entry, MotifEntry)
        assert len(entry.transformations) > 0
        for transform in entry.transformations:
            assert transform.transformation_type in allowed_types
            assert isinstance(transform.intervals, list)
            assert isinstance(transform.occurrences, list)
| 194 | |
| 195 | |
def test_motif_entry_has_recurrence_grid() -> None:
    """recurrence_grid is flat, with one cell per (track, section) pair."""
    data = compute_dimension("motifs", "main")
    assert isinstance(data, MotifsData)
    cell_total = len(data.all_tracks) * len(data.sections)
    for entry in data.motifs:
        assert isinstance(entry, MotifEntry)
        assert len(entry.recurrence_grid) == cell_total, (
            f"Expected {cell_total} cells, got {len(entry.recurrence_grid)} "
            f"for motif {entry.motif_id!r}"
        )
        for cell in entry.recurrence_grid:
            assert cell.track in data.all_tracks
            assert cell.section in data.sections
            assert isinstance(cell.present, bool)
            assert cell.occurrence_count >= 0
| 212 | |
| 213 | |
def test_motif_entry_tracks_cross_track() -> None:
    """Each motif's tracks list is non-empty and drawn from the global roster."""
    data = compute_dimension("motifs", "main")
    assert isinstance(data, MotifsData)
    for entry in data.motifs:
        assert isinstance(entry, MotifEntry)
        assert len(entry.tracks) > 0
        for name in entry.tracks:
            # A name outside all_tracks would mean roster and grid disagree.
            assert name in data.all_tracks, (
                f"motif.tracks references unknown track {name!r}"
            )
| 226 | |
| 227 | |
def test_compute_dimension_form_returns_form_data() -> None:
    """Form dimension returns FormData with internally consistent sections."""
    data = compute_dimension("form", "main")
    assert isinstance(data, FormData)
    assert data.form_label != ""
    assert len(data.sections) > 0
    for section in data.sections:
        assert section.end_beat - section.start_beat == section.length_beats
| 235 | |
| 236 | |
def test_compute_dimension_groove_returns_groove_data() -> None:
    """Groove dimension returns GrooveData with a sane swing factor and tempo."""
    data = compute_dimension("groove", "main")
    assert isinstance(data, GrooveData)
    assert data.bpm > 0
    assert 0.0 <= data.swing_factor <= 1.0
| 242 | |
| 243 | |
def test_compute_dimension_emotion_returns_emotion_data() -> None:
    """Emotion dimension returns EmotionData with both axes inside range."""
    data = compute_dimension("emotion", "main")
    assert isinstance(data, EmotionData)
    assert data.primary_emotion != ""
    assert -1.0 <= data.valence <= 1.0
    assert 0.0 <= data.arousal <= 1.0
| 250 | |
| 251 | |
def test_compute_dimension_chord_map_returns_chord_map_data() -> None:
    """Chord-map dimension returns ChordMapData with a matching chord count."""
    data = compute_dimension("chord-map", "main")
    assert isinstance(data, ChordMapData)
    assert len(data.progression) == data.total_chords
| 256 | |
| 257 | |
def test_compute_dimension_contour_returns_contour_data() -> None:
    """Contour dimension returns ContourData with a recognised overall shape."""
    data = compute_dimension("contour", "main")
    assert isinstance(data, ContourData)
    assert len(data.pitch_curve) > 0
    assert data.shape in ("arch", "ascending", "descending", "flat", "wave")
| 263 | |
| 264 | |
def test_compute_dimension_key_returns_key_data() -> None:
    """Key dimension returns KeyData with a tonic and bounded confidence."""
    data = compute_dimension("key", "main")
    assert isinstance(data, KeyData)
    assert data.tonic != ""
    assert 0.0 <= data.confidence <= 1.0
| 270 | |
| 271 | |
def test_compute_dimension_tempo_returns_tempo_data() -> None:
    """Tempo dimension returns TempoData with positive bpm and bounded stability."""
    data = compute_dimension("tempo", "main")
    assert isinstance(data, TempoData)
    assert 0.0 <= data.stability <= 1.0
    assert data.bpm > 0
| 277 | |
| 278 | |
def test_compute_dimension_meter_returns_meter_data() -> None:
    """Meter dimension returns MeterData with a slash-form time signature."""
    data = compute_dimension("meter", "main")
    assert isinstance(data, MeterData)
    assert len(data.beat_strength_profile) > 0
    assert "/" in data.time_signature
| 284 | |
| 285 | |
def test_compute_dimension_similarity_returns_similarity_data() -> None:
    """Similarity dimension returns SimilarityData with scores in [0, 1]."""
    data = compute_dimension("similarity", "main")
    assert isinstance(data, SimilarityData)
    assert data.embedding_dimensions > 0
    for neighbour in data.similar_commits:
        assert 0.0 <= neighbour.score <= 1.0
| 292 | |
| 293 | |
def test_compute_dimension_divergence_returns_divergence_data() -> None:
    """Divergence dimension returns DivergenceData with a bounded score."""
    data = compute_dimension("divergence", "main")
    assert isinstance(data, DivergenceData)
    assert data.base_ref != ""
    assert 0.0 <= data.divergence_score <= 1.0
| 299 | |
| 300 | |
def test_compute_dimension_unknown_raises_value_error() -> None:
    """An unrecognised dimension name raises ValueError with a clear message."""
    expected_message = "Unknown analysis dimension"
    with pytest.raises(ValueError, match=expected_message):
        compute_dimension("not-a-dimension", "main")
| 305 | |
| 306 | |
def test_compute_dimension_is_deterministic() -> None:
    """Repeated calls with the same ref return identical harmony fields."""
    first = compute_dimension("harmony", "abc123")
    second = compute_dimension("harmony", "abc123")
    assert isinstance(first, HarmonyData)
    assert isinstance(second, HarmonyData)
    assert (first.tonic, first.mode) == (second.tonic, second.mode)
| 315 | |
| 316 | |
def test_compute_dimension_differs_by_ref() -> None:
    """Distinct refs are both accepted and each yields a valid TempoData."""
    on_main = compute_dimension("tempo", "main")
    on_develop = compute_dimension("tempo", "develop")
    assert isinstance(on_main, TempoData)
    assert isinstance(on_develop, TempoData)
    # Outputs may legitimately coincide; we only require both to be valid.
    assert on_main.bpm > 0
    assert on_develop.bpm > 0
| 326 | |
| 327 | |
def test_all_dimensions_list_has_13_entries() -> None:
    """The canonical dimension roster holds exactly 13 names."""
    assert len(ALL_DIMENSIONS) == 13
| 331 | |
| 332 | |
def test_compute_analysis_response_envelope() -> None:
    """compute_analysis_response wraps dimension data in a full envelope."""
    envelope = compute_analysis_response(
        repo_id="test-repo-id",
        dimension="harmony",
        ref="main",
        track="bass",
        section="chorus",
    )
    assert isinstance(envelope, AnalysisResponse)
    assert isinstance(envelope.data, HarmonyData)
    assert envelope.dimension == "harmony"
    assert envelope.ref == "main"
    assert envelope.filters_applied.track == "bass"
    assert envelope.filters_applied.section == "chorus"
| 348 | |
| 349 | |
def test_compute_aggregate_returns_all_dimensions() -> None:
    """Aggregate computation yields exactly one entry per supported dimension."""
    aggregate = compute_aggregate_analysis(repo_id="test-repo-id", ref="main")
    assert isinstance(aggregate, AggregateAnalysisResponse)
    assert len(aggregate.dimensions) == 13
    assert {entry.dimension for entry in aggregate.dimensions} == set(ALL_DIMENSIONS)
| 357 | |
| 358 | |
def test_compute_aggregate_all_have_same_ref() -> None:
    """Every aggregate entry carries the ref that was requested."""
    aggregate = compute_aggregate_analysis(repo_id="test-repo-id", ref="feature/jazz")
    assert all(entry.ref == "feature/jazz" for entry in aggregate.dimensions)
| 364 | |
| 365 | |
def test_compute_aggregate_filters_propagated() -> None:
    """Track and section filters fan out to every dimension entry."""
    aggregate = compute_aggregate_analysis(
        repo_id="test-repo-id", ref="main", track="keys", section="verse_1"
    )
    for entry in aggregate.dimensions:
        assert entry.filters_applied.track == "keys"
        assert entry.filters_applied.section == "verse_1"
| 374 | |
| 375 | |
| 376 | # --------------------------------------------------------------------------- |
| 377 | # HTTP integration tests |
| 378 | # --------------------------------------------------------------------------- |
| 379 | |
| 380 | |
@pytest.mark.anyio
async def test_analysis_harmony_endpoint(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET .../analysis/{ref}/harmony serves the dedicated harmony payload.

    The /harmony path is handled by the dedicated HarmonyAnalysisResponse
    endpoint rather than the generic /{dimension} catch-all, so the body is
    Roman-numeral-centric (key, mode, romanNumerals, cadences, modulations)
    instead of the generic AnalysisResponse envelope.
    """
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/harmony",
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    for field in (
        "key",
        "mode",
        "romanNumerals",
        "cadences",
        "modulations",
        "harmonicRhythmBpm",
    ):
        assert field in payload
| 407 | |
| 408 | |
@pytest.mark.anyio
async def test_analysis_dynamics_endpoint(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET .../{repo_id}/analysis/{ref}/dynamics serves velocity statistics."""
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/dynamics",
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()["data"]
    for field in ("peakVelocity", "meanVelocity", "velocityCurve"):
        assert field in payload
| 426 | |
| 427 | |
@pytest.mark.anyio
async def test_analysis_all_dimensions(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """The aggregate endpoint reports every one of the 13 dimensions."""
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main",
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["ref"] == "main"
    assert payload["repoId"] == repo_id
    assert "dimensions" in payload
    assert len(payload["dimensions"]) == 13
    assert {d["dimension"] for d in payload["dimensions"]} == set(ALL_DIMENSIONS)
| 448 | |
| 449 | |
@pytest.mark.anyio
async def test_analysis_track_filter(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """?track= is echoed back via filtersApplied; section stays unset."""
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/groove?track=bass",
        headers=auth_headers,
    )
    assert response.status_code == 200
    filters = response.json()["filtersApplied"]
    assert filters["track"] == "bass"
    assert filters["section"] is None
| 466 | |
| 467 | |
@pytest.mark.anyio
async def test_analysis_section_filter(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """?section= is echoed back via filtersApplied."""
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/emotion?section=chorus",
        headers=auth_headers,
    )
    assert response.status_code == 200
    assert response.json()["filtersApplied"]["section"] == "chorus"
| 483 | |
| 484 | |
@pytest.mark.anyio
async def test_analysis_unknown_dimension_404(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """An unrecognised dimension maps to 404 (not a 422 validation error)."""
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/not-a-dimension",
        headers=auth_headers,
    )
    assert response.status_code == 404
    assert "not-a-dimension" in response.json()["detail"]
| 499 | |
| 500 | |
@pytest.mark.anyio
async def test_analysis_unknown_repo_404(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """A repo_id that does not exist yields 404 on the dimension endpoint."""
    response = await client.get(
        "/api/v1/musehub/repos/00000000-0000-0000-0000-000000000000/analysis/main/harmony",
        headers=auth_headers,
    )
    assert response.status_code == 404
| 513 | |
| 514 | |
@pytest.mark.anyio
async def test_analysis_aggregate_unknown_repo_404(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """A repo_id that does not exist yields 404 on the aggregate endpoint."""
    response = await client.get(
        "/api/v1/musehub/repos/00000000-0000-0000-0000-000000000000/analysis/main",
        headers=auth_headers,
    )
    assert response.status_code == 404
| 527 | |
| 528 | |
@pytest.mark.anyio
async def test_analysis_cache_headers(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Analysis responses carry a quoted ETag plus a Last-Modified header."""
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/key",
        headers=auth_headers,
    )
    assert response.status_code == 200
    assert "last-modified" in response.headers
    assert "etag" in response.headers
    assert response.headers["etag"].startswith('"')
| 545 | |
| 546 | |
@pytest.mark.anyio
async def test_analysis_aggregate_cache_headers(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """The aggregate endpoint also sets an ETag header."""
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main",
        headers=auth_headers,
    )
    assert response.status_code == 200
    assert "etag" in response.headers
| 561 | |
| 562 | |
@pytest.mark.anyio
async def test_analysis_requires_auth(
    client: AsyncClient,
    auth_headers: dict[str, str],
) -> None:
    """A private repo's analysis endpoint rejects unauthenticated requests.

    Pre-existing fix: the route must check auth AFTER confirming the repo
    exists, so a real private repo is created first to reach the auth gate.
    """
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/harmony",
    )
    assert response.status_code == 401
| 578 | |
| 579 | |
@pytest.mark.anyio
async def test_analysis_aggregate_requires_auth(
    client: AsyncClient,
    auth_headers: dict[str, str],
) -> None:
    """The aggregate endpoint rejects unauthenticated requests with 401.

    Pre-existing fix: the route must check auth AFTER confirming the repo
    exists, so a real private repo is created first to reach the auth gate.
    """
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main",
    )
    assert response.status_code == 401
| 595 | |
| 596 | |
@pytest.mark.anyio
async def test_analysis_all_13_dimensions_individually(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Each of the 13 dimensions returns 200; harmony now has a dedicated endpoint.

    The ``harmony`` dimension path is handled by the dedicated
    HarmonyAnalysisResponse endpoint which returns a different response shape
    (no ``dimension`` envelope field), and ``similarity`` is a dedicated
    cross-ref endpoint verified elsewhere. The remaining 11 dimensions use the
    generic AnalysisResponse envelope and are verified here.
    """
    repo_id = await _create_repo(client, auth_headers)
    for dim in ALL_DIMENSIONS:
        # /similarity is a dedicated cross-ref endpoint requiring ?compare=
        params = {"compare": "main"} if dim == "similarity" else {}
        resp = await client.get(
            f"/api/v1/musehub/repos/{repo_id}/analysis/main/{dim}",
            headers=auth_headers,
            params=params,
        )
        assert resp.status_code == 200, f"Dimension {dim!r} returned {resp.status_code}"
        body = resp.json()
        if dim == "harmony":
            # Dedicated endpoint — HarmonyAnalysisResponse (no "dimension" envelope).
            # Fix: these messages are constant, so the f prefix was useless (F541).
            assert "key" in body, "Harmony endpoint missing 'key' field"
            assert "romanNumerals" in body, "Harmony endpoint missing 'romanNumerals' field"
        elif dim == "similarity":
            # Dedicated endpoint — RefSimilarityResponse (no "dimension" envelope)
            pass  # tested separately in test_ref_similarity_endpoint_*
        else:
            assert body["dimension"] == dim, (
                f"Expected dimension={dim!r}, got {body['dimension']!r}"
            )
| 632 | |
| 633 | |
| 634 | # --------------------------------------------------------------------------- |
| 635 | # Emotion map service unit tests |
| 636 | # --------------------------------------------------------------------------- |
| 637 | |
| 638 | |
def test_compute_emotion_map_returns_correct_type() -> None:
    """The emotion-map service yields an EmotionMapResponse."""
    emotion_map = compute_emotion_map(repo_id="test-repo", ref="main")
    assert isinstance(emotion_map, EmotionMapResponse)
| 643 | |
| 644 | |
def test_emotion_map_evolution_has_beat_samples() -> None:
    """Evolution is non-empty and every sampled vector stays within [0, 1]."""
    emotion_map = compute_emotion_map(repo_id="test-repo", ref="main")
    assert len(emotion_map.evolution) > 0
    for sample in emotion_map.evolution:
        vector = sample.vector
        assert isinstance(vector, EmotionVector)
        for axis in (vector.energy, vector.valence, vector.tension, vector.darkness):
            assert 0.0 <= axis <= 1.0
| 656 | |
| 657 | |
def test_emotion_map_summary_vector_valid() -> None:
    """Every axis of the summary vector lies in [0, 1]."""
    summary = compute_emotion_map(repo_id="test-repo", ref="main").summary_vector
    for axis in (summary.energy, summary.valence, summary.tension, summary.darkness):
        assert 0.0 <= axis <= 1.0
| 666 | |
| 667 | |
def test_emotion_map_trajectory_ordered() -> None:
    """The trajectory is oldest-first, ending at the head commit."""
    emotion_map = compute_emotion_map(repo_id="test-repo", ref="deadbeef")
    assert len(emotion_map.trajectory) >= 2
    newest = emotion_map.trajectory[-1]
    assert isinstance(newest, CommitEmotionSnapshot)
    assert newest.commit_id.startswith("deadbeef")
| 675 | |
| 676 | |
def test_emotion_map_drift_count() -> None:
    """There is exactly one drift entry per consecutive trajectory pair."""
    emotion_map = compute_emotion_map(repo_id="test-repo", ref="main")
    assert len(emotion_map.drift) == len(emotion_map.trajectory) - 1
| 681 | |
| 682 | |
def test_emotion_map_drift_entries_valid() -> None:
    """Drift magnitudes are non-negative and dominant_change names a real axis."""
    emotion_map = compute_emotion_map(repo_id="test-repo", ref="main")
    axes = {"energy", "valence", "tension", "darkness"}
    for step in emotion_map.drift:
        assert isinstance(step, EmotionDrift)
        assert step.drift >= 0.0
        assert step.dominant_change in axes
| 691 | |
| 692 | |
def test_emotion_map_narrative_nonempty() -> None:
    """The narrative is a meaningful (>10 char) description string."""
    narrative = compute_emotion_map(repo_id="test-repo", ref="main").narrative
    assert isinstance(narrative, str)
    assert len(narrative) > 10
| 698 | |
| 699 | |
def test_emotion_map_source_is_valid() -> None:
    """Attribution source is one of the three allowed values."""
    emotion_map = compute_emotion_map(repo_id="test-repo", ref="main")
    assert emotion_map.source in ("explicit", "inferred", "mixed")
| 704 | |
| 705 | |
def test_emotion_map_is_deterministic() -> None:
    """The same ref always yields an identical summary vector."""
    first = compute_emotion_map(repo_id="test-repo", ref="jazz-ref").summary_vector
    second = compute_emotion_map(repo_id="test-repo", ref="jazz-ref").summary_vector
    assert (first.energy, first.valence, first.tension, first.darkness) == (
        second.energy,
        second.valence,
        second.tension,
        second.darkness,
    )
| 714 | |
| 715 | |
def test_emotion_map_filters_propagated() -> None:
    """Track and section filters surface in filters_applied."""
    emotion_map = compute_emotion_map(
        repo_id="test-repo", ref="main", track="bass", section="chorus"
    )
    assert emotion_map.filters_applied.track == "bass"
    assert emotion_map.filters_applied.section == "chorus"
| 723 | |
| 724 | |
| 725 | # --------------------------------------------------------------------------- |
| 726 | # Emotion map HTTP endpoint tests |
| 727 | # --------------------------------------------------------------------------- |
| 728 | |
| 729 | |
@pytest.mark.anyio
async def test_emotion_map_endpoint_200(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET .../analysis/{ref}/emotion-map answers 200 with all top-level fields."""
    repo_id = await _create_repo(client, auth_headers)
    response = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/emotion-map",
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["repoId"] == repo_id
    assert payload["ref"] == "main"
    for field in (
        "evolution",
        "trajectory",
        "drift",
        "narrative",
        "summaryVector",
        "source",
    ):
        assert field in payload
| 753 | |
@pytest.mark.anyio
async def test_emotion_map_endpoint_requires_auth(
    client: AsyncClient,
    db_session: AsyncSession,
) -> None:
    """Without a Bearer token the emotion map endpoint rejects with 401."""
    url = "/api/v1/musehub/repos/some-id/analysis/main/emotion-map"
    resp = await client.get(url)
    assert resp.status_code == 401
| 764 | |
| 765 | |
@pytest.mark.anyio
async def test_emotion_map_endpoint_unknown_repo_404(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """A repo_id that does not exist yields 404 from the emotion map endpoint."""
    url = "/api/v1/musehub/repos/00000000-0000-0000-0000-000000000000/analysis/main/emotion-map"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 404
| 778 | |
| 779 | |
@pytest.mark.anyio
async def test_emotion_map_endpoint_etag(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """The emotion map response carries a quoted ETag for client-side caching."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/emotion-map"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    assert "etag" in resp.headers
    # Strong ETags are wrapped in double quotes per RFC 9110.
    assert resp.headers["etag"].startswith('"')
| 795 | |
| 796 | |
@pytest.mark.anyio
async def test_emotion_map_endpoint_track_filter(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """?track= is echoed back in filtersApplied of the emotion map response."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/emotion-map?track=bass"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    payload = resp.json()
    assert payload["filtersApplied"]["track"] == "bass"
| 811 | |
| 812 | |
@pytest.mark.anyio
async def test_contour_track_filter(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Track filter is applied and reflected in filtersApplied for the contour dimension.

    Verifies acceptance criterion: contour analysis respects the
    ``?track=`` query parameter so melodists can view per-instrument contour.
    """
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/contour?track=lead"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    payload = resp.json()
    assert payload["dimension"] == "contour"
    assert payload["filtersApplied"]["track"] == "lead"
    contour_data = payload["data"]
    assert "shape" in contour_data
    assert "pitchCurve" in contour_data
    assert len(contour_data["pitchCurve"]) > 0
| 837 | |
| 838 | |
@pytest.mark.anyio
async def test_tempo_section_filter(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Section filter is applied and reflected in filtersApplied for the tempo dimension.

    Verifies that tempo analysis scoped to a named section returns valid TempoData
    and records the section filter in the response envelope.
    """
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/tempo?section=chorus"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    payload = resp.json()
    assert payload["dimension"] == "tempo"
    assert payload["filtersApplied"]["section"] == "chorus"
    tempo_data = payload["data"]
    assert tempo_data["bpm"] > 0
    assert 0.0 <= tempo_data["stability"] <= 1.0
| 862 | |
| 863 | |
@pytest.mark.anyio
async def test_analysis_aggregate_endpoint_returns_all_dimensions(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /api/v1/musehub/repos/{repo_id}/analysis/{ref} returns all 13 dimensions.

    Regression test: the aggregate endpoint must return all 13
    musical dimensions so the analysis dashboard can render summary cards for each
    in a single round-trip — agents must not have to query dimensions individually.
    """
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    payload = resp.json()
    assert payload["ref"] == "main"
    assert payload["repoId"] == repo_id
    assert "dimensions" in payload
    entries = payload["dimensions"]
    assert len(entries) == 13
    assert {entry["dimension"] for entry in entries} == set(ALL_DIMENSIONS)
    # Every per-dimension envelope must be fully populated.
    for entry in entries:
        for key in ("dimension", "ref", "computedAt", "data", "filtersApplied"):
            assert key in entry
| 895 | |
| 896 | |
| 897 | # --------------------------------------------------------------------------- |
| 898 | # Issue #414 — GET /analysis/{ref}/harmony endpoint |
| 899 | # --------------------------------------------------------------------------- |
| 900 | |
| 901 | |
| 902 | from musehub.models.musehub_analysis import HarmonyAnalysisResponse # noqa: E402 |
| 903 | from musehub.services.musehub_analysis import compute_harmony_analysis # noqa: E402 |
| 904 | |
| 905 | |
def test_compute_harmony_analysis_returns_correct_type() -> None:
    """The harmony service must produce a HarmonyAnalysisResponse."""
    analysis = compute_harmony_analysis(repo_id="repo-test", ref="main")
    assert isinstance(analysis, HarmonyAnalysisResponse)
| 910 | |
| 911 | |
def test_compute_harmony_analysis_key_has_mode() -> None:
    """The key string embeds the mode, e.g. 'C major' or 'F minor'."""
    analysis = compute_harmony_analysis(repo_id="repo-test", ref="main")
    assert analysis.mode in analysis.key
    # Tonic plus mode means exactly two whitespace-separated words.
    assert len(analysis.key.split()) == 2
| 917 | |
| 918 | |
def test_compute_harmony_analysis_roman_numerals_nonempty() -> None:
    """At least one chord event exists, and each event is fully populated."""
    analysis = compute_harmony_analysis(repo_id="repo-test", ref="main")
    events = analysis.roman_numerals
    assert len(events) >= 1
    for event in events:
        assert event.beat >= 0.0
        for field in ("chord", "root", "quality", "function"):
            assert getattr(event, field) != ""
| 929 | |
| 930 | |
def test_compute_harmony_analysis_cadences_nonempty() -> None:
    """At least one cadence exists, each with valid beat and non-empty fields."""
    analysis = compute_harmony_analysis(repo_id="repo-test", ref="main")
    assert len(analysis.cadences) >= 1
    for event in analysis.cadences:
        assert event.beat >= 0.0
        # 'from' is a keyword, hence the trailing-underscore attribute name.
        for field in ("type", "from_", "to"):
            assert getattr(event, field) != ""
| 940 | |
| 941 | |
def test_compute_harmony_analysis_harmonic_rhythm_positive() -> None:
    """The harmonic rhythm must be strictly positive."""
    analysis = compute_harmony_analysis(repo_id="repo-test", ref="main")
    assert analysis.harmonic_rhythm_bpm > 0.0
| 946 | |
| 947 | |
def test_compute_harmony_analysis_is_deterministic() -> None:
    """Same ref always produces the same key and mode (deterministic stub)."""
    first = compute_harmony_analysis(repo_id="repo-a", ref="abc123")
    second = compute_harmony_analysis(repo_id="repo-b", ref="abc123")
    assert (first.key, first.mode, first.harmonic_rhythm_bpm) == (
        second.key,
        second.mode,
        second.harmonic_rhythm_bpm,
    )
| 955 | |
| 956 | |
def test_compute_harmony_analysis_different_refs_differ() -> None:
    """Different refs produce different harmonic data."""
    first = compute_harmony_analysis(repo_id="repo-test", ref="ref-aaa")
    second = compute_harmony_analysis(repo_id="repo-test", ref="ref-zzz")
    # At least one of key, mode, or harmonic rhythm must distinguish the refs.
    differs = (
        first.key != second.key
        or first.mode != second.mode
        or first.harmonic_rhythm_bpm != second.harmonic_rhythm_bpm
    )
    assert differs
| 963 | |
| 964 | |
@pytest.mark.anyio
async def test_harmony_endpoint_returns_200(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /analysis/{ref}/harmony returns 200 with all required fields.

    Regression test: the dedicated harmony endpoint must return
    structured Roman-numeral harmonic data so agents can reason about tonal
    function, cadence structure, and modulations without parsing raw chord symbols.
    """
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/harmony"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    payload = resp.json()
    for field in ("key", "mode", "romanNumerals", "cadences", "modulations", "harmonicRhythmBpm"):
        assert field in payload
    # Both event lists are non-empty lists; harmonic rhythm is positive.
    assert isinstance(payload["romanNumerals"], list)
    assert len(payload["romanNumerals"]) >= 1
    assert isinstance(payload["cadences"], list)
    assert len(payload["cadences"]) >= 1
    assert payload["harmonicRhythmBpm"] > 0.0
| 995 | |
| 996 | |
@pytest.mark.anyio
async def test_harmony_endpoint_roman_numerals_fields(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Each roman numeral event carries beat, chord, root, quality, and function."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/harmony"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    for event in resp.json()["romanNumerals"]:
        for field in ("beat", "chord", "root", "quality", "function"):
            assert field in event
| 1016 | |
| 1017 | |
@pytest.mark.anyio
async def test_harmony_endpoint_cadence_fields(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Each cadence event carries beat, type, from, and to fields."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/harmony"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    for event in resp.json()["cadences"]:
        for field in ("beat", "type", "from", "to"):
            assert field in event
| 1036 | |
| 1037 | |
@pytest.mark.anyio
async def test_harmony_endpoint_etag_header(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /analysis/{ref}/harmony includes an ETag header for cache validation."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/harmony"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    assert "etag" in resp.headers
| 1052 | |
| 1053 | |
@pytest.mark.anyio
async def test_harmony_endpoint_requires_auth_for_private_repo(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /analysis/{ref}/harmony on a private repo without auth returns 401."""
    # Create a private repo while authenticated...
    create_resp = await client.post(
        "/api/v1/musehub/repos",
        json={"name": "private-harmony-repo", "owner": "testuser", "visibility": "private"},
        headers=auth_headers,
    )
    assert create_resp.status_code == 201
    repo_id = str(create_resp.json()["repoId"])

    # ...then hit the harmony endpoint anonymously.
    anon_resp = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/harmony",
    )
    assert anon_resp.status_code == 401
| 1074 | |
| 1075 | |
@pytest.mark.anyio
async def test_harmony_endpoint_unknown_repo_404(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /analysis/{ref}/harmony with an unknown repo_id returns 404."""
    url = "/api/v1/musehub/repos/nonexistent-repo-id/analysis/main/harmony"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 404
| 1088 | |
| 1089 | |
@pytest.mark.anyio
async def test_harmony_endpoint_track_filter(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /analysis/{ref}/harmony?track=keys returns 200 (filter accepted)."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/harmony?track=keys"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    payload = resp.json()
    assert "key" in payload
    assert "romanNumerals" in payload
| 1106 | |
| 1107 | |
| 1108 | # --------------------------------------------------------------------------- |
| 1109 | # Issue #410 — GET /analysis/{ref}/recall semantic search |
| 1110 | # --------------------------------------------------------------------------- |
| 1111 | |
| 1112 | |
def test_compute_recall_returns_correct_type() -> None:
    """The recall service must produce a RecallResponse."""
    response = compute_recall(repo_id="repo-test", ref="main", query="jazzy swing groove")
    assert isinstance(response, RecallResponse)
| 1117 | |
| 1118 | |
def test_compute_recall_scores_descending() -> None:
    """Results are ranked best-first by similarity score."""
    response = compute_recall(repo_id="repo-test", ref="main", query="minor key tension")
    ranked = [match.score for match in response.matches]
    assert ranked == sorted(ranked, reverse=True), "Matches must be ranked best-first"
| 1124 | |
| 1125 | |
def test_compute_recall_scores_in_range() -> None:
    """All cosine similarity scores must be in [0.0, 1.0]."""
    response = compute_recall(repo_id="repo-test", ref="main", query="ascending melodic contour")
    for candidate in response.matches:
        assert isinstance(candidate, RecallMatch)
        assert 0.0 <= candidate.score <= 1.0, f"Score out of range: {candidate.score}"
| 1132 | |
| 1133 | |
def test_compute_recall_limit_respected() -> None:
    """The limit parameter caps the number of returned matches."""
    response = compute_recall(repo_id="repo-test", ref="main", query="swing", limit=3)
    returned = len(response.matches)
    assert returned <= 3
    # total_matches counts all hits, not only the page that was returned.
    assert response.total_matches >= returned
| 1139 | |
| 1140 | |
def test_compute_recall_limit_clamped_to_50() -> None:
    """Limits above 50 are silently clamped to 50."""
    response = compute_recall(repo_id="repo-test", ref="main", query="groove", limit=200)
    assert len(response.matches) <= 50
| 1145 | |
| 1146 | |
def test_compute_recall_is_deterministic() -> None:
    """Same (ref, query) always produces identical matches."""
    first = compute_recall(repo_id="repo-a", ref="main", query="jazz harmony")
    second = compute_recall(repo_id="repo-b", ref="main", query="jazz harmony")
    assert len(first.matches) == len(second.matches)
    for left, right in zip(first.matches, second.matches):
        assert (left.commit_id, left.score) == (right.commit_id, right.score)
| 1155 | |
| 1156 | |
def test_compute_recall_differs_by_query() -> None:
    """Different queries produce different match sets."""
    swing = compute_recall(repo_id="repo-test", ref="main", query="swing groove")
    contour = compute_recall(repo_id="repo-test", ref="main", query="ascending melodic contour")
    # Distinct queries should at minimum rank a different top commit.
    assert swing.matches[0].commit_id != contour.matches[0].commit_id
| 1163 | |
| 1164 | |
def test_compute_recall_match_dimensions_nonempty() -> None:
    """Every RecallMatch must carry at least one matched dimension."""
    response = compute_recall(repo_id="repo-test", ref="main", query="harmonic tension")
    for candidate in response.matches:
        assert len(candidate.matched_dimensions) >= 1, (
            f"Match {candidate.commit_id!r} has no matched_dimensions"
        )
| 1172 | |
| 1173 | |
def test_compute_recall_query_echoed() -> None:
    """The response echoes the query parameter so clients can display it."""
    query_text = "brooding minor feel with slow groove"
    response = compute_recall(repo_id="repo-test", ref="develop", query=query_text)
    assert response.query == query_text
| 1179 | |
| 1180 | |
def test_compute_recall_embedding_dimensions() -> None:
    """embedding_dimensions matches the expected 128-dim feature space."""
    response = compute_recall(repo_id="repo-test", ref="main", query="any query")
    assert response.embedding_dimensions == 128
| 1185 | |
| 1186 | |
@pytest.mark.anyio
async def test_recall_endpoint_200(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /api/v1/musehub/repos/{repo_id}/analysis/{ref}/recall?q= returns 200.

    Regression test: the recall endpoint must return a ranked list
    of semantically similar commits so agents can retrieve musically relevant history
    without issuing expensive dimension-by-dimension comparisons.
    """
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/recall?q=jazzy+swing+groove"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    payload = resp.json()
    assert payload["repoId"] == repo_id
    assert payload["ref"] == "main"
    # '+' in the query string decodes to spaces.
    assert payload["query"] == "jazzy swing groove"
    assert "matches" in payload
    assert isinstance(payload["matches"], list)
    assert payload["totalMatches"] >= 0
    assert payload["embeddingDimensions"] == 128
| 1213 | |
| 1214 | |
@pytest.mark.anyio
async def test_recall_endpoint_match_fields(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Each match carries commitId, commitMessage, branch, score, and matchedDimensions."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/recall?q=harmony"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    for candidate in resp.json()["matches"]:
        for field in ("commitId", "commitMessage", "branch", "score", "matchedDimensions"):
            assert field in candidate
        assert 0.0 <= candidate["score"] <= 1.0
        assert len(candidate["matchedDimensions"]) >= 1
| 1236 | |
| 1237 | |
@pytest.mark.anyio
async def test_recall_endpoint_requires_auth(
    client: AsyncClient,
    db_session: AsyncSession,
) -> None:
    """Without a Bearer token the recall endpoint rejects with 401."""
    url = "/api/v1/musehub/repos/some-repo/analysis/main/recall?q=groove"
    resp = await client.get(url)
    assert resp.status_code == 401
| 1248 | |
| 1249 | |
@pytest.mark.anyio
async def test_recall_endpoint_unknown_repo_404(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """A repo_id that does not exist yields 404 from the recall endpoint."""
    url = "/api/v1/musehub/repos/00000000-0000-0000-0000-000000000000/analysis/main/recall?q=swing"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 404
| 1262 | |
| 1263 | |
@pytest.mark.anyio
async def test_recall_endpoint_etag_header(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """The recall response carries a quoted ETag for client-side cache validation."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/recall?q=groove"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    assert "etag" in resp.headers
    # Strong ETags are wrapped in double quotes per RFC 9110.
    assert resp.headers["etag"].startswith('"')
| 1279 | |
| 1280 | |
@pytest.mark.anyio
async def test_recall_endpoint_limit_param(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """?limit=3 caps the returned matches to at most 3 results."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/recall?q=swing&limit=3"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    assert len(resp.json()["matches"]) <= 3
| 1295 | |
| 1296 | |
@pytest.mark.anyio
async def test_recall_endpoint_missing_q_422(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Omitting the required ?q query parameter yields 422 Unprocessable Entity."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/recall"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 422
| 1310 | |
| 1311 | |
@pytest.mark.anyio
async def test_recall_endpoint_scores_descending(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Recall endpoint returns matches sorted best-first (descending score)."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/recall?q=jazz+harmony"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    ranked = [candidate["score"] for candidate in resp.json()["matches"]]
    assert ranked == sorted(ranked, reverse=True), "Matches must be in descending score order"
| 1327 | |
| 1328 | |
| 1329 | # --------------------------------------------------------------------------- |
| 1330 | # Issue #406 — Cross-ref similarity service unit tests |
| 1331 | # --------------------------------------------------------------------------- |
| 1332 | |
| 1333 | |
def test_compute_ref_similarity_returns_correct_type() -> None:
    """The similarity service must produce a RefSimilarityResponse."""
    similarity = compute_ref_similarity(
        repo_id="repo-1",
        base_ref="main",
        compare_ref="experiment/jazz-voicings",
    )
    assert isinstance(similarity, RefSimilarityResponse)
| 1342 | |
| 1343 | |
def test_compute_ref_similarity_dimensions_in_range() -> None:
    """All 10 dimension scores are within [0.0, 1.0]."""
    similarity = compute_ref_similarity(
        repo_id="repo-1",
        base_ref="main",
        compare_ref="feat/new-bridge",
    )
    dimension_names = (
        "pitch_distribution",
        "rhythm_pattern",
        "tempo",
        "dynamics",
        "harmonic_content",
        "form",
        "instrument_blend",
        "groove",
        "contour",
        "emotion",
    )
    for name in dimension_names:
        value = getattr(similarity.dimensions, name)
        assert 0.0 <= value <= 1.0, f"{name} out of range: {value}"
| 1366 | |
| 1367 | |
def test_compute_ref_similarity_overall_in_range() -> None:
    """overall_similarity is within [0.0, 1.0]."""
    similarity = compute_ref_similarity(
        repo_id="repo-1",
        base_ref="v1.0",
        compare_ref="v2.0",
    )
    assert 0.0 <= similarity.overall_similarity <= 1.0
| 1376 | |
| 1377 | |
def test_compute_ref_similarity_is_deterministic() -> None:
    """Same ref pair always returns the same overall_similarity."""
    first = compute_ref_similarity(repo_id="r", base_ref="main", compare_ref="dev")
    second = compute_ref_similarity(repo_id="r", base_ref="main", compare_ref="dev")
    assert first.overall_similarity == second.overall_similarity
    assert first.dimensions == second.dimensions
| 1384 | |
| 1385 | |
def test_compute_ref_similarity_interpretation_nonempty() -> None:
    """interpretation is a non-empty string."""
    similarity = compute_ref_similarity(
        repo_id="repo-1",
        base_ref="main",
        compare_ref="feature/rhythm-variations",
    )
    interpretation = similarity.interpretation
    assert isinstance(interpretation, str)
    assert len(interpretation) > 0
| 1395 | |
| 1396 | |
| 1397 | # --------------------------------------------------------------------------- |
| 1398 | # Issue #406 — Cross-ref similarity HTTP endpoint tests |
| 1399 | # --------------------------------------------------------------------------- |
| 1400 | |
| 1401 | |
@pytest.mark.anyio
async def test_ref_similarity_endpoint_200(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /analysis/{ref}/similarity returns 200 with required fields."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/similarity?compare=dev"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    payload = resp.json()
    assert payload["baseRef"] == "main"
    assert payload["compareRef"] == "dev"
    for field in ("overallSimilarity", "dimensions", "interpretation"):
        assert field in payload
    dimension_scores = payload["dimensions"]
    expected_keys = (
        "pitchDistribution",
        "rhythmPattern",
        "tempo",
        "dynamics",
        "harmonicContent",
        "form",
        "instrumentBlend",
        "groove",
        "contour",
        "emotion",
    )
    for key in expected_keys:
        assert key in dimension_scores, f"Missing dimension key: {key}"
        assert 0.0 <= dimension_scores[key] <= 1.0
| 1436 | |
| 1437 | |
@pytest.mark.anyio
async def test_ref_similarity_endpoint_requires_compare(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Omitting the required compare param yields 422 Unprocessable Entity."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/similarity"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 422
| 1451 | |
| 1452 | |
@pytest.mark.anyio
async def test_ref_similarity_endpoint_requires_auth(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Private repo returns 401 when no auth token is provided."""
    repo_id = await _create_repo(client, auth_headers)
    # Request deliberately sent without the auth_headers.
    resp = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/similarity?compare=dev",
    )
    assert resp.status_code == 401
| 1465 | |
| 1466 | |
@pytest.mark.anyio
async def test_ref_similarity_endpoint_unknown_repo_404(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Unknown repo_id returns 404."""
    url = "/api/v1/musehub/repos/00000000-0000-0000-0000-000000000000/analysis/main/similarity?compare=dev"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 404
| 1479 | |
| 1480 | |
@pytest.mark.anyio
async def test_ref_similarity_endpoint_etag(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """The similarity response carries a quoted ETag for client-side caching."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/similarity?compare=dev"
    resp = await client.get(url, headers=auth_headers)
    assert resp.status_code == 200
    assert "etag" in resp.headers
    # Strong ETags are wrapped in double quotes per RFC 9110.
    assert resp.headers["etag"].startswith('"')
| 1496 | |
| 1497 | |
| 1498 | # --------------------------------------------------------------------------- |
| 1499 | # Service unit tests — emotion diff |
| 1500 | # --------------------------------------------------------------------------- |
| 1501 | |
| 1502 | |
def test_compute_emotion_diff_returns_correct_type() -> None:
    """The service layer hands back an EmotionDiffResponse model instance."""
    response = compute_emotion_diff(
        repo_id="test-repo", head_ref="abc123", base_ref="main"
    )
    assert isinstance(response, EmotionDiffResponse)
| 1507 | |
| 1508 | |
def test_emotion_diff_base_emotion_axes_in_range() -> None:
    """Every axis of the base_emotion vector lies within the closed interval [0, 1]."""
    result = compute_emotion_diff(repo_id="repo", head_ref="head", base_ref="base")
    vec = result.base_emotion
    assert isinstance(vec, EmotionVector8D)
    axis_names = ("valence", "energy", "tension", "complexity",
                  "warmth", "brightness", "darkness", "playfulness")
    for name in axis_names:
        axis = getattr(vec, name)
        assert 0.0 <= axis <= 1.0, f"base axis out of range: {axis}"
| 1519 | |
| 1520 | |
def test_emotion_diff_head_emotion_axes_in_range() -> None:
    """Every axis of the head_emotion vector lies within the closed interval [0, 1]."""
    result = compute_emotion_diff(repo_id="repo", head_ref="head", base_ref="base")
    vec = result.head_emotion
    assert isinstance(vec, EmotionVector8D)
    axis_names = ("valence", "energy", "tension", "complexity",
                  "warmth", "brightness", "darkness", "playfulness")
    for name in axis_names:
        axis = getattr(vec, name)
        assert 0.0 <= axis <= 1.0, f"head axis out of range: {axis}"
| 1531 | |
| 1532 | |
def test_emotion_diff_delta_axes_in_range() -> None:
    """Every axis of the delta vector lies within the closed interval [-1, 1]."""
    result = compute_emotion_diff(
        repo_id="repo", head_ref="deadbeef", base_ref="cafebabe"
    )
    d = result.delta
    assert isinstance(d, EmotionDelta8D)
    axis_names = ("valence", "energy", "tension", "complexity",
                  "warmth", "brightness", "darkness", "playfulness")
    for name in axis_names:
        axis = getattr(d, name)
        assert -1.0 <= axis <= 1.0, f"delta axis out of range: {axis}"
| 1543 | |
| 1544 | |
def test_emotion_diff_delta_equals_head_minus_base() -> None:
    """delta.valence equals round(head.valence - base.valence, 4), clamped to [-1, 1]."""
    result = compute_emotion_diff(repo_id="repo", head_ref="abc", base_ref="def")
    raw = round(result.head_emotion.valence - result.base_emotion.valence, 4)
    # Clamp into [-1, 1] the same way the service contract specifies.
    clamped = min(1.0, max(-1.0, raw))
    assert result.delta.valence == clamped
| 1550 | |
| 1551 | |
def test_emotion_diff_interpretation_nonempty() -> None:
    """The human-readable interpretation is a populated string."""
    result = compute_emotion_diff(repo_id="repo", head_ref="abc123", base_ref="main")
    interpretation = result.interpretation
    assert isinstance(interpretation, str)
    assert interpretation != ""
| 1557 | |
| 1558 | |
def test_emotion_diff_is_deterministic() -> None:
    """Repeated calls with identical head/base refs produce identical diffs."""
    first = compute_emotion_diff(repo_id="repo", head_ref="abc123", base_ref="main")
    second = compute_emotion_diff(repo_id="repo", head_ref="abc123", base_ref="main")
    # Spot-check two delta axes plus the narrative text for stability.
    for field in ("valence", "tension"):
        assert getattr(first.delta, field) == getattr(second.delta, field)
    assert first.interpretation == second.interpretation
| 1566 | |
| 1567 | |
def test_emotion_diff_different_refs_differ() -> None:
    """Different head refs produce different head_emotion vectors.

    Note: it is ``head_ref`` that varies between the two calls (base_ref is
    pinned to "main"), so the assertion inspects ``head_emotion`` — the
    previous docstring incorrectly referred to base_emotion.
    """
    r1 = compute_emotion_diff(repo_id="repo", head_ref="ref-alpha", base_ref="main")
    r2 = compute_emotion_diff(repo_id="repo", head_ref="ref-beta", base_ref="main")
    # At least one of the eight axes should differ between two unrelated refs.
    axes = ("valence", "energy", "tension", "complexity",
            "warmth", "brightness", "darkness", "playfulness")
    vectors_differ = any(
        getattr(r1.head_emotion, ax) != getattr(r2.head_emotion, ax) for ax in axes
    )
    assert vectors_differ, "Different head refs should produce different head_emotion vectors"
| 1579 | |
| 1580 | |
| 1581 | # --------------------------------------------------------------------------- |
| 1582 | # HTTP integration tests — emotion diff endpoint |
| 1583 | # --------------------------------------------------------------------------- |
| 1584 | |
| 1585 | |
@pytest.mark.anyio
async def test_emotion_diff_endpoint_200(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """A valid emotion-diff request returns 200 with the full response schema."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/emotion-diff?base=main~1"
    response = await client.get(url, headers=auth_headers)
    assert response.status_code == 200
    body = response.json()
    # Top-level response contract.
    required_fields = (
        "repoId", "baseRef", "headRef", "computedAt",
        "baseEmotion", "headEmotion", "delta", "interpretation",
    )
    for field in required_fields:
        assert field in body
    # The delta payload must expose all eight emotion axes.
    for axis in ("valence", "energy", "tension", "complexity",
                 "warmth", "brightness", "darkness", "playfulness"):
        assert axis in body["delta"], f"delta missing axis: {axis}"
| 1612 | |
| 1613 | |
@pytest.mark.anyio
async def test_emotion_diff_endpoint_requires_auth(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """The emotion-diff endpoint rejects unauthenticated requests with 401."""
    repo_id = await _create_repo(client, auth_headers)
    url = f"/api/v1/musehub/repos/{repo_id}/analysis/main/emotion-diff?base=main~1"
    # No auth headers on purpose: the endpoint must refuse the request.
    response = await client.get(url)
    assert response.status_code == 401
| 1626 | |
| 1627 | |
@pytest.mark.anyio
async def test_emotion_diff_endpoint_unknown_repo_404(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """Requesting an emotion diff for a repo that does not exist returns 404."""
    missing_repo = "nonexistent-repo"
    response = await client.get(
        f"/api/v1/musehub/repos/{missing_repo}/analysis/main/emotion-diff?base=main~1",
        headers=auth_headers,
    )
    assert response.status_code == 404
| 1640 | |
| 1641 | |
@pytest.mark.anyio
async def test_emotion_diff_endpoint_etag(
    client: AsyncClient,
    auth_headers: dict[str, str],
    db_session: AsyncSession,
) -> None:
    """GET /analysis/{ref}/emotion-diff includes an ETag header for cache validation."""
    repo_id = await _create_repo(client, auth_headers)
    resp = await client.get(
        f"/api/v1/musehub/repos/{repo_id}/analysis/main/emotion-diff?base=main~1",
        headers=auth_headers,
    )
    assert resp.status_code == 200
    assert "etag" in resp.headers
    # Consistent with the similarity ETag test: strong ETags are quoted (RFC 7232).
    assert resp.headers["etag"].startswith('"')