Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/aignostics/platform/CLAUDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -793,7 +793,7 @@ def delete(self) -> None:
- ✅ `Applications.list()` - Application list (5 min TTL)
- ✅ `Applications.details()` - Application details (5 min TTL)
- ✅ `Runs.details()` - Run details (15 sec TTL)
- ✅ `Runs.results()` - Run results (15 sec TTL)
- ✅ `Runs.results()` - Run results (15 sec TTL), supports `item_ids` and `external_ids` filters
- ✅ `Runs.list()` - Run list (15 sec TTL)

**Cache Bypass (NEW):**
Expand Down
19 changes: 16 additions & 3 deletions src/aignostics/platform/resources/runs.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,17 +202,24 @@ def delete(self) -> None:
)
operation_cache_clear() # Clear all caches since we added a new run

def results(self, nocache: bool = False) -> t.Iterator[ItemResultData]:
def results(
self,
nocache: bool = False,
item_ids: list[str] | None = None,
external_ids: list[str] | None = None,
) -> t.Iterator[ItemResultData]:
"""Retrieves the results of all items in the run.

Retries on network and server errors.

Args:
nocache (bool): If True, skip reading from cache and fetch fresh data from the API.
The fresh result will still be cached for subsequent calls. Defaults to False.
item_ids (list[str] | None): Optional list of item IDs to filter results by.
external_ids (list[str] | None): Optional list of external IDs to filter results by.

Returns:
list[ItemResultData]: A list of item results.
Iterator[ItemResultData]: An iterator over item results.

Raises:
Exception: If the API request fails.
Expand All @@ -237,7 +244,13 @@ def results_with_retry(run_id: str, **kwargs: object) -> list[ItemResultData]:
)
)

return paginate(lambda **kwargs: results_with_retry(self.run_id, nocache=nocache, **kwargs))
filter_kwargs: dict[str, object] = {}
if item_ids:
filter_kwargs["item_id__in"] = item_ids
if external_ids:
filter_kwargs["external_id__in"] = external_ids

return paginate(lambda **kwargs: results_with_retry(self.run_id, nocache=nocache, **filter_kwargs, **kwargs))

def download_to_folder( # noqa: C901
self,
Expand Down
11 changes: 10 additions & 1 deletion tests/aignostics/application/cli_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,12 @@
from tests.constants_test import (
HETA_APPLICATION_ID,
HETA_APPLICATION_VERSION,
SPOT_0_CRC32C,
SPOT_0_FILENAME,
SPOT_0_GS_URL,
SPOT_0_HEIGHT,
SPOT_0_RESOLUTION_MPP,
SPOT_0_WIDTH,
SPOT_1_EXPECTED_RESULT_FILES,
SPOT_1_FILENAME,
SPOT_1_FILESIZE,
Expand Down Expand Up @@ -367,7 +373,10 @@ def test_cli_run_submit_and_describe_and_cancel_and_download_and_delete( # noqa
record_property("tested-item-id", "TC-APPLICATION-CLI-02")
csv_content = "external_id;checksum_base64_crc32c;resolution_mpp;width_px;height_px;staining_method;tissue;disease;"
csv_content += "platform_bucket_url\n"
csv_content += ";5onqtA==;0.26268186053789266;7447;7196;H&E;LUNG;LUNG_CANCER;gs://bucket/test"
csv_content += (
f"{SPOT_0_FILENAME};{SPOT_0_CRC32C};{SPOT_0_RESOLUTION_MPP};{SPOT_0_WIDTH};{SPOT_0_HEIGHT}"
f";H&E;LUNG;LUNG_CANCER;{SPOT_0_GS_URL}"
)
Comment on lines +376 to +379
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This test flaked on me because the CSV is invalid, so the run failed before it could be cancelled

csv_path = tmp_path / "dummy.csv"
csv_path.write_text(csv_content)
result = runner.invoke(
Expand Down
64 changes: 59 additions & 5 deletions tests/aignostics/platform/resources/runs_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,7 @@
RunReadResponse,
)

from aignostics.platform.resources.runs import (
LIST_APPLICATION_RUNS_MAX_PAGE_SIZE,
Run,
Runs,
)
from aignostics.platform.resources.runs import LIST_APPLICATION_RUNS_MAX_PAGE_SIZE, Run, Runs
from aignostics.platform.resources.utils import PAGE_SIZE


Expand Down Expand Up @@ -577,6 +573,64 @@ def test_runs_list_delegates_to_list_data(runs, mock_api) -> None:
assert "nocache" not in call_kwargs


@pytest.mark.unit
def test_application_run_results_with_filters(app_run, mock_api) -> None:
    """Test that Run.results() correctly maps item_ids and external_ids to API parameters.

    Verifies that:
    - item_ids maps to item_id__in
    - external_ids maps to external_id__in
    - both filters are repeated on every paginated API call
    """
    # Arrange: one full page followed by a partial page to exercise pagination.
    item_ids = ["item-1", "item-2"]
    external_ids = ["ext-1", "ext-2"]
    page1 = [Mock(spec=ItemResultReadResponse) for _ in range(PAGE_SIZE)]
    page2 = [Mock(spec=ItemResultReadResponse) for _ in range(3)]
    mock_api.list_run_items_v1_runs_run_id_items_get.side_effect = [page1, page2]

    # Act
    result = list(app_run.results(item_ids=item_ids, external_ids=external_ids))

    # Assert - filters are passed and pagination still works
    assert len(result) == PAGE_SIZE + 3
    assert mock_api.list_run_items_v1_runs_run_id_items_get.call_count == 2
    # Use the named .kwargs accessor (instead of the opaque call[1] tuple index) and
    # avoid naming the loop variable `call`, which shadows unittest.mock.call.
    for api_call in mock_api.list_run_items_v1_runs_run_id_items_get.call_args_list:
        call_kwargs = api_call.kwargs
        assert call_kwargs["item_id__in"] == item_ids
        assert call_kwargs["external_id__in"] == external_ids
        assert call_kwargs["run_id"] == app_run.run_id


@pytest.mark.unit
def test_application_run_results_without_filters_omits_filter_kwargs(app_run, mock_api) -> None:
    """Test that Run.results() passes no filter kwargs to the API when called without filters."""
    # Arrange: a single empty page terminates pagination immediately.
    mock_api.list_run_items_v1_runs_run_id_items_get.return_value = []

    # Act: exhaust the iterator so the API is actually called.
    list(app_run.results())

    # Assert: neither filter keyword reached the API call.
    kwargs_seen = mock_api.list_run_items_v1_runs_run_id_items_get.call_args.kwargs
    for filter_key in ("item_id__in", "external_id__in"):
        assert filter_key not in kwargs_seen


@pytest.mark.unit
def test_application_run_results_with_empty_list_filters_omits_filter_kwargs(app_run, mock_api) -> None:
    """Test that Run.results() treats empty lists same as None (no filter applied)."""
    # Arrange: a single empty page terminates pagination immediately.
    mock_api.list_run_items_v1_runs_run_id_items_get.return_value = []

    # Act: explicitly pass empty lists, which must behave exactly like None.
    list(app_run.results(item_ids=[], external_ids=[]))

    # Assert: no filter keyword was forwarded to the API.
    kwargs_seen = mock_api.list_run_items_v1_runs_run_id_items_get.call_args.kwargs
    assert "item_id__in" not in kwargs_seen
    assert "external_id__in" not in kwargs_seen


@pytest.mark.unit
@pytest.mark.parametrize(
("hide_platform_queue_position", "expected_platform_queue_position"),
Expand Down
Loading