diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0ea316c..6004b61 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,12 +7,17 @@ on: - 'integrated/**' - 'stl-preview-head/**' - 'stl-preview-base/**' + pull_request: + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: timeout-minutes: 10 name: lint runs-on: ${{ github.repository == 'stainless-sdks/sunrise-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -30,10 +35,51 @@ jobs: - name: Run lints run: ./scripts/lint + build: + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + timeout-minutes: 10 + name: build + permissions: + contents: read + id-token: write + runs-on: ${{ github.repository == 'stainless-sdks/sunrise-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Install dependencies + run: rye sync --all-features + + - name: Run build + run: rye build + + - name: Get GitHub OIDC Token + if: github.repository == 'stainless-sdks/sunrise-python' + id: github-oidc + uses: actions/github-script@v6 + with: + script: core.setOutput('github_token', await core.getIDToken()); + + - name: Upload tarball + if: github.repository == 'stainless-sdks/sunrise-python' + env: + URL: https://pkg.stainless.com/s + AUTH: ${{ steps.github-oidc.outputs.github_token }} + SHA: ${{ github.sha }} + run: ./scripts/utils/upload-artifact.sh + test: timeout-minutes: 10 name: test runs-on: ${{ github.repository == 'stainless-sdks/sunrise-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: 
actions/checkout@v4 diff --git a/.gitignore b/.gitignore index 5ef86bd..4aeb2ab 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,4 @@ .prism.log -.vscode _dev __pycache__ diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1b77f50..6538ca9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.7.0" + ".": "0.8.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index aa7b006..b868f52 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 52 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-8d75c58c83d13f67b6a125c3eb4639d213c91aec7dbb6e06f0cd5bdfc074d54e.yml -openapi_spec_hash: 47795284631814d0f8eb42f6a0d5a3b3 -config_hash: 1ecef0ff4fd125bbc00eec65e3dd4798 +configured_endpoints: 34 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-db7245c74772a8cd47c02886619fed0568fbb58b1fa8aba0dc77524b924a4fb6.yml +openapi_spec_hash: ca3de8d7b14b78683e39464fe7d4b1e1 +config_hash: 410f8a2f86f605885911277be47c3c78 diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..5b01030 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "python.analysis.importFormat": "relative", +} diff --git a/CHANGELOG.md b/CHANGELOG.md index a2efd9b..1a96ddb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,65 @@ # Changelog +## 0.8.0 (2025-08-26) + +Full Changelog: [v0.7.0...v0.8.0](https://github.com/ContextualAI/contextual-client-python/compare/v0.7.0...v0.8.0) + +### Features + +* **api:** update via SDK Studio ([04fabfd](https://github.com/ContextualAI/contextual-client-python/commit/04fabfd2dd9bc21d5481bbea16148d9300e21196)) +* **api:** update via SDK Studio ([feab9f8](https://github.com/ContextualAI/contextual-client-python/commit/feab9f82b627246dbc5592a7ba6bac5de7afd7e1)) +* clean up environment call outs 
([5aacfd7](https://github.com/ContextualAI/contextual-client-python/commit/5aacfd73cd62e9440b927c74e29bd4ee03766334)) +* **client:** add follow_redirects request option ([35e7c78](https://github.com/ContextualAI/contextual-client-python/commit/35e7c78c7d1801a0afe4d73bbff3e7c695f5f19f)) +* **client:** add support for aiohttp ([d54f53c](https://github.com/ContextualAI/contextual-client-python/commit/d54f53cfa0878acbad344622f7aae1b2e939ae1c)) +* **client:** support file upload requests ([44d064d](https://github.com/ContextualAI/contextual-client-python/commit/44d064d3013ef31ec6cb709682ab5fef4d2ed531)) + + +### Bug Fixes + +* **ci:** correct conditional ([0e1ab57](https://github.com/ContextualAI/contextual-client-python/commit/0e1ab57132d5a038aac790b463166200ae436fc3)) +* **ci:** release-doctor — report correct token name ([ce0af3b](https://github.com/ContextualAI/contextual-client-python/commit/ce0af3be8b2f90af2bc4e38979a801df1e98e989)) +* **client:** correctly parse binary response | stream ([518cbab](https://github.com/ContextualAI/contextual-client-python/commit/518cbabda3ce7f53721c0fc916ae89706899a4ec)) +* **client:** don't send Content-Type header on GET requests ([1ba6bcc](https://github.com/ContextualAI/contextual-client-python/commit/1ba6bcc49090112b3ec0dc9a0b1f5c2b487e378e)) +* **docs/api:** remove references to nonexistent types ([9fd7133](https://github.com/ContextualAI/contextual-client-python/commit/9fd7133c6748ba1b1676a674da35d57f02f01a86)) +* **parsing:** correctly handle nested discriminated unions ([130f4c1](https://github.com/ContextualAI/contextual-client-python/commit/130f4c17f8fbf89a42fa1709d6e4b4a8b36c4036)) +* **parsing:** ignore empty metadata ([a81e190](https://github.com/ContextualAI/contextual-client-python/commit/a81e19084356382c7b709215b1462e099d56f2a6)) +* **parsing:** parse extra field types ([89f10b3](https://github.com/ContextualAI/contextual-client-python/commit/89f10b3a97483b99e0ec06a346286619faec5c12)) +* resolve pydantic violation. 
([afcfc1c](https://github.com/ContextualAI/contextual-client-python/commit/afcfc1cb265aa3911164ea727af5de6d965d15a5)) +* **tests:** fix: tests which call HTTP endpoints directly with the example parameters ([d7920f1](https://github.com/ContextualAI/contextual-client-python/commit/d7920f111d6175e6714482918e34992fb51739d9)) + + +### Chores + +* **ci:** change upload type ([f72dfb7](https://github.com/ContextualAI/contextual-client-python/commit/f72dfb77ff1fcae80efa5b286800ed77af6d0889)) +* **ci:** enable for pull requests ([84fbba4](https://github.com/ContextualAI/contextual-client-python/commit/84fbba4c22dbbf8517841c7961a37dba246126dc)) +* **ci:** fix installation instructions ([f191464](https://github.com/ContextualAI/contextual-client-python/commit/f191464e75f48395e76d6007712ae8548268b45f)) +* **ci:** only run for pushes and fork pull requests ([b9520a0](https://github.com/ContextualAI/contextual-client-python/commit/b9520a0ad9c16d3ad0386ce70a15df4191751364)) +* **ci:** upload sdks to package manager ([1f04b9e](https://github.com/ContextualAI/contextual-client-python/commit/1f04b9ecca3a4a3d2235c5cfa21bd9b36a358754)) +* **docs:** grammar improvements ([01370fb](https://github.com/ContextualAI/contextual-client-python/commit/01370fb62278f1def879352910c2520102c89993)) +* **docs:** remove reference to rye shell ([68f70a8](https://github.com/ContextualAI/contextual-client-python/commit/68f70a88e5b45773140c4b4a02c0506f3d078ad9)) +* **docs:** remove unnecessary param examples ([f603dcd](https://github.com/ContextualAI/contextual-client-python/commit/f603dcdd966c77ce3e8b8dba8e878eb273ef1688)) +* **internal:** bump pinned h11 dep ([f0aca79](https://github.com/ContextualAI/contextual-client-python/commit/f0aca79b109176c6a83b31434ccdbc30e58f059d)) +* **internal:** change ci workflow machines ([9e79111](https://github.com/ContextualAI/contextual-client-python/commit/9e7911165b348e96ad55b6fd7faf8855c009c26f)) +* **internal:** codegen related update 
([0310d7c](https://github.com/ContextualAI/contextual-client-python/commit/0310d7ce2bca6a80cd3b0d53a1103b4dc1fa8c32)) +* **internal:** fix ruff target version ([465af9e](https://github.com/ContextualAI/contextual-client-python/commit/465af9ec69d6456078fb4137b39d1dd33a3f60b2)) +* **internal:** update comment in script ([01101c7](https://github.com/ContextualAI/contextual-client-python/commit/01101c7ff8496be98feb71c14eda4b6695cc7331)) +* **internal:** update conftest.py ([b324ed3](https://github.com/ContextualAI/contextual-client-python/commit/b324ed373c9c174a44eb52dc6d2384e82c0af4b8)) +* **internal:** update examples ([40379a3](https://github.com/ContextualAI/contextual-client-python/commit/40379a3d51aef12b1a0264e515ac145c91e41644)) +* **package:** mark python 3.13 as supported ([f37217f](https://github.com/ContextualAI/contextual-client-python/commit/f37217ff20d84d47c9adaf89c14151075e329972)) +* **project:** add settings file for vscode ([77265c1](https://github.com/ContextualAI/contextual-client-python/commit/77265c18261b46255146f4a0fd82e2aae41ae160)) +* **readme:** fix version rendering on pypi ([5857ef3](https://github.com/ContextualAI/contextual-client-python/commit/5857ef3c8252e39ab66b1dea3e035580d0f2f006)) +* **readme:** update badges ([b747f45](https://github.com/ContextualAI/contextual-client-python/commit/b747f452ab31df0805dd07a516fe63c460353c57)) +* **tests:** add tests for httpx client instantiation & proxies ([0c4973f](https://github.com/ContextualAI/contextual-client-python/commit/0c4973fed123a77a16b189439b3f4976fcc91770)) +* **tests:** run tests in parallel ([f75c912](https://github.com/ContextualAI/contextual-client-python/commit/f75c912ff643028317dde5fb0dfd08470b26ac29)) +* **tests:** skip some failing tests on the latest python versions ([dd32830](https://github.com/ContextualAI/contextual-client-python/commit/dd32830a8266dbf736c85285ec611854659511e7)) +* update @stainless-api/prism-cli to v5.15.0 
([82c8bc7](https://github.com/ContextualAI/contextual-client-python/commit/82c8bc7b281e624cff3606c46dea4a00ed99cc05)) +* update github action ([2d36800](https://github.com/ContextualAI/contextual-client-python/commit/2d36800896a198d92225efa540eb4f0faff092aa)) + + +### Documentation + +* **client:** fix httpx.Timeout documentation reference ([3517a3d](https://github.com/ContextualAI/contextual-client-python/commit/3517a3d02c7447c027bc82baf3a83333eb3c9b55)) + ## 0.7.0 (2025-05-13) Full Changelog: [v0.6.0...v0.7.0](https://github.com/ContextualAI/contextual-client-python/compare/v0.6.0...v0.7.0) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ab82b5a..3c21def 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,8 +17,7 @@ $ rye sync --all-features You can then run scripts using `rye run python script.py` or by activating the virtual environment: ```sh -$ rye shell -# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work +# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work $ source .venv/bin/activate # now you can omit the `rye run` prefix diff --git a/README.md b/README.md index 4be5749..c05a737 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # Contextual AI Python API library -[![PyPI version](https://img.shields.io/pypi/v/contextual-client.svg)](https://pypi.org/project/contextual-client/) + +[![PyPI version](https://img.shields.io/pypi/v/contextual-client.svg?label=pypi%20(stable))](https://pypi.org/project/contextual-client/) The Contextual AI Python library provides convenient access to the Contextual AI REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, @@ -66,6 +67,39 @@ asyncio.run(main()) Functionality between the synchronous and asynchronous clients is otherwise identical. +### With aiohttp + +By default, the async client uses `httpx` for HTTP requests. 
However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend. + +You can enable this by installing `aiohttp`: + +```sh +# install from PyPI +pip install contextual-client[aiohttp] +``` + +Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: + +```python +import asyncio +from contextual import DefaultAioHttpClient +from contextual import AsyncContextualAI + + +async def main() -> None: + async with AsyncContextualAI( + api_key="My API Key", + http_client=DefaultAioHttpClient(), + ) as client: + create_agent_output = await client.agents.create( + name="Example", + ) + print(create_agent_output.id) + + +asyncio.run(main()) +``` + ## Using types Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like: @@ -149,33 +183,7 @@ client = ContextualAI() create_agent_output = client.agents.create( name="xxx", - agent_configs={ - "filter_and_rerank_config": { - "rerank_instructions": "rerank_instructions", - "reranker_score_filter_threshold": 0, - "top_k_reranked_chunks": 0, - }, - "generate_response_config": { - "avoid_commentary": True, - "calculate_groundedness": True, - "frequency_penalty": 0, - "max_new_tokens": 0, - "seed": 0, - "temperature": 0, - "top_p": 0, - }, - "global_config": { - "enable_filter": True, - "enable_multi_turn": True, - "enable_rerank": True, - "should_check_retrieval_need": True, - }, - "retrieval_config": { - "lexical_alpha": 0, - "semantic_alpha": 0, - "top_k_retrieved_chunks": 0, - }, - }, + agent_configs={}, ) print(create_agent_output.agent_configs) ``` @@ -267,7 +275,7 @@ client.with_options(max_retries=5).agents.create( ### Timeouts By default requests time out after 1 minute. 
You can configure this with a `timeout` option, -which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object: +which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object: ```python from contextual import ContextualAI diff --git a/SECURITY.md b/SECURITY.md index 92a473f..97e18f0 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -16,11 +16,11 @@ before making any information public. ## Reporting Non-SDK Related Security Issues If you encounter security issues that are not directly related to SDKs but pertain to the services -or products provided by Contextual AI please follow the respective company's security reporting guidelines. +or products provided by Contextual AI, please follow the respective company's security reporting guidelines. ### Contextual AI Terms and Policies -Please contact support@contextual.ai for any questions or concerns regarding security of our services. +Please contact support@contextual.ai for any questions or concerns regarding the security of our services. 
--- diff --git a/api.md b/api.md index 34b9ced..5ce1f46 100644 --- a/api.md +++ b/api.md @@ -8,18 +8,18 @@ from contextual.types import ( Datastore, DatastoreMetadata, ListDatastoresResponse, - DatastoreDeleteResponse, - DatastoreResetResponse, + DatastoreUpdateResponse, ) ``` Methods: - client.datastores.create(\*\*params) -> CreateDatastoreResponse +- client.datastores.update(datastore_id, \*\*params) -> DatastoreUpdateResponse - client.datastores.list(\*\*params) -> SyncDatastoresPage[Datastore] -- client.datastores.delete(datastore_id) -> object +- client.datastores.delete(datastore_id) -> object - client.datastores.metadata(datastore_id) -> DatastoreMetadata -- client.datastores.reset(datastore_id) -> object +- client.datastores.reset(datastore_id) -> object ## Documents @@ -27,18 +27,20 @@ Types: ```python from contextual.types.datastores import ( + BaseMetadataFilter, CompositeMetadataFilter, DocumentMetadata, IngestionResponse, ListDocumentsResponse, - DocumentDeleteResponse, + DocumentGetParseResultResponse, ) ``` Methods: - client.datastores.documents.list(datastore_id, \*\*params) -> SyncDocumentsPage[DocumentMetadata] -- client.datastores.documents.delete(document_id, \*, datastore_id) -> object +- client.datastores.documents.delete(document_id, \*, datastore_id) -> object +- client.datastores.documents.get_parse_result(document_id, \*, datastore_id, \*\*params) -> DocumentGetParseResultResponse - client.datastores.documents.ingest(datastore_id, \*\*params) -> IngestionResponse - client.datastores.documents.metadata(document_id, \*, datastore_id) -> DocumentMetadata - client.datastores.documents.set_metadata(document_id, \*, datastore_id, \*\*params) -> DocumentMetadata @@ -58,171 +60,48 @@ from contextual.types import ( GlobalConfig, ListAgentsResponse, RetrievalConfig, - AgentUpdateResponse, - AgentDeleteResponse, AgentMetadataResponse, - AgentResetResponse, ) ``` Methods: - client.agents.create(\*\*params) -> CreateAgentOutput -- 
client.agents.update(agent_id, \*\*params) -> object +- client.agents.update(agent_id, \*\*params) -> object - client.agents.list(\*\*params) -> SyncPage[Agent] -- client.agents.delete(agent_id) -> object +- client.agents.delete(agent_id) -> object +- client.agents.copy(agent_id) -> CreateAgentOutput - client.agents.metadata(agent_id) -> AgentMetadataResponse -- client.agents.reset(agent_id) -> object +- client.agents.reset(agent_id) -> object ## Query Types: ```python -from contextual.types.agents import ( - QueryResponse, - RetrievalInfoResponse, - QueryFeedbackResponse, - QueryMetricsResponse, -) +from contextual.types.agents import QueryResponse, RetrievalInfoResponse, QueryMetricsResponse ``` Methods: - client.agents.query.create(agent_id, \*\*params) -> QueryResponse -- client.agents.query.feedback(agent_id, \*\*params) -> object +- client.agents.query.feedback(agent_id, \*\*params) -> object - client.agents.query.metrics(agent_id, \*\*params) -> QueryMetricsResponse - client.agents.query.retrieval_info(message_id, \*, agent_id, \*\*params) -> RetrievalInfoResponse -## Evaluate - -Types: - -```python -from contextual.types.agents import CreateEvaluationResponse -``` - -Methods: - -- client.agents.evaluate.create(agent_id, \*\*params) -> CreateEvaluationResponse - -### Jobs - -Types: - -```python -from contextual.types.agents.evaluate import ( - EvaluationJobMetadata, - ListEvaluationJobsResponse, - JobCancelResponse, -) -``` - -Methods: - -- client.agents.evaluate.jobs.list(agent_id) -> ListEvaluationJobsResponse -- client.agents.evaluate.jobs.cancel(job_id, \*, agent_id) -> object -- client.agents.evaluate.jobs.metadata(job_id, \*, agent_id) -> EvaluationJobMetadata - -## Datasets - -Types: - -```python -from contextual.types.agents import CreateDatasetResponse, DatasetMetadata, ListDatasetsResponse -``` - -### Tune - -Types: - -```python -from contextual.types.agents.datasets import TuneDeleteResponse -``` - -Methods: - -- 
client.agents.datasets.tune.create(agent_id, \*\*params) -> CreateDatasetResponse -- client.agents.datasets.tune.retrieve(dataset_name, \*, agent_id, \*\*params) -> BinaryAPIResponse -- client.agents.datasets.tune.update(dataset_name, \*, agent_id, \*\*params) -> CreateDatasetResponse -- client.agents.datasets.tune.list(agent_id, \*\*params) -> ListDatasetsResponse -- client.agents.datasets.tune.delete(dataset_name, \*, agent_id) -> object -- client.agents.datasets.tune.metadata(dataset_name, \*, agent_id, \*\*params) -> DatasetMetadata - -### Evaluate - -Types: - -```python -from contextual.types.agents.datasets import EvaluateDeleteResponse -``` - -Methods: - -- client.agents.datasets.evaluate.create(agent_id, \*\*params) -> CreateDatasetResponse -- client.agents.datasets.evaluate.retrieve(dataset_name, \*, agent_id, \*\*params) -> BinaryAPIResponse -- client.agents.datasets.evaluate.update(dataset_name, \*, agent_id, \*\*params) -> CreateDatasetResponse -- client.agents.datasets.evaluate.list(agent_id, \*\*params) -> ListDatasetsResponse -- client.agents.datasets.evaluate.delete(dataset_name, \*, agent_id) -> object -- client.agents.datasets.evaluate.metadata(dataset_name, \*, agent_id, \*\*params) -> DatasetMetadata - -## Tune - -Types: - -```python -from contextual.types.agents import CreateTuneResponse -``` - -Methods: - -- client.agents.tune.create(agent_id, \*\*params) -> CreateTuneResponse - -### Jobs - -Types: - -```python -from contextual.types.agents.tune import ListTuneJobsResponse, TuneJobMetadata, JobDeleteResponse -``` - -Methods: - -- client.agents.tune.jobs.list(agent_id) -> ListTuneJobsResponse -- client.agents.tune.jobs.delete(job_id, \*, agent_id) -> object -- client.agents.tune.jobs.metadata(job_id, \*, agent_id) -> TuneJobMetadata - -### Models - -Types: - -```python -from contextual.types.agents.tune import ListTuneModelsResponse -``` - -Methods: - -- client.agents.tune.models.list(agent_id) -> ListTuneModelsResponse - # Users Types: 
```python -from contextual.types import ( - InviteUsersResponse, - ListUsersResponse, - NewUser, - UserUpdateResponse, - UserDeactivateResponse, -) +from contextual.types import InviteUsersResponse, ListUsersResponse, NewUser ``` Methods: -- client.users.update(\*\*params) -> object +- client.users.update(\*\*params) -> object - client.users.list(\*\*params) -> SyncUsersPage[User] -- client.users.deactivate(\*\*params) -> object +- client.users.deactivate(\*\*params) -> object - client.users.invite(\*\*params) -> InviteUsersResponse # LMUnit diff --git a/bin/check-release-environment b/bin/check-release-environment index 4fafbfe..b845b0f 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -3,7 +3,7 @@ errors=() if [ -z "${PYPI_TOKEN}" ]; then - errors+=("The CONTEXTUAL_AI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") + errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") fi lenErrors=${#errors[@]} diff --git a/pyproject.toml b/pyproject.toml index 9a3b763..1169a3e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "contextual-client" -version = "0.7.0" +version = "0.8.0" description = "The official Python library for the Contextual AI API" dynamic = ["readme"] license = "Apache-2.0" @@ -24,6 +24,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: MacOS", @@ -37,6 +38,8 @@ classifiers = [ Homepage = "https://github.com/ContextualAI/contextual-client-python" Repository = "https://github.com/ContextualAI/contextual-client-python" +[project.optional-dependencies] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"] [tool.rye] managed = true @@ 
-54,6 +57,7 @@ dev-dependencies = [ "importlib-metadata>=6.7.0", "rich>=13.7.1", "nest_asyncio==1.6.0", + "pytest-xdist>=3.6.1", ] [tool.rye.scripts] @@ -125,7 +129,7 @@ replacement = '[\1](https://github.com/ContextualAI/contextual-client-python/tre [tool.pytest.ini_options] testpaths = ["tests"] -addopts = "--tb=short" +addopts = "--tb=short -n auto" xfail_strict = true asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "session" @@ -155,7 +159,7 @@ reportPrivateUsage = false [tool.ruff] line-length = 120 output-format = "grouped" -target-version = "py37" +target-version = "py38" [tool.ruff.format] docstring-code-format = true diff --git a/requirements-dev.lock b/requirements-dev.lock index 42c2907..1a6388b 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -10,6 +10,13 @@ # universal: false -e file:. +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via contextual-client + # via httpx-aiohttp +aiosignal==1.3.2 + # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 @@ -17,6 +24,10 @@ anyio==4.4.0 # via httpx argcomplete==3.1.2 # via nox +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp certifi==2023.7.22 # via httpcore # via httpx @@ -30,18 +41,27 @@ distro==1.8.0 exceptiongroup==1.2.2 # via anyio # via pytest +execnet==2.1.1 + # via pytest-xdist filelock==3.12.4 # via virtualenv -h11==0.14.0 +frozenlist==1.6.2 + # via aiohttp + # via aiosignal +h11==0.16.0 # via httpcore -httpcore==1.0.2 +httpcore==1.0.9 # via httpx httpx==0.28.1 # via contextual-client + # via httpx-aiohttp # via respx +httpx-aiohttp==0.1.8 + # via contextual-client idna==3.4 # via anyio # via httpx + # via yarl importlib-metadata==7.0.0 iniconfig==2.0.0 # via pytest @@ -49,6 +69,9 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py +multidict==6.4.4 + # via aiohttp + # via yarl mypy==1.14.1 mypy-extensions==1.0.0 # via mypy @@ -63,6 +86,9 @@ platformdirs==3.11.0 # via virtualenv pluggy==1.5.0 # via pytest 
+propcache==0.3.1 + # via aiohttp + # via yarl pydantic==2.10.3 # via contextual-client pydantic-core==2.27.1 @@ -72,7 +98,9 @@ pygments==2.18.0 pyright==1.1.399 pytest==8.3.3 # via pytest-asyncio + # via pytest-xdist pytest-asyncio==0.24.0 +pytest-xdist==3.7.0 python-dateutil==2.8.2 # via time-machine pytz==2023.3.post1 @@ -94,11 +122,14 @@ tomli==2.0.2 typing-extensions==4.12.2 # via anyio # via contextual-client + # via multidict # via mypy # via pydantic # via pydantic-core # via pyright virtualenv==20.24.5 # via nox +yarl==1.20.0 + # via aiohttp zipp==3.17.0 # via importlib-metadata diff --git a/requirements.lock b/requirements.lock index bc4698e..321707d 100644 --- a/requirements.lock +++ b/requirements.lock @@ -10,11 +10,22 @@ # universal: false -e file:. +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via contextual-client + # via httpx-aiohttp +aiosignal==1.3.2 + # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 # via contextual-client # via httpx +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp certifi==2023.7.22 # via httpcore # via httpx @@ -22,15 +33,28 @@ distro==1.8.0 # via contextual-client exceptiongroup==1.2.2 # via anyio -h11==0.14.0 +frozenlist==1.6.2 + # via aiohttp + # via aiosignal +h11==0.16.0 # via httpcore -httpcore==1.0.2 +httpcore==1.0.9 # via httpx httpx==0.28.1 # via contextual-client + # via httpx-aiohttp +httpx-aiohttp==0.1.8 + # via contextual-client idna==3.4 # via anyio # via httpx + # via yarl +multidict==6.4.4 + # via aiohttp + # via yarl +propcache==0.3.1 + # via aiohttp + # via yarl pydantic==2.10.3 # via contextual-client pydantic-core==2.27.1 @@ -41,5 +65,8 @@ sniffio==1.3.0 typing-extensions==4.12.2 # via anyio # via contextual-client + # via multidict # via pydantic # via pydantic-core +yarl==1.20.0 + # via aiohttp diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 0000000..6fb2114 --- /dev/null +++ 
b/scripts/utils/upload-artifact.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -exuo pipefail + +FILENAME=$(basename dist/*.whl) + +RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(curl -v -X PUT \ + -H "Content-Type: binary/octet-stream" \ + --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/sunrise-python/$SHA/$FILENAME'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi diff --git a/src/contextual/__init__.py b/src/contextual/__init__.py index c9d0895..831570c 100644 --- a/src/contextual/__init__.py +++ b/src/contextual/__init__.py @@ -36,7 +36,7 @@ UnprocessableEntityError, APIResponseValidationError, ) -from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient +from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient from ._utils._logs import setup_logging as _setup_logging __all__ = [ @@ -78,6 +78,7 @@ "DEFAULT_CONNECTION_LIMITS", "DefaultHttpxClient", "DefaultAsyncHttpxClient", + "DefaultAioHttpClient", ] if not _t.TYPE_CHECKING: diff --git a/src/contextual/_base_client.py b/src/contextual/_base_client.py index 7369dca..2d5b5fa 100644 --- a/src/contextual/_base_client.py +++ b/src/contextual/_base_client.py @@ -529,6 +529,18 @@ def _build_request( # work around https://github.com/encode/httpx/discussions/2880 kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + is_body_allowed = options.method.lower() != "get" + + if is_body_allowed: + if isinstance(json_data, bytes): + kwargs["content"] = json_data + 
else: + kwargs["json"] = json_data if is_given(json_data) else None + kwargs["files"] = files + else: + headers.pop("Content-Type", None) + kwargs.pop("data", None) + # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, @@ -540,8 +552,6 @@ def _build_request( # so that passing a `TypedDict` doesn't cause an error. # https://github.com/microsoft/pyright/issues/3526#event-6715453066 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, - json=json_data if is_given(json_data) else None, - files=files, **kwargs, ) @@ -960,6 +970,9 @@ def request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + log.debug("Sending HTTP Request: %s %s", request.method, request.url) response = None @@ -1068,7 +1081,14 @@ def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, APIResponse): raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") @@ -1279,6 +1299,24 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) +try: + import httpx_aiohttp +except ImportError: + + class _DefaultAioHttpClient(httpx.AsyncClient): + def __init__(self, **_kwargs: Any) -> None: + raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra") +else: + + class 
_DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + + super().__init__(**kwargs) + + if TYPE_CHECKING: DefaultAsyncHttpxClient = httpx.AsyncClient """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK @@ -1287,8 +1325,12 @@ def __init__(self, **kwargs: Any) -> None: This is useful because overriding the `http_client` with your own instance of `httpx.AsyncClient` will result in httpx's defaults being used, not ours. """ + + DefaultAioHttpClient = httpx.AsyncClient + """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`.""" else: DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient + DefaultAioHttpClient = _DefaultAioHttpClient class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient): @@ -1460,6 +1502,9 @@ async def request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + log.debug("Sending HTTP Request: %s %s", request.method, request.url) response = None @@ -1568,7 +1613,14 @@ async def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, AsyncAPIResponse): raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") diff --git 
a/src/contextual/_files.py b/src/contextual/_files.py index 8c48fd6..3a712ec 100644 --- a/src/contextual/_files.py +++ b/src/contextual/_files.py @@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], _read_file_content(file[1]), *file[2:]) + return (file[0], read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -def _read_file_content(file: FileContent) -> HttpxFileContent: +def read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return pathlib.Path(file).read_bytes() return file @@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], await _async_read_file_content(file[1]), *file[2:]) + return (file[0], await async_read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -async def _async_read_file_content(file: FileContent) -> HttpxFileContent: +async def async_read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return await anyio.Path(file).read_bytes() diff --git a/src/contextual/_models.py b/src/contextual/_models.py index 798956f..b8387ce 100644 --- a/src/contextual/_models.py +++ b/src/contextual/_models.py @@ -2,9 +2,10 @@ import os import inspect -from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast +from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast from datetime import date, datetime from typing_extensions import ( + List, Unpack, Literal, ClassVar, @@ -207,14 +208,18 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] else: fields_values[name] = field_get_default(field) + extra_field_type = _get_extra_fields_type(__cls) + _extra = {} for key, value in values.items(): if key not in 
model_fields: + parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value + if PYDANTIC_V2: - _extra[key] = value + _extra[key] = parsed else: _fields_set.add(key) - fields_values[key] = value + fields_values[key] = parsed object.__setattr__(m, "__dict__", fields_values) @@ -366,7 +371,24 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: if type_ is None: raise RuntimeError(f"Unexpected field type is None for {key}") - return construct_type(value=value, type_=type_) + return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) + + +def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: + if not PYDANTIC_V2: + # TODO + return None + + schema = cls.__pydantic_core_schema__ + if schema["type"] == "model": + fields = schema["schema"] + if fields["type"] == "model-fields": + extras = fields.get("extras_schema") + if extras and "cls" in extras: + # mypy can't narrow the type + return extras["cls"] # type: ignore[no-any-return] + + return None def is_basemodel(type_: type) -> bool: @@ -420,7 +442,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: return cast(_T, construct_type(value=value, type_=type_)) -def construct_type(*, value: object, type_: object) -> object: +def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object: """Loose coercion to the expected type with construction of nested values. If the given value does not match the expected type then it is returned as-is. @@ -438,8 +460,10 @@ def construct_type(*, value: object, type_: object) -> object: type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(type_): - meta: tuple[Any, ...] = get_args(type_)[1:] + if metadata is not None and len(metadata) > 0: + meta: tuple[Any, ...] 
= tuple(metadata) + elif is_annotated_type(type_): + meta = get_args(type_)[1:] type_ = extract_type_arg(type_, 0) else: meta = tuple() @@ -737,6 +761,7 @@ class FinalRequestOptionsInput(TypedDict, total=False): idempotency_key: str json_data: Body extra_json: AnyMapping + follow_redirects: bool @final @@ -750,6 +775,7 @@ class FinalRequestOptions(pydantic.BaseModel): files: Union[HttpxRequestFiles, None] = None idempotency_key: Union[str, None] = None post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() + follow_redirects: Union[bool, None] = None # It should be noted that we cannot use `json` here as that would override # a BaseModel method in an incompatible fashion. diff --git a/src/contextual/_types.py b/src/contextual/_types.py index 883a2da..46a038e 100644 --- a/src/contextual/_types.py +++ b/src/contextual/_types.py @@ -100,6 +100,7 @@ class RequestOptions(TypedDict, total=False): params: Query extra_json: AnyMapping idempotency_key: str + follow_redirects: bool # Sentinel class used until PEP 0661 is accepted @@ -215,3 +216,4 @@ class _GenericAlias(Protocol): class HttpxSendArgs(TypedDict, total=False): auth: httpx.Auth + follow_redirects: bool diff --git a/src/contextual/_version.py b/src/contextual/_version.py index 99bb927..a855209 100644 --- a/src/contextual/_version.py +++ b/src/contextual/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "contextual" -__version__ = "0.7.0" # x-release-please-version +__version__ = "0.8.0" # x-release-please-version diff --git a/src/contextual/resources/agents/__init__.py b/src/contextual/resources/agents/__init__.py index 7acf4ec..b3996d0 100644 --- a/src/contextual/resources/agents/__init__.py +++ b/src/contextual/resources/agents/__init__.py @@ -1,13 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from .tune import ( - TuneResource, - AsyncTuneResource, - TuneResourceWithRawResponse, - AsyncTuneResourceWithRawResponse, - TuneResourceWithStreamingResponse, - AsyncTuneResourceWithStreamingResponse, -) from .query import ( QueryResource, AsyncQueryResource, @@ -24,22 +16,6 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) -from .datasets import ( - DatasetsResource, - AsyncDatasetsResource, - DatasetsResourceWithRawResponse, - AsyncDatasetsResourceWithRawResponse, - DatasetsResourceWithStreamingResponse, - AsyncDatasetsResourceWithStreamingResponse, -) -from .evaluate import ( - EvaluateResource, - AsyncEvaluateResource, - EvaluateResourceWithRawResponse, - AsyncEvaluateResourceWithRawResponse, - EvaluateResourceWithStreamingResponse, - AsyncEvaluateResourceWithStreamingResponse, -) __all__ = [ "QueryResource", @@ -48,24 +24,6 @@ "AsyncQueryResourceWithRawResponse", "QueryResourceWithStreamingResponse", "AsyncQueryResourceWithStreamingResponse", - "EvaluateResource", - "AsyncEvaluateResource", - "EvaluateResourceWithRawResponse", - "AsyncEvaluateResourceWithRawResponse", - "EvaluateResourceWithStreamingResponse", - "AsyncEvaluateResourceWithStreamingResponse", - "DatasetsResource", - "AsyncDatasetsResource", - "DatasetsResourceWithRawResponse", - "AsyncDatasetsResourceWithRawResponse", - "DatasetsResourceWithStreamingResponse", - "AsyncDatasetsResourceWithStreamingResponse", - "TuneResource", - "AsyncTuneResource", - "TuneResourceWithRawResponse", - "AsyncTuneResourceWithRawResponse", - "TuneResourceWithStreamingResponse", - "AsyncTuneResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", diff --git a/src/contextual/resources/agents/agents.py b/src/contextual/resources/agents/agents.py index e823e2d..0a2c3a9 100644 --- a/src/contextual/resources/agents/agents.py +++ b/src/contextual/resources/agents/agents.py @@ -18,14 +18,6 @@ from ..._types import NOT_GIVEN, Body, Query, 
Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property -from .tune.tune import ( - TuneResource, - AsyncTuneResource, - TuneResourceWithRawResponse, - AsyncTuneResourceWithRawResponse, - TuneResourceWithStreamingResponse, - AsyncTuneResourceWithStreamingResponse, -) from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( to_raw_response_wrapper, @@ -36,22 +28,6 @@ from ...pagination import SyncPage, AsyncPage from ...types.agent import Agent from ..._base_client import AsyncPaginator, make_request_options -from .datasets.datasets import ( - DatasetsResource, - AsyncDatasetsResource, - DatasetsResourceWithRawResponse, - AsyncDatasetsResourceWithRawResponse, - DatasetsResourceWithStreamingResponse, - AsyncDatasetsResourceWithStreamingResponse, -) -from .evaluate.evaluate import ( - EvaluateResource, - AsyncEvaluateResource, - EvaluateResourceWithRawResponse, - AsyncEvaluateResourceWithRawResponse, - EvaluateResourceWithStreamingResponse, - AsyncEvaluateResourceWithStreamingResponse, -) from ...types.agent_configs_param import AgentConfigsParam from ...types.create_agent_output import CreateAgentOutput from ...types.agent_metadata_response import AgentMetadataResponse @@ -64,18 +40,6 @@ class AgentsResource(SyncAPIResource): def query(self) -> QueryResource: return QueryResource(self._client) - @cached_property - def evaluate(self) -> EvaluateResource: - return EvaluateResource(self._client) - - @cached_property - def datasets(self) -> DatasetsResource: - return DatasetsResource(self._client) - - @cached_property - def tune(self) -> TuneResource: - return TuneResource(self._client) - @cached_property def with_raw_response(self) -> AgentsResourceWithRawResponse: """ @@ -103,6 +67,7 @@ def create( datastore_ids: List[str] | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, filter_prompt: str | NotGiven = NOT_GIVEN, + multiturn_system_prompt: str | NotGiven = 
NOT_GIVEN, no_retrieval_system_prompt: str | NotGiven = NOT_GIVEN, suggested_queries: List[str] | NotGiven = NOT_GIVEN, system_prompt: str | NotGiven = NOT_GIVEN, @@ -143,6 +108,8 @@ def create( filter_prompt: The prompt to an LLM which determines whether retrieved chunks are relevant to a given query and filters out irrelevant chunks. + multiturn_system_prompt: Instructions on how the agent should handle multi-turn conversations. + no_retrieval_system_prompt: Instructions on how the agent should respond when there are no relevant retrievals that can be used to answer a query. @@ -171,6 +138,7 @@ def create( "datastore_ids": datastore_ids, "description": description, "filter_prompt": filter_prompt, + "multiturn_system_prompt": multiturn_system_prompt, "no_retrieval_system_prompt": no_retrieval_system_prompt, "suggested_queries": suggested_queries, "system_prompt": system_prompt, @@ -191,6 +159,7 @@ def update( datastore_ids: List[str] | NotGiven = NOT_GIVEN, filter_prompt: str | NotGiven = NOT_GIVEN, llm_model_id: str | NotGiven = NOT_GIVEN, + multiturn_system_prompt: str | NotGiven = NOT_GIVEN, no_retrieval_system_prompt: str | NotGiven = NOT_GIVEN, suggested_queries: List[str] | NotGiven = NOT_GIVEN, system_prompt: str | NotGiven = NOT_GIVEN, @@ -220,6 +189,8 @@ def update( on which they were tuned. If no model is specified, the default model is used. Set to `default` to switch from a tuned model to the default model. + multiturn_system_prompt: Instructions on how the agent should handle multi-turn conversations. + no_retrieval_system_prompt: Instructions on how the agent should respond when there are no relevant retrievals that can be used to answer a query. 
@@ -249,6 +220,7 @@ def update( "datastore_ids": datastore_ids, "filter_prompt": filter_prompt, "llm_model_id": llm_model_id, + "multiturn_system_prompt": multiturn_system_prompt, "no_retrieval_system_prompt": no_retrieval_system_prompt, "suggested_queries": suggested_queries, "system_prompt": system_prompt, @@ -349,6 +321,42 @@ def delete( cast_to=object, ) + def copy( + self, + agent_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateAgentOutput: + """ + Copy an existing agent with all its configurations and datastore associations. + The copied agent will have "[COPY]" appended to its name. + + Args: + agent_id: ID of the agent to copy + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + return self._post( + f"/agents/{agent_id}/copy", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateAgentOutput, + ) + def metadata( self, agent_id: str, @@ -430,18 +438,6 @@ class AsyncAgentsResource(AsyncAPIResource): def query(self) -> AsyncQueryResource: return AsyncQueryResource(self._client) - @cached_property - def evaluate(self) -> AsyncEvaluateResource: - return AsyncEvaluateResource(self._client) - - @cached_property - def datasets(self) -> AsyncDatasetsResource: - return 
AsyncDatasetsResource(self._client) - - @cached_property - def tune(self) -> AsyncTuneResource: - return AsyncTuneResource(self._client) - @cached_property def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: """ @@ -469,6 +465,7 @@ async def create( datastore_ids: List[str] | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, filter_prompt: str | NotGiven = NOT_GIVEN, + multiturn_system_prompt: str | NotGiven = NOT_GIVEN, no_retrieval_system_prompt: str | NotGiven = NOT_GIVEN, suggested_queries: List[str] | NotGiven = NOT_GIVEN, system_prompt: str | NotGiven = NOT_GIVEN, @@ -509,6 +506,8 @@ async def create( filter_prompt: The prompt to an LLM which determines whether retrieved chunks are relevant to a given query and filters out irrelevant chunks. + multiturn_system_prompt: Instructions on how the agent should handle multi-turn conversations. + no_retrieval_system_prompt: Instructions on how the agent should respond when there are no relevant retrievals that can be used to answer a query. @@ -537,6 +536,7 @@ async def create( "datastore_ids": datastore_ids, "description": description, "filter_prompt": filter_prompt, + "multiturn_system_prompt": multiturn_system_prompt, "no_retrieval_system_prompt": no_retrieval_system_prompt, "suggested_queries": suggested_queries, "system_prompt": system_prompt, @@ -557,6 +557,7 @@ async def update( datastore_ids: List[str] | NotGiven = NOT_GIVEN, filter_prompt: str | NotGiven = NOT_GIVEN, llm_model_id: str | NotGiven = NOT_GIVEN, + multiturn_system_prompt: str | NotGiven = NOT_GIVEN, no_retrieval_system_prompt: str | NotGiven = NOT_GIVEN, suggested_queries: List[str] | NotGiven = NOT_GIVEN, system_prompt: str | NotGiven = NOT_GIVEN, @@ -586,6 +587,8 @@ async def update( on which they were tuned. If no model is specified, the default model is used. Set to `default` to switch from a tuned model to the default model. 
+ multiturn_system_prompt: Instructions on how the agent should handle multi-turn conversations. + no_retrieval_system_prompt: Instructions on how the agent should respond when there are no relevant retrievals that can be used to answer a query. @@ -615,6 +618,7 @@ async def update( "datastore_ids": datastore_ids, "filter_prompt": filter_prompt, "llm_model_id": llm_model_id, + "multiturn_system_prompt": multiturn_system_prompt, "no_retrieval_system_prompt": no_retrieval_system_prompt, "suggested_queries": suggested_queries, "system_prompt": system_prompt, @@ -715,6 +719,42 @@ async def delete( cast_to=object, ) + async def copy( + self, + agent_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateAgentOutput: + """ + Copy an existing agent with all its configurations and datastore associations. + The copied agent will have "[COPY]" appended to its name. 
+ + Args: + agent_id: ID of the agent to copy + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + return await self._post( + f"/agents/{agent_id}/copy", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateAgentOutput, + ) + async def metadata( self, agent_id: str, @@ -807,6 +847,9 @@ def __init__(self, agents: AgentsResource) -> None: self.delete = to_raw_response_wrapper( agents.delete, ) + self.copy = to_raw_response_wrapper( + agents.copy, + ) self.metadata = to_raw_response_wrapper( agents.metadata, ) @@ -818,18 +861,6 @@ def __init__(self, agents: AgentsResource) -> None: def query(self) -> QueryResourceWithRawResponse: return QueryResourceWithRawResponse(self._agents.query) - @cached_property - def evaluate(self) -> EvaluateResourceWithRawResponse: - return EvaluateResourceWithRawResponse(self._agents.evaluate) - - @cached_property - def datasets(self) -> DatasetsResourceWithRawResponse: - return DatasetsResourceWithRawResponse(self._agents.datasets) - - @cached_property - def tune(self) -> TuneResourceWithRawResponse: - return TuneResourceWithRawResponse(self._agents.tune) - class AsyncAgentsResourceWithRawResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -847,6 +878,9 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.delete = async_to_raw_response_wrapper( agents.delete, ) + self.copy = async_to_raw_response_wrapper( + agents.copy, + ) self.metadata = async_to_raw_response_wrapper( agents.metadata, ) @@ -858,18 +892,6 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def query(self) -> 
AsyncQueryResourceWithRawResponse: return AsyncQueryResourceWithRawResponse(self._agents.query) - @cached_property - def evaluate(self) -> AsyncEvaluateResourceWithRawResponse: - return AsyncEvaluateResourceWithRawResponse(self._agents.evaluate) - - @cached_property - def datasets(self) -> AsyncDatasetsResourceWithRawResponse: - return AsyncDatasetsResourceWithRawResponse(self._agents.datasets) - - @cached_property - def tune(self) -> AsyncTuneResourceWithRawResponse: - return AsyncTuneResourceWithRawResponse(self._agents.tune) - class AgentsResourceWithStreamingResponse: def __init__(self, agents: AgentsResource) -> None: @@ -887,6 +909,9 @@ def __init__(self, agents: AgentsResource) -> None: self.delete = to_streamed_response_wrapper( agents.delete, ) + self.copy = to_streamed_response_wrapper( + agents.copy, + ) self.metadata = to_streamed_response_wrapper( agents.metadata, ) @@ -898,18 +923,6 @@ def __init__(self, agents: AgentsResource) -> None: def query(self) -> QueryResourceWithStreamingResponse: return QueryResourceWithStreamingResponse(self._agents.query) - @cached_property - def evaluate(self) -> EvaluateResourceWithStreamingResponse: - return EvaluateResourceWithStreamingResponse(self._agents.evaluate) - - @cached_property - def datasets(self) -> DatasetsResourceWithStreamingResponse: - return DatasetsResourceWithStreamingResponse(self._agents.datasets) - - @cached_property - def tune(self) -> TuneResourceWithStreamingResponse: - return TuneResourceWithStreamingResponse(self._agents.tune) - class AsyncAgentsResourceWithStreamingResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -927,6 +940,9 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.delete = async_to_streamed_response_wrapper( agents.delete, ) + self.copy = async_to_streamed_response_wrapper( + agents.copy, + ) self.metadata = async_to_streamed_response_wrapper( agents.metadata, ) @@ -937,15 +953,3 @@ def __init__(self, agents: AsyncAgentsResource) -> None: 
@cached_property def query(self) -> AsyncQueryResourceWithStreamingResponse: return AsyncQueryResourceWithStreamingResponse(self._agents.query) - - @cached_property - def evaluate(self) -> AsyncEvaluateResourceWithStreamingResponse: - return AsyncEvaluateResourceWithStreamingResponse(self._agents.evaluate) - - @cached_property - def datasets(self) -> AsyncDatasetsResourceWithStreamingResponse: - return AsyncDatasetsResourceWithStreamingResponse(self._agents.datasets) - - @cached_property - def tune(self) -> AsyncTuneResourceWithStreamingResponse: - return AsyncTuneResourceWithStreamingResponse(self._agents.tune) diff --git a/src/contextual/resources/agents/datasets/__init__.py b/src/contextual/resources/agents/datasets/__init__.py deleted file mode 100644 index 059bd75..0000000 --- a/src/contextual/resources/agents/datasets/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .tune import ( - TuneResource, - AsyncTuneResource, - TuneResourceWithRawResponse, - AsyncTuneResourceWithRawResponse, - TuneResourceWithStreamingResponse, - AsyncTuneResourceWithStreamingResponse, -) -from .datasets import ( - DatasetsResource, - AsyncDatasetsResource, - DatasetsResourceWithRawResponse, - AsyncDatasetsResourceWithRawResponse, - DatasetsResourceWithStreamingResponse, - AsyncDatasetsResourceWithStreamingResponse, -) -from .evaluate import ( - EvaluateResource, - AsyncEvaluateResource, - EvaluateResourceWithRawResponse, - AsyncEvaluateResourceWithRawResponse, - EvaluateResourceWithStreamingResponse, - AsyncEvaluateResourceWithStreamingResponse, -) - -__all__ = [ - "TuneResource", - "AsyncTuneResource", - "TuneResourceWithRawResponse", - "AsyncTuneResourceWithRawResponse", - "TuneResourceWithStreamingResponse", - "AsyncTuneResourceWithStreamingResponse", - "EvaluateResource", - "AsyncEvaluateResource", - "EvaluateResourceWithRawResponse", - "AsyncEvaluateResourceWithRawResponse", - 
"EvaluateResourceWithStreamingResponse", - "AsyncEvaluateResourceWithStreamingResponse", - "DatasetsResource", - "AsyncDatasetsResource", - "DatasetsResourceWithRawResponse", - "AsyncDatasetsResourceWithRawResponse", - "DatasetsResourceWithStreamingResponse", - "AsyncDatasetsResourceWithStreamingResponse", -] diff --git a/src/contextual/resources/agents/datasets/datasets.py b/src/contextual/resources/agents/datasets/datasets.py deleted file mode 100644 index 8b3610d..0000000 --- a/src/contextual/resources/agents/datasets/datasets.py +++ /dev/null @@ -1,134 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .tune import ( - TuneResource, - AsyncTuneResource, - TuneResourceWithRawResponse, - AsyncTuneResourceWithRawResponse, - TuneResourceWithStreamingResponse, - AsyncTuneResourceWithStreamingResponse, -) -from .evaluate import ( - EvaluateResource, - AsyncEvaluateResource, - EvaluateResourceWithRawResponse, - AsyncEvaluateResourceWithRawResponse, - EvaluateResourceWithStreamingResponse, - AsyncEvaluateResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["DatasetsResource", "AsyncDatasetsResource"] - - -class DatasetsResource(SyncAPIResource): - @cached_property - def tune(self) -> TuneResource: - return TuneResource(self._client) - - @cached_property - def evaluate(self) -> EvaluateResource: - return EvaluateResource(self._client) - - @cached_property - def with_raw_response(self) -> DatasetsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return DatasetsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> DatasetsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return DatasetsResourceWithStreamingResponse(self) - - -class AsyncDatasetsResource(AsyncAPIResource): - @cached_property - def tune(self) -> AsyncTuneResource: - return AsyncTuneResource(self._client) - - @cached_property - def evaluate(self) -> AsyncEvaluateResource: - return AsyncEvaluateResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncDatasetsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return AsyncDatasetsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncDatasetsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return AsyncDatasetsResourceWithStreamingResponse(self) - - -class DatasetsResourceWithRawResponse: - def __init__(self, datasets: DatasetsResource) -> None: - self._datasets = datasets - - @cached_property - def tune(self) -> TuneResourceWithRawResponse: - return TuneResourceWithRawResponse(self._datasets.tune) - - @cached_property - def evaluate(self) -> EvaluateResourceWithRawResponse: - return EvaluateResourceWithRawResponse(self._datasets.evaluate) - - -class AsyncDatasetsResourceWithRawResponse: - def __init__(self, datasets: AsyncDatasetsResource) -> None: - self._datasets = datasets - - @cached_property - def tune(self) -> AsyncTuneResourceWithRawResponse: - return AsyncTuneResourceWithRawResponse(self._datasets.tune) - - @cached_property - def evaluate(self) -> AsyncEvaluateResourceWithRawResponse: - return AsyncEvaluateResourceWithRawResponse(self._datasets.evaluate) - - -class DatasetsResourceWithStreamingResponse: - def __init__(self, datasets: DatasetsResource) -> None: - self._datasets = datasets - - @cached_property - def tune(self) -> TuneResourceWithStreamingResponse: - return TuneResourceWithStreamingResponse(self._datasets.tune) - - @cached_property - def evaluate(self) -> EvaluateResourceWithStreamingResponse: - return EvaluateResourceWithStreamingResponse(self._datasets.evaluate) - - -class AsyncDatasetsResourceWithStreamingResponse: - def __init__(self, datasets: AsyncDatasetsResource) -> None: - self._datasets = datasets - - @cached_property - def tune(self) -> AsyncTuneResourceWithStreamingResponse: - return AsyncTuneResourceWithStreamingResponse(self._datasets.tune) - - @cached_property - def evaluate(self) -> AsyncEvaluateResourceWithStreamingResponse: - return AsyncEvaluateResourceWithStreamingResponse(self._datasets.evaluate) diff --git a/src/contextual/resources/agents/datasets/evaluate.py 
b/src/contextual/resources/agents/datasets/evaluate.py deleted file mode 100644 index 8ecaf1c..0000000 --- a/src/contextual/resources/agents/datasets/evaluate.py +++ /dev/null @@ -1,900 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Mapping, cast -from typing_extensions import Literal - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - BinaryAPIResponse, - AsyncBinaryAPIResponse, - StreamedBinaryAPIResponse, - AsyncStreamedBinaryAPIResponse, - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - to_custom_raw_response_wrapper, - async_to_streamed_response_wrapper, - to_custom_streamed_response_wrapper, - async_to_custom_raw_response_wrapper, - async_to_custom_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.agents.datasets import ( - evaluate_list_params, - evaluate_create_params, - evaluate_update_params, - evaluate_metadata_params, - evaluate_retrieve_params, -) -from ....types.agents.dataset_metadata import DatasetMetadata -from ....types.agents.list_datasets_response import ListDatasetsResponse -from ....types.agents.create_dataset_response import CreateDatasetResponse - -__all__ = ["EvaluateResource", "AsyncEvaluateResource"] - - -class EvaluateResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> EvaluateResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return EvaluateResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> EvaluateResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return EvaluateResourceWithStreamingResponse(self) - - def create( - self, - agent_id: str, - *, - dataset_name: str, - dataset_type: Literal["evaluation_set"], - file: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateDatasetResponse: - """ - Create a new evaluation `Dataset` for the specified `Agent` using the provided - JSONL or CSV file. A `Dataset` is a versioned collection of samples conforming - to a particular schema, and can be used to store `Evaluation` test-sets and - retrieve `Evaluation` results. - - Each `Dataset` is versioned and validated against its schema during creation and - subsequent updates. The provided `Dataset` file must conform to the schema - defined for the `dataset_type`. - - File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file - where each line is one JSON object. 
The following keys are required: - - - `prompt` (`string`): Prompt or question - - - `reference` (`string`): Reference or ground truth response - - Args: - agent_id: Agent ID to associate with the evaluation dataset - - dataset_name: Name of the evaluation dataset - - dataset_type: Type of evaluation dataset which determines its schema and validation rules. - - file: JSONL or CSV file containing the evaluation dataset - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - body = deepcopy_minimal( - { - "dataset_name": dataset_name, - "dataset_type": dataset_type, - "file": file, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - f"/agents/{agent_id}/datasets/evaluate", - body=maybe_transform(body, evaluate_create_params.EvaluateCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateDatasetResponse, - ) - - def retrieve( - self, - dataset_name: str, - *, - agent_id: str, - batch_size: int | NotGiven = NOT_GIVEN, - version: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BinaryAPIResponse: - """Stream the raw content of an evaluation `Dataset` version. - - If no version is - specified, the latest version is used. - - The `Dataset` content is downloaded in batches. Batch size can be configured to - meet specific processing requirements. - - Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with: - - - Content-Type: application/octet-stream - - - Content-Disposition: attachment - - - Chunked transfer encoding - - Args: - agent_id: Agent ID associated with the evaluation dataset - - dataset_name: Name of the evaluation dataset to retrieve - - batch_size: Batch size for processing - - version: Version number of the evaluation dataset to retrieve. Defaults to the latest - version if not specified. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})} - return self._get( - f"/agents/{agent_id}/datasets/evaluate/{dataset_name}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "batch_size": batch_size, - "version": version, - }, - evaluate_retrieve_params.EvaluateRetrieveParams, - ), - ), - cast_to=BinaryAPIResponse, - ) - - def update( - self, - dataset_name: str, - *, - agent_id: str, - dataset_type: 
Literal["evaluation_set"], - file: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateDatasetResponse: - """ - Append to an existing evaluation `Dataset`. - - Create a new version of the dataset by appending content to the `Dataset` and - validating against its schema. - - File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file - where each line is one JSON object. The following keys are required: - - - `prompt` (`string`): Prompt or question - - - `reference` (`string`): Reference or ground truth response - - Args: - agent_id: Agent ID associated with the evaluation dataset - - dataset_name: Name of the evaluation dataset to append to - - dataset_type: Type of evaluation dataset which determines its schema and validation rules. - Must match the `dataset_type` used at dataset creation time. 
- - file: JSONL or CSV file containing the entries to append to the evaluation dataset - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - body = deepcopy_minimal( - { - "dataset_type": dataset_type, - "file": file, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._put( - f"/agents/{agent_id}/datasets/evaluate/{dataset_name}", - body=maybe_transform(body, evaluate_update_params.EvaluateUpdateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateDatasetResponse, - ) - - def list( - self, - agent_id: str, - *, - dataset_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListDatasetsResponse: - """ - List all evaluation `Datasets` and their versions belonging to a particular - `Agent`. 
- - If a `dataset_name` filter is provided, all versions of that `Dataset` will be - listed. - - Includes metadata and schema for each `Dataset` version. - - Args: - agent_id: Agent ID for which to list associated evaluation datasets - - dataset_name: Optional dataset name to filter the results by. If provided, only versions from - that dataset are listed. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return self._get( - f"/agents/{agent_id}/datasets/evaluate", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"dataset_name": dataset_name}, evaluate_list_params.EvaluateListParams), - ), - cast_to=ListDatasetsResponse, - ) - - def delete( - self, - dataset_name: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> object: - """ - Delete an evaluation `Dataset` and all its versions. - - Permanently removes the `Dataset`, including all associated metadata. - - This operation is irreversible. 
- - Args: - agent_id: Agent ID associated with the evaluation dataset - - dataset_name: Name of the evaluation dataset to delete - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - return self._delete( - f"/agents/{agent_id}/datasets/evaluate/{dataset_name}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=object, - ) - - def metadata( - self, - dataset_name: str, - *, - agent_id: str, - version: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DatasetMetadata: - """ - Retrieve details of a specific evaluation `Dataset` version, or the latest - version if no `version` is specified. - - Provides comprehensive information about the `Dataset`, including its metadata - and schema. - - Args: - agent_id: Agent ID associated with the evaluation dataset - - dataset_name: Name of the evaluation dataset to retrieve details for - - version: Version number of the dataset. Defaults to the latest version if not specified. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - return self._get( - f"/agents/{agent_id}/datasets/evaluate/{dataset_name}/metadata", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"version": version}, evaluate_metadata_params.EvaluateMetadataParams), - ), - cast_to=DatasetMetadata, - ) - - -class AsyncEvaluateResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncEvaluateResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return AsyncEvaluateResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncEvaluateResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return AsyncEvaluateResourceWithStreamingResponse(self) - - async def create( - self, - agent_id: str, - *, - dataset_name: str, - dataset_type: Literal["evaluation_set"], - file: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateDatasetResponse: - """ - Create a new evaluation `Dataset` for the specified `Agent` using the provided - JSONL or CSV file. A `Dataset` is a versioned collection of samples conforming - to a particular schema, and can be used to store `Evaluation` test-sets and - retrieve `Evaluation` results. - - Each `Dataset` is versioned and validated against its schema during creation and - subsequent updates. The provided `Dataset` file must conform to the schema - defined for the `dataset_type`. - - File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file - where each line is one JSON object. The following keys are required: - - - `prompt` (`string`): Prompt or question - - - `reference` (`string`): Reference or ground truth response - - Args: - agent_id: Agent ID to associate with the evaluation dataset - - dataset_name: Name of the evaluation dataset - - dataset_type: Type of evaluation dataset which determines its schema and validation rules. 
- - file: JSONL or CSV file containing the evaluation dataset - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - body = deepcopy_minimal( - { - "dataset_name": dataset_name, - "dataset_type": dataset_type, - "file": file, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - f"/agents/{agent_id}/datasets/evaluate", - body=await async_maybe_transform(body, evaluate_create_params.EvaluateCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateDatasetResponse, - ) - - async def retrieve( - self, - dataset_name: str, - *, - agent_id: str, - batch_size: int | NotGiven = NOT_GIVEN, - version: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncBinaryAPIResponse: - """Stream the raw content of an evaluation `Dataset` version. - - If no version is - specified, the latest version is used. - - The `Dataset` content is downloaded in batches. 
Batch size can be configured to - meet specific processing requirements. - - Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with: - - - Content-Type: application/octet-stream - - - Content-Disposition: attachment - - - Chunked transfer encoding - - Args: - agent_id: Agent ID associated with the evaluation dataset - - dataset_name: Name of the evaluation dataset to retrieve - - batch_size: Batch size for processing - - version: Version number of the evaluation dataset to retrieve. Defaults to the latest - version if not specified. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})} - return await self._get( - f"/agents/{agent_id}/datasets/evaluate/{dataset_name}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "batch_size": batch_size, - "version": version, - }, - evaluate_retrieve_params.EvaluateRetrieveParams, - ), - ), - cast_to=AsyncBinaryAPIResponse, - ) - - async def update( - self, - dataset_name: str, - *, - agent_id: str, - dataset_type: Literal["evaluation_set"], - file: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateDatasetResponse: - """ - Append to an existing evaluation `Dataset`. - - Create a new version of the dataset by appending content to the `Dataset` and - validating against its schema. - - File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file - where each line is one JSON object. The following keys are required: - - - `prompt` (`string`): Prompt or question - - - `reference` (`string`): Reference or ground truth response - - Args: - agent_id: Agent ID associated with the evaluation dataset - - dataset_name: Name of the evaluation dataset to append to - - dataset_type: Type of evaluation dataset which determines its schema and validation rules. - Must match the `dataset_type` used at dataset creation time. - - file: JSONL or CSV file containing the entries to append to the evaluation dataset - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - body = deepcopy_minimal( - { - "dataset_type": dataset_type, - "file": file, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. 
- # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._put( - f"/agents/{agent_id}/datasets/evaluate/{dataset_name}", - body=await async_maybe_transform(body, evaluate_update_params.EvaluateUpdateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateDatasetResponse, - ) - - async def list( - self, - agent_id: str, - *, - dataset_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListDatasetsResponse: - """ - List all evaluation `Datasets` and their versions belonging to a particular - `Agent`. - - If a `dataset_name` filter is provided, all versions of that `Dataset` will be - listed. - - Includes metadata and schema for each `Dataset` version. - - Args: - agent_id: Agent ID for which to list associated evaluation datasets - - dataset_name: Optional dataset name to filter the results by. If provided, only versions from - that dataset are listed. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return await self._get( - f"/agents/{agent_id}/datasets/evaluate", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"dataset_name": dataset_name}, evaluate_list_params.EvaluateListParams - ), - ), - cast_to=ListDatasetsResponse, - ) - - async def delete( - self, - dataset_name: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> object: - """ - Delete an evaluation `Dataset` and all its versions. - - Permanently removes the `Dataset`, including all associated metadata. - - This operation is irreversible. 
- - Args: - agent_id: Agent ID associated with the evaluation dataset - - dataset_name: Name of the evaluation dataset to delete - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - return await self._delete( - f"/agents/{agent_id}/datasets/evaluate/{dataset_name}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=object, - ) - - async def metadata( - self, - dataset_name: str, - *, - agent_id: str, - version: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DatasetMetadata: - """ - Retrieve details of a specific evaluation `Dataset` version, or the latest - version if no `version` is specified. - - Provides comprehensive information about the `Dataset`, including its metadata - and schema. - - Args: - agent_id: Agent ID associated with the evaluation dataset - - dataset_name: Name of the evaluation dataset to retrieve details for - - version: Version number of the dataset. Defaults to the latest version if not specified. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - return await self._get( - f"/agents/{agent_id}/datasets/evaluate/{dataset_name}/metadata", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"version": version}, evaluate_metadata_params.EvaluateMetadataParams - ), - ), - cast_to=DatasetMetadata, - ) - - -class EvaluateResourceWithRawResponse: - def __init__(self, evaluate: EvaluateResource) -> None: - self._evaluate = evaluate - - self.create = to_raw_response_wrapper( - evaluate.create, - ) - self.retrieve = to_custom_raw_response_wrapper( - evaluate.retrieve, - BinaryAPIResponse, - ) - self.update = to_raw_response_wrapper( - evaluate.update, - ) - self.list = to_raw_response_wrapper( - evaluate.list, - ) - self.delete = to_raw_response_wrapper( - evaluate.delete, - ) - self.metadata = to_raw_response_wrapper( - evaluate.metadata, - ) - - -class AsyncEvaluateResourceWithRawResponse: - def __init__(self, evaluate: AsyncEvaluateResource) -> None: - self._evaluate = evaluate - - self.create = async_to_raw_response_wrapper( - evaluate.create, - ) - self.retrieve = async_to_custom_raw_response_wrapper( - evaluate.retrieve, - AsyncBinaryAPIResponse, - ) - self.update = async_to_raw_response_wrapper( - evaluate.update, - ) - self.list = async_to_raw_response_wrapper( - evaluate.list, - ) - self.delete = async_to_raw_response_wrapper( - evaluate.delete, - ) - self.metadata = async_to_raw_response_wrapper( - 
evaluate.metadata, - ) - - -class EvaluateResourceWithStreamingResponse: - def __init__(self, evaluate: EvaluateResource) -> None: - self._evaluate = evaluate - - self.create = to_streamed_response_wrapper( - evaluate.create, - ) - self.retrieve = to_custom_streamed_response_wrapper( - evaluate.retrieve, - StreamedBinaryAPIResponse, - ) - self.update = to_streamed_response_wrapper( - evaluate.update, - ) - self.list = to_streamed_response_wrapper( - evaluate.list, - ) - self.delete = to_streamed_response_wrapper( - evaluate.delete, - ) - self.metadata = to_streamed_response_wrapper( - evaluate.metadata, - ) - - -class AsyncEvaluateResourceWithStreamingResponse: - def __init__(self, evaluate: AsyncEvaluateResource) -> None: - self._evaluate = evaluate - - self.create = async_to_streamed_response_wrapper( - evaluate.create, - ) - self.retrieve = async_to_custom_streamed_response_wrapper( - evaluate.retrieve, - AsyncStreamedBinaryAPIResponse, - ) - self.update = async_to_streamed_response_wrapper( - evaluate.update, - ) - self.list = async_to_streamed_response_wrapper( - evaluate.list, - ) - self.delete = async_to_streamed_response_wrapper( - evaluate.delete, - ) - self.metadata = async_to_streamed_response_wrapper( - evaluate.metadata, - ) diff --git a/src/contextual/resources/agents/datasets/tune.py b/src/contextual/resources/agents/datasets/tune.py deleted file mode 100644 index e722664..0000000 --- a/src/contextual/resources/agents/datasets/tune.py +++ /dev/null @@ -1,934 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Mapping, cast -from typing_extensions import Literal - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - BinaryAPIResponse, - AsyncBinaryAPIResponse, - StreamedBinaryAPIResponse, - AsyncStreamedBinaryAPIResponse, - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - to_custom_raw_response_wrapper, - async_to_streamed_response_wrapper, - to_custom_streamed_response_wrapper, - async_to_custom_raw_response_wrapper, - async_to_custom_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.agents.datasets import ( - tune_list_params, - tune_create_params, - tune_update_params, - tune_metadata_params, - tune_retrieve_params, -) -from ....types.agents.dataset_metadata import DatasetMetadata -from ....types.agents.list_datasets_response import ListDatasetsResponse -from ....types.agents.create_dataset_response import CreateDatasetResponse - -__all__ = ["TuneResource", "AsyncTuneResource"] - - -class TuneResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> TuneResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return TuneResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> TuneResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return TuneResourceWithStreamingResponse(self) - - def create( - self, - agent_id: str, - *, - dataset_name: str, - dataset_type: Literal["tuning_set"], - file: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateDatasetResponse: - """ - Create a new tuning `Dataset` for the specified `Agent` using the provided JSONL - or CSV file. A `Dataset` is a versioned collection of samples conforming to a - particular schema, and can be used as a source of training and test data for - tuning jobs. - - Each `Dataset` is versioned and validated against its schema during creation and - subsequent updates. The provided `Dataset` file must conform to the schema - defined for the `dataset_type`. - - File schema for `dataset_type` `tuning_set` is a CSV file or a JSONL file where - each line is one JSON object. The following keys are required: - - - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference - answer. `knowledge` is a list of retrieved text chunks. - - - `reference` (`str`): The gold-standard answer to the prompt. - - - `guideline` (`str`): Guidelines for model output. If you do not have special - guidelines for the model's output, you can use the `System Prompt` defined in - your Agent configuration as the `guideline`. - - - `prompt` (`str`): Question for the model to respond to. - - For examples of what `tuning_set` should look like, check out our - `Tune & Evaluation Guide`. 
- - Args: - agent_id: Agent ID to associate with the tune dataset - - dataset_name: Name of the tune dataset - - dataset_type: Type of tune dataset which determines its schema and validation rules. - - file: JSONL or CSV file containing the tune dataset - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - body = deepcopy_minimal( - { - "dataset_name": dataset_name, - "dataset_type": dataset_type, - "file": file, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - f"/agents/{agent_id}/datasets/tune", - body=maybe_transform(body, tune_create_params.TuneCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateDatasetResponse, - ) - - def retrieve( - self, - dataset_name: str, - *, - agent_id: str, - batch_size: int | NotGiven = NOT_GIVEN, - version: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BinaryAPIResponse: - """Stream the raw content of a tuning `Dataset` version. - - If no version is - specified, the latest version is used. - - The `Dataset` content is downloaded in batches. Batch size can be configured to - meet specific processing requirements. - - Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with: - - - Content-Type: application/octet-stream - - - Content-Disposition: attachment - - - Chunked transfer encoding - - Args: - agent_id: Agent ID associated with the tune dataset - - dataset_name: Name of the tune dataset to retrieve - - batch_size: Batch size for processing - - version: Version number of the tune dataset to retrieve. Defaults to the latest version - if not specified. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})} - return self._get( - f"/agents/{agent_id}/datasets/tune/{dataset_name}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "batch_size": batch_size, - "version": version, - }, - tune_retrieve_params.TuneRetrieveParams, - ), - ), - cast_to=BinaryAPIResponse, - ) - - def update( - self, - dataset_name: str, - *, - agent_id: str, - dataset_type: Literal["tuning_set"], - file: FileTypes, - # Use 
the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateDatasetResponse: - """ - Append to an existing tuning `Dataset`. - - Create a new version of the dataset by appending content to the `Dataset` and - validating against its schema. - - File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file - where each line is one JSON object. The following keys are required: - - - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference - answer. `knowledge` is a list of retrieved text chunks. - - - `reference` (`str`): The gold-standard answer to the prompt. - - - `guideline` (`str`): Guidelines for model output. If you do not have special - guidelines for the model's output, you can use the `System Prompt` defined in - your Agent configuration as the `guideline`. - - - `prompt` (`str`): Question for the model to respond to. - - For examples of what `tuning_set` should look like, check out our - `Tune & Evaluation Guide`. - - Args: - agent_id: Agent ID associated with the tune dataset - - dataset_name: Name of the tune dataset to append to - - dataset_type: Type of tune dataset which determines its schema and validation rules. Must - match the `dataset_type` used at dataset creation time. 
- - file: JSONL or CSV file containing the entries to append to the tune dataset - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - body = deepcopy_minimal( - { - "dataset_type": dataset_type, - "file": file, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._put( - f"/agents/{agent_id}/datasets/tune/{dataset_name}", - body=maybe_transform(body, tune_update_params.TuneUpdateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateDatasetResponse, - ) - - def list( - self, - agent_id: str, - *, - dataset_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListDatasetsResponse: - """ - List all tuning `Datasets` and their versions belonging to a particular `Agent`. 
- - If a `dataset_name` filter is provided, all versions of that `Dataset` will be - listed. - - Includes metadata and schema for each `Dataset` version. - - Args: - agent_id: Agent ID for which to list associated evaluation datasets - - dataset_name: Optional dataset name to filter the results by. If provided, only versions from - that dataset are listed. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return self._get( - f"/agents/{agent_id}/datasets/tune", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"dataset_name": dataset_name}, tune_list_params.TuneListParams), - ), - cast_to=ListDatasetsResponse, - ) - - def delete( - self, - dataset_name: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> object: - """ - Delete a tuning `Dataset` and all its versions. - - Permanently removes the `Dataset`, including all associated metadata. - - This operation is irreversible. 
- - Args: - agent_id: Agent ID associated with the tune dataset - - dataset_name: Name of the tune dataset to delete - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - return self._delete( - f"/agents/{agent_id}/datasets/tune/{dataset_name}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=object, - ) - - def metadata( - self, - dataset_name: str, - *, - agent_id: str, - version: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DatasetMetadata: - """ - Retrieve details of a specific tuning `Dataset` version, or the latest version - if no `version` is specified. - - Provides comprehensive information about the `Dataset`, including its metadata - and schema. - - Args: - agent_id: Agent ID associated with the tune dataset - - dataset_name: Name of the tune dataset to retrieve details for - - version: Version number of the dataset. Defaults to the latest version if not specified. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - return self._get( - f"/agents/{agent_id}/datasets/tune/{dataset_name}/metadata", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"version": version}, tune_metadata_params.TuneMetadataParams), - ), - cast_to=DatasetMetadata, - ) - - -class AsyncTuneResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncTuneResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return AsyncTuneResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncTuneResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return AsyncTuneResourceWithStreamingResponse(self) - - async def create( - self, - agent_id: str, - *, - dataset_name: str, - dataset_type: Literal["tuning_set"], - file: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateDatasetResponse: - """ - Create a new tuning `Dataset` for the specified `Agent` using the provided JSONL - or CSV file. A `Dataset` is a versioned collection of samples conforming to a - particular schema, and can be used as a source of training and test data for - tuning jobs. - - Each `Dataset` is versioned and validated against its schema during creation and - subsequent updates. The provided `Dataset` file must conform to the schema - defined for the `dataset_type`. - - File schema for `dataset_type` `tuning_set` is a CSV file or a JSONL file where - each line is one JSON object. The following keys are required: - - - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference - answer. `knowledge` is a list of retrieved text chunks. - - - `reference` (`str`): The gold-standard answer to the prompt. - - - `guideline` (`str`): Guidelines for model output. If you do not have special - guidelines for the model's output, you can use the `System Prompt` defined in - your Agent configuration as the `guideline`. - - - `prompt` (`str`): Question for the model to respond to. - - For examples of what `tuning_set` should look like, check out our - `Tune & Evaluation Guide`. - - Args: - agent_id: Agent ID to associate with the tune dataset - - dataset_name: Name of the tune dataset - - dataset_type: Type of tune dataset which determines its schema and validation rules. 
- - file: JSONL or CSV file containing the tune dataset - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - body = deepcopy_minimal( - { - "dataset_name": dataset_name, - "dataset_type": dataset_type, - "file": file, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - f"/agents/{agent_id}/datasets/tune", - body=await async_maybe_transform(body, tune_create_params.TuneCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateDatasetResponse, - ) - - async def retrieve( - self, - dataset_name: str, - *, - agent_id: str, - batch_size: int | NotGiven = NOT_GIVEN, - version: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncBinaryAPIResponse: - """Stream the raw content of a tuning `Dataset` version. - - If no version is - specified, the latest version is used. - - The `Dataset` content is downloaded in batches. 
Batch size can be configured to - meet specific processing requirements. - - Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with: - - - Content-Type: application/octet-stream - - - Content-Disposition: attachment - - - Chunked transfer encoding - - Args: - agent_id: Agent ID associated with the tune dataset - - dataset_name: Name of the tune dataset to retrieve - - batch_size: Batch size for processing - - version: Version number of the tune dataset to retrieve. Defaults to the latest version - if not specified. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})} - return await self._get( - f"/agents/{agent_id}/datasets/tune/{dataset_name}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "batch_size": batch_size, - "version": version, - }, - tune_retrieve_params.TuneRetrieveParams, - ), - ), - cast_to=AsyncBinaryAPIResponse, - ) - - async def update( - self, - dataset_name: str, - *, - agent_id: str, - dataset_type: Literal["tuning_set"], - file: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateDatasetResponse: - """ - Append to an existing tuning `Dataset`. - - Create a new version of the dataset by appending content to the `Dataset` and - validating against its schema. - - File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file - where each line is one JSON object. The following keys are required: - - - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference - answer. `knowledge` is a list of retrieved text chunks. - - - `reference` (`str`): The gold-standard answer to the prompt. - - - `guideline` (`str`): Guidelines for model output. If you do not have special - guidelines for the model's output, you can use the `System Prompt` defined in - your Agent configuration as the `guideline`. - - - `prompt` (`str`): Question for the model to respond to. - - For examples of what `tuning_set` should look like, check out our - `Tune & Evaluation Guide`. - - Args: - agent_id: Agent ID associated with the tune dataset - - dataset_name: Name of the tune dataset to append to - - dataset_type: Type of tune dataset which determines its schema and validation rules. Must - match the `dataset_type` used at dataset creation time. 
- - file: JSONL or CSV file containing the entries to append to the tune dataset - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - body = deepcopy_minimal( - { - "dataset_type": dataset_type, - "file": file, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._put( - f"/agents/{agent_id}/datasets/tune/{dataset_name}", - body=await async_maybe_transform(body, tune_update_params.TuneUpdateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateDatasetResponse, - ) - - async def list( - self, - agent_id: str, - *, - dataset_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListDatasetsResponse: - """ - List all tuning `Datasets` and their versions belonging to a particular `Agent`. 
- - If a `dataset_name` filter is provided, all versions of that `Dataset` will be - listed. - - Includes metadata and schema for each `Dataset` version. - - Args: - agent_id: Agent ID for which to list associated evaluation datasets - - dataset_name: Optional dataset name to filter the results by. If provided, only versions from - that dataset are listed. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return await self._get( - f"/agents/{agent_id}/datasets/tune", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform({"dataset_name": dataset_name}, tune_list_params.TuneListParams), - ), - cast_to=ListDatasetsResponse, - ) - - async def delete( - self, - dataset_name: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> object: - """ - Delete a tuning `Dataset` and all its versions. - - Permanently removes the `Dataset`, including all associated metadata. - - This operation is irreversible. 
- - Args: - agent_id: Agent ID associated with the tune dataset - - dataset_name: Name of the tune dataset to delete - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - return await self._delete( - f"/agents/{agent_id}/datasets/tune/{dataset_name}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=object, - ) - - async def metadata( - self, - dataset_name: str, - *, - agent_id: str, - version: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DatasetMetadata: - """ - Retrieve details of a specific tuning `Dataset` version, or the latest version - if no `version` is specified. - - Provides comprehensive information about the `Dataset`, including its metadata - and schema. - - Args: - agent_id: Agent ID associated with the tune dataset - - dataset_name: Name of the tune dataset to retrieve details for - - version: Version number of the dataset. Defaults to the latest version if not specified. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not dataset_name: - raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}") - return await self._get( - f"/agents/{agent_id}/datasets/tune/{dataset_name}/metadata", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform({"version": version}, tune_metadata_params.TuneMetadataParams), - ), - cast_to=DatasetMetadata, - ) - - -class TuneResourceWithRawResponse: - def __init__(self, tune: TuneResource) -> None: - self._tune = tune - - self.create = to_raw_response_wrapper( - tune.create, - ) - self.retrieve = to_custom_raw_response_wrapper( - tune.retrieve, - BinaryAPIResponse, - ) - self.update = to_raw_response_wrapper( - tune.update, - ) - self.list = to_raw_response_wrapper( - tune.list, - ) - self.delete = to_raw_response_wrapper( - tune.delete, - ) - self.metadata = to_raw_response_wrapper( - tune.metadata, - ) - - -class AsyncTuneResourceWithRawResponse: - def __init__(self, tune: AsyncTuneResource) -> None: - self._tune = tune - - self.create = async_to_raw_response_wrapper( - tune.create, - ) - self.retrieve = async_to_custom_raw_response_wrapper( - tune.retrieve, - AsyncBinaryAPIResponse, - ) - self.update = async_to_raw_response_wrapper( - tune.update, - ) - self.list = async_to_raw_response_wrapper( - tune.list, - ) - self.delete = async_to_raw_response_wrapper( - tune.delete, - ) - self.metadata = async_to_raw_response_wrapper( - tune.metadata, - ) - - -class TuneResourceWithStreamingResponse: - def __init__(self, tune: TuneResource) -> None: 
- self._tune = tune - - self.create = to_streamed_response_wrapper( - tune.create, - ) - self.retrieve = to_custom_streamed_response_wrapper( - tune.retrieve, - StreamedBinaryAPIResponse, - ) - self.update = to_streamed_response_wrapper( - tune.update, - ) - self.list = to_streamed_response_wrapper( - tune.list, - ) - self.delete = to_streamed_response_wrapper( - tune.delete, - ) - self.metadata = to_streamed_response_wrapper( - tune.metadata, - ) - - -class AsyncTuneResourceWithStreamingResponse: - def __init__(self, tune: AsyncTuneResource) -> None: - self._tune = tune - - self.create = async_to_streamed_response_wrapper( - tune.create, - ) - self.retrieve = async_to_custom_streamed_response_wrapper( - tune.retrieve, - AsyncStreamedBinaryAPIResponse, - ) - self.update = async_to_streamed_response_wrapper( - tune.update, - ) - self.list = async_to_streamed_response_wrapper( - tune.list, - ) - self.delete = async_to_streamed_response_wrapper( - tune.delete, - ) - self.metadata = async_to_streamed_response_wrapper( - tune.metadata, - ) diff --git a/src/contextual/resources/agents/evaluate/__init__.py b/src/contextual/resources/agents/evaluate/__init__.py deleted file mode 100644 index 17643ad..0000000 --- a/src/contextual/resources/agents/evaluate/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .jobs import ( - JobsResource, - AsyncJobsResource, - JobsResourceWithRawResponse, - AsyncJobsResourceWithRawResponse, - JobsResourceWithStreamingResponse, - AsyncJobsResourceWithStreamingResponse, -) -from .evaluate import ( - EvaluateResource, - AsyncEvaluateResource, - EvaluateResourceWithRawResponse, - AsyncEvaluateResourceWithRawResponse, - EvaluateResourceWithStreamingResponse, - AsyncEvaluateResourceWithStreamingResponse, -) - -__all__ = [ - "JobsResource", - "AsyncJobsResource", - "JobsResourceWithRawResponse", - "AsyncJobsResourceWithRawResponse", - "JobsResourceWithStreamingResponse", - "AsyncJobsResourceWithStreamingResponse", - "EvaluateResource", - "AsyncEvaluateResource", - "EvaluateResourceWithRawResponse", - "AsyncEvaluateResourceWithRawResponse", - "EvaluateResourceWithStreamingResponse", - "AsyncEvaluateResourceWithStreamingResponse", -] diff --git a/src/contextual/resources/agents/evaluate/evaluate.py b/src/contextual/resources/agents/evaluate/evaluate.py deleted file mode 100644 index 8eee69d..0000000 --- a/src/contextual/resources/agents/evaluate/evaluate.py +++ /dev/null @@ -1,316 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Mapping, cast -from typing_extensions import Literal - -import httpx - -from .jobs import ( - JobsResource, - AsyncJobsResource, - JobsResourceWithRawResponse, - AsyncJobsResourceWithRawResponse, - JobsResourceWithStreamingResponse, - AsyncJobsResourceWithStreamingResponse, -) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.agents import evaluate_create_params -from ....types.agents.create_evaluation_response import CreateEvaluationResponse - -__all__ = ["EvaluateResource", "AsyncEvaluateResource"] - - -class EvaluateResource(SyncAPIResource): - @cached_property - def jobs(self) -> JobsResource: - return JobsResource(self._client) - - @cached_property - def with_raw_response(self) -> EvaluateResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return EvaluateResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> EvaluateResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return EvaluateResourceWithStreamingResponse(self) - - def create( - self, - agent_id: str, - *, - metrics: List[Literal["equivalence", "groundedness"]], - evalset_file: FileTypes | NotGiven = NOT_GIVEN, - evalset_name: str | NotGiven = NOT_GIVEN, - llm_model_id: str | NotGiven = NOT_GIVEN, - notes: str | NotGiven = NOT_GIVEN, - override_configuration: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateEvaluationResponse: - """ - Launch an `Evaluation` job which evaluates an `Agent` on a set of test questions - and reference answers. - - An `Evaluation` is an asynchronous operation. Users can select one or more - metrics to assess the quality of generated answers. These metrics include - `equivalence` and `groundedness`. `equivalence` evaluates if the Agent response - is equivalent to the ground truth (model-driven binary classification). - `groundedness` decomposes the Agent response into claims and then evaluates if - the claims are grounded by the retrieved documents. - - `Evaluation` data can be provided in one of two forms: - - - A CSV `evalset_file` containing the columns `prompt` (i.e. questions) and - `reference` (i.e. gold-answers). - - - An `evalset_name` which refers to a `Dataset` created through the - `/datasets/evaluate` API. - - Args: - agent_id: Agent ID of the agent to evaluate - - metrics: List of metrics to use. Supported metrics are `equivalence` and `groundedness`. 
- - evalset_file: Evalset file (CSV) to use for evaluation, containing the columns `prompt` (i.e. - question) and `reference` (i.e. ground truth response). Either `evalset_name` or - `evalset_file` must be provided, but not both. - - evalset_name: Name of the Dataset to use for evaluation, created through the - `/datasets/evaluate` API. Either `evalset_name` or `evalset_file` must be - provided, but not both. - - llm_model_id: ID of the model to evaluate. Uses the default model if not specified. - - notes: User notes for the evaluation job. - - override_configuration: Override the configuration for the query. This will override the configuration - for the agent during evaluation. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - body = deepcopy_minimal( - { - "metrics": metrics, - "evalset_file": evalset_file, - "evalset_name": evalset_name, - "llm_model_id": llm_model_id, - "notes": notes, - "override_configuration": override_configuration, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["evalset_file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. 
- # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - f"/agents/{agent_id}/evaluate", - body=maybe_transform(body, evaluate_create_params.EvaluateCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateEvaluationResponse, - ) - - -class AsyncEvaluateResource(AsyncAPIResource): - @cached_property - def jobs(self) -> AsyncJobsResource: - return AsyncJobsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncEvaluateResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return AsyncEvaluateResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncEvaluateResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return AsyncEvaluateResourceWithStreamingResponse(self) - - async def create( - self, - agent_id: str, - *, - metrics: List[Literal["equivalence", "groundedness"]], - evalset_file: FileTypes | NotGiven = NOT_GIVEN, - evalset_name: str | NotGiven = NOT_GIVEN, - llm_model_id: str | NotGiven = NOT_GIVEN, - notes: str | NotGiven = NOT_GIVEN, - override_configuration: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateEvaluationResponse: - """ - Launch an `Evaluation` job which evaluates an `Agent` on a set of test questions - and reference answers. - - An `Evaluation` is an asynchronous operation. Users can select one or more - metrics to assess the quality of generated answers. These metrics include - `equivalence` and `groundedness`. `equivalence` evaluates if the Agent response - is equivalent to the ground truth (model-driven binary classification). - `groundedness` decomposes the Agent response into claims and then evaluates if - the claims are grounded by the retrieved documents. - - `Evaluation` data can be provided in one of two forms: - - - A CSV `evalset_file` containing the columns `prompt` (i.e. questions) and - `reference` (i.e. gold-answers). - - - An `evalset_name` which refers to a `Dataset` created through the - `/datasets/evaluate` API. - - Args: - agent_id: Agent ID of the agent to evaluate - - metrics: List of metrics to use. Supported metrics are `equivalence` and `groundedness`. - - evalset_file: Evalset file (CSV) to use for evaluation, containing the columns `prompt` (i.e. - question) and `reference` (i.e. ground truth response). Either `evalset_name` or - `evalset_file` must be provided, but not both. - - evalset_name: Name of the Dataset to use for evaluation, created through the - `/datasets/evaluate` API. Either `evalset_name` or `evalset_file` must be - provided, but not both. - - llm_model_id: ID of the model to evaluate. Uses the default model if not specified. - - notes: User notes for the evaluation job. - - override_configuration: Override the configuration for the query. This will override the configuration - for the agent during evaluation. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - body = deepcopy_minimal( - { - "metrics": metrics, - "evalset_file": evalset_file, - "evalset_name": evalset_name, - "llm_model_id": llm_model_id, - "notes": notes, - "override_configuration": override_configuration, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["evalset_file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - f"/agents/{agent_id}/evaluate", - body=await async_maybe_transform(body, evaluate_create_params.EvaluateCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateEvaluationResponse, - ) - - -class EvaluateResourceWithRawResponse: - def __init__(self, evaluate: EvaluateResource) -> None: - self._evaluate = evaluate - - self.create = to_raw_response_wrapper( - evaluate.create, - ) - - @cached_property - def jobs(self) -> JobsResourceWithRawResponse: - return JobsResourceWithRawResponse(self._evaluate.jobs) - - -class AsyncEvaluateResourceWithRawResponse: - def __init__(self, evaluate: AsyncEvaluateResource) -> None: - self._evaluate = evaluate - - self.create = async_to_raw_response_wrapper( - evaluate.create, - ) - - @cached_property - def jobs(self) -> AsyncJobsResourceWithRawResponse: - return AsyncJobsResourceWithRawResponse(self._evaluate.jobs) - - -class EvaluateResourceWithStreamingResponse: - 
def __init__(self, evaluate: EvaluateResource) -> None: - self._evaluate = evaluate - - self.create = to_streamed_response_wrapper( - evaluate.create, - ) - - @cached_property - def jobs(self) -> JobsResourceWithStreamingResponse: - return JobsResourceWithStreamingResponse(self._evaluate.jobs) - - -class AsyncEvaluateResourceWithStreamingResponse: - def __init__(self, evaluate: AsyncEvaluateResource) -> None: - self._evaluate = evaluate - - self.create = async_to_streamed_response_wrapper( - evaluate.create, - ) - - @cached_property - def jobs(self) -> AsyncJobsResourceWithStreamingResponse: - return AsyncJobsResourceWithStreamingResponse(self._evaluate.jobs) diff --git a/src/contextual/resources/agents/evaluate/jobs.py b/src/contextual/resources/agents/evaluate/jobs.py deleted file mode 100644 index 80983b2..0000000 --- a/src/contextual/resources/agents/evaluate/jobs.py +++ /dev/null @@ -1,368 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.agents.evaluate.evaluation_job_metadata import EvaluationJobMetadata -from ....types.agents.evaluate.list_evaluation_jobs_response import ListEvaluationJobsResponse - -__all__ = ["JobsResource", "AsyncJobsResource"] - - -class JobsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> JobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return JobsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> JobsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return JobsResourceWithStreamingResponse(self) - - def list( - self, - agent_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListEvaluationJobsResponse: - """ - Retrieve a list of `Evaluation` jobs run for a given `Agent`, including the - `Evaluation`'s status and other metadata. - - Args: - agent_id: ID of agent for which to retrieve evaluation jobs - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return self._get( - f"/agents/{agent_id}/evaluate/jobs", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ListEvaluationJobsResponse, - ) - - def cancel( - self, - job_id: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> object: - """ - Cancels an `Evaluation` job if it is still in progress. - - Args: - agent_id: Agent ID for which to cancel the evaluation - - job_id: Evaluation job ID to cancel - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not job_id: - raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") - return self._post( - f"/agents/{agent_id}/evaluate/jobs/{job_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=object, - ) - - def metadata( - self, - job_id: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EvaluationJobMetadata: - """Get an `Evaluation` job's status and results. - - There are six possible statuses: - 'pending', 'processing', 'retrying', 'completed', 'failed', 'cancelled'. 
- - If the evaluation job has completed, you will see your evaluation `metrics` , - `job_metadata`, and the `dataset_name` where your eval metrics and row-by-row - results are stored. You can use the `/datasets/evaluate` API to view the - specified `dataset`. - - Args: - agent_id: ID of agent for which to retrieve evaluations - - job_id: Evaluation job ID to retrieve status and results for - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not job_id: - raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") - return self._get( - f"/agents/{agent_id}/evaluate/jobs/{job_id}/metadata", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=EvaluationJobMetadata, - ) - - -class AsyncJobsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncJobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return AsyncJobsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return AsyncJobsResourceWithStreamingResponse(self) - - async def list( - self, - agent_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListEvaluationJobsResponse: - """ - Retrieve a list of `Evaluation` jobs run for a given `Agent`, including the - `Evaluation`'s status and other metadata. - - Args: - agent_id: ID of agent for which to retrieve evaluation jobs - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return await self._get( - f"/agents/{agent_id}/evaluate/jobs", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ListEvaluationJobsResponse, - ) - - async def cancel( - self, - job_id: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> object: - """ - Cancels an `Evaluation` job if it is still in progress. - - Args: - agent_id: Agent ID for which to cancel the evaluation - - job_id: Evaluation job ID to cancel - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not job_id: - raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") - return await self._post( - f"/agents/{agent_id}/evaluate/jobs/{job_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=object, - ) - - async def metadata( - self, - job_id: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EvaluationJobMetadata: - """Get an `Evaluation` job's status and results. - - There are six possible statuses: - 'pending', 'processing', 'retrying', 'completed', 'failed', 'cancelled'. - - If the evaluation job has completed, you will see your evaluation `metrics` , - `job_metadata`, and the `dataset_name` where your eval metrics and row-by-row - results are stored. You can use the `/datasets/evaluate` API to view the - specified `dataset`. 
- - Args: - agent_id: ID of agent for which to retrieve evaluations - - job_id: Evaluation job ID to retrieve status and results for - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not job_id: - raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") - return await self._get( - f"/agents/{agent_id}/evaluate/jobs/{job_id}/metadata", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=EvaluationJobMetadata, - ) - - -class JobsResourceWithRawResponse: - def __init__(self, jobs: JobsResource) -> None: - self._jobs = jobs - - self.list = to_raw_response_wrapper( - jobs.list, - ) - self.cancel = to_raw_response_wrapper( - jobs.cancel, - ) - self.metadata = to_raw_response_wrapper( - jobs.metadata, - ) - - -class AsyncJobsResourceWithRawResponse: - def __init__(self, jobs: AsyncJobsResource) -> None: - self._jobs = jobs - - self.list = async_to_raw_response_wrapper( - jobs.list, - ) - self.cancel = async_to_raw_response_wrapper( - jobs.cancel, - ) - self.metadata = async_to_raw_response_wrapper( - jobs.metadata, - ) - - -class JobsResourceWithStreamingResponse: - def __init__(self, jobs: JobsResource) -> None: - self._jobs = jobs - - self.list = to_streamed_response_wrapper( - jobs.list, - ) - self.cancel = to_streamed_response_wrapper( - jobs.cancel, - ) - self.metadata = to_streamed_response_wrapper( - jobs.metadata, - ) - - -class AsyncJobsResourceWithStreamingResponse: - def __init__(self, jobs: AsyncJobsResource) -> None: - self._jobs = jobs - - self.list = async_to_streamed_response_wrapper( - jobs.list, - ) - self.cancel = 
async_to_streamed_response_wrapper( - jobs.cancel, - ) - self.metadata = async_to_streamed_response_wrapper( - jobs.metadata, - ) diff --git a/src/contextual/resources/agents/query.py b/src/contextual/resources/agents/query.py index 7e3f0c8..d321b34 100644 --- a/src/contextual/resources/agents/query.py +++ b/src/contextual/resources/agents/query.py @@ -62,6 +62,7 @@ def create( conversation_id: str | NotGiven = NOT_GIVEN, documents_filters: query_create_params.DocumentsFilters | NotGiven = NOT_GIVEN, llm_model_id: str | NotGiven = NOT_GIVEN, + override_configuration: query_create_params.OverrideConfiguration | NotGiven = NOT_GIVEN, stream: bool | NotGiven = NOT_GIVEN, structured_output: query_create_params.StructuredOutput | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -134,6 +135,9 @@ def create( llm_model_id: Model ID of the specific fine-tuned or aligned LLM model to use. Defaults to base model if not specified. + override_configuration: This will modify select configuration parameters for the agent during the + response generation. + stream: Set to `true` to receive a streamed response structured_output: Custom output structure format. 
@@ -156,6 +160,7 @@ def create( "conversation_id": conversation_id, "documents_filters": documents_filters, "llm_model_id": llm_model_id, + "override_configuration": override_configuration, "stream": stream, "structured_output": structured_output, }, @@ -394,6 +399,7 @@ async def create( conversation_id: str | NotGiven = NOT_GIVEN, documents_filters: query_create_params.DocumentsFilters | NotGiven = NOT_GIVEN, llm_model_id: str | NotGiven = NOT_GIVEN, + override_configuration: query_create_params.OverrideConfiguration | NotGiven = NOT_GIVEN, stream: bool | NotGiven = NOT_GIVEN, structured_output: query_create_params.StructuredOutput | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -466,6 +472,9 @@ async def create( llm_model_id: Model ID of the specific fine-tuned or aligned LLM model to use. Defaults to base model if not specified. + override_configuration: This will modify select configuration parameters for the agent during the + response generation. + stream: Set to `true` to receive a streamed response structured_output: Custom output structure format. @@ -488,6 +497,7 @@ async def create( "conversation_id": conversation_id, "documents_filters": documents_filters, "llm_model_id": llm_model_id, + "override_configuration": override_configuration, "stream": stream, "structured_output": structured_output, }, diff --git a/src/contextual/resources/agents/tune/__init__.py b/src/contextual/resources/agents/tune/__init__.py deleted file mode 100644 index 57e22b2..0000000 --- a/src/contextual/resources/agents/tune/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .jobs import ( - JobsResource, - AsyncJobsResource, - JobsResourceWithRawResponse, - AsyncJobsResourceWithRawResponse, - JobsResourceWithStreamingResponse, - AsyncJobsResourceWithStreamingResponse, -) -from .tune import ( - TuneResource, - AsyncTuneResource, - TuneResourceWithRawResponse, - AsyncTuneResourceWithRawResponse, - TuneResourceWithStreamingResponse, - AsyncTuneResourceWithStreamingResponse, -) -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) - -__all__ = [ - "JobsResource", - "AsyncJobsResource", - "JobsResourceWithRawResponse", - "AsyncJobsResourceWithRawResponse", - "JobsResourceWithStreamingResponse", - "AsyncJobsResourceWithStreamingResponse", - "ModelsResource", - "AsyncModelsResource", - "ModelsResourceWithRawResponse", - "AsyncModelsResourceWithRawResponse", - "ModelsResourceWithStreamingResponse", - "AsyncModelsResourceWithStreamingResponse", - "TuneResource", - "AsyncTuneResource", - "TuneResourceWithRawResponse", - "AsyncTuneResourceWithRawResponse", - "TuneResourceWithStreamingResponse", - "AsyncTuneResourceWithStreamingResponse", -] diff --git a/src/contextual/resources/agents/tune/jobs.py b/src/contextual/resources/agents/tune/jobs.py deleted file mode 100644 index 52c25b9..0000000 --- a/src/contextual/resources/agents/tune/jobs.py +++ /dev/null @@ -1,372 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.agents.tune.tune_job_metadata import TuneJobMetadata -from ....types.agents.tune.list_tune_jobs_response import ListTuneJobsResponse - -__all__ = ["JobsResource", "AsyncJobsResource"] - - -class JobsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> JobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return JobsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> JobsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return JobsResourceWithStreamingResponse(self) - - def list( - self, - agent_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListTuneJobsResponse: - """ - Retrieve a list of all fine-tuning jobs for a specified Agent. 
- - Args: - agent_id: ID of the Agent to list tuning jobs for - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return self._get( - f"/agents/{agent_id}/tune/jobs", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ListTuneJobsResponse, - ) - - def delete( - self, - job_id: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> object: - """Cancel a specific fine-tuning job. - - Terminates the fine-tuning job if it is still - in progress. 
- - Args: - agent_id: ID of the Agent associated with the tuning job - - job_id: ID of the tuning job to cancel - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not job_id: - raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") - return self._delete( - f"/agents/{agent_id}/tune/jobs/{job_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=object, - ) - - def metadata( - self, - job_id: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> TuneJobMetadata: - """Retrieve the status of a specific tuning job. - - Fetches the current status and - evaluation results, if available, for the specified tuning job. After the tuning - job is complete, the metadata associated with the tune job will include - evaluation results and a model ID. You can then activate the tuned model for - your agent by editing its config with the tuned model ID and the "Edit Agent" - API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you - will need to edit the Agent's config again and set the `llm_model_id` field to - "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`. 
- - Args: - agent_id: ID of the Agent associated with the tuning job - - job_id: ID of the tuning job to retrieve the status for - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not job_id: - raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") - return self._get( - f"/agents/{agent_id}/tune/jobs/{job_id}/metadata", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=TuneJobMetadata, - ) - - -class AsyncJobsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncJobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return AsyncJobsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return AsyncJobsResourceWithStreamingResponse(self) - - async def list( - self, - agent_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListTuneJobsResponse: - """ - Retrieve a list of all fine-tuning jobs for a specified Agent. - - Args: - agent_id: ID of the Agent to list tuning jobs for - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return await self._get( - f"/agents/{agent_id}/tune/jobs", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ListTuneJobsResponse, - ) - - async def delete( - self, - job_id: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> object: - """Cancel a specific fine-tuning job. - - Terminates the fine-tuning job if it is still - in progress. 
- - Args: - agent_id: ID of the Agent associated with the tuning job - - job_id: ID of the tuning job to cancel - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not job_id: - raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") - return await self._delete( - f"/agents/{agent_id}/tune/jobs/{job_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=object, - ) - - async def metadata( - self, - job_id: str, - *, - agent_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> TuneJobMetadata: - """Retrieve the status of a specific tuning job. - - Fetches the current status and - evaluation results, if available, for the specified tuning job. After the tuning - job is complete, the metadata associated with the tune job will include - evaluation results and a model ID. You can then activate the tuned model for - your agent by editing its config with the tuned model ID and the "Edit Agent" - API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you - will need to edit the Agent's config again and set the `llm_model_id` field to - "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`. 
- - Args: - agent_id: ID of the Agent associated with the tuning job - - job_id: ID of the tuning job to retrieve the status for - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - if not job_id: - raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") - return await self._get( - f"/agents/{agent_id}/tune/jobs/{job_id}/metadata", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=TuneJobMetadata, - ) - - -class JobsResourceWithRawResponse: - def __init__(self, jobs: JobsResource) -> None: - self._jobs = jobs - - self.list = to_raw_response_wrapper( - jobs.list, - ) - self.delete = to_raw_response_wrapper( - jobs.delete, - ) - self.metadata = to_raw_response_wrapper( - jobs.metadata, - ) - - -class AsyncJobsResourceWithRawResponse: - def __init__(self, jobs: AsyncJobsResource) -> None: - self._jobs = jobs - - self.list = async_to_raw_response_wrapper( - jobs.list, - ) - self.delete = async_to_raw_response_wrapper( - jobs.delete, - ) - self.metadata = async_to_raw_response_wrapper( - jobs.metadata, - ) - - -class JobsResourceWithStreamingResponse: - def __init__(self, jobs: JobsResource) -> None: - self._jobs = jobs - - self.list = to_streamed_response_wrapper( - jobs.list, - ) - self.delete = to_streamed_response_wrapper( - jobs.delete, - ) - self.metadata = to_streamed_response_wrapper( - jobs.metadata, - ) - - -class AsyncJobsResourceWithStreamingResponse: - def __init__(self, jobs: AsyncJobsResource) -> None: - self._jobs = jobs - - self.list = async_to_streamed_response_wrapper( - jobs.list, - ) - self.delete = 
async_to_streamed_response_wrapper( - jobs.delete, - ) - self.metadata = async_to_streamed_response_wrapper( - jobs.metadata, - ) diff --git a/src/contextual/resources/agents/tune/models.py b/src/contextual/resources/agents/tune/models.py deleted file mode 100644 index 5b9b917..0000000 --- a/src/contextual/resources/agents/tune/models.py +++ /dev/null @@ -1,167 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.agents.tune.list_tune_models_response import ListTuneModelsResponse - -__all__ = ["ModelsResource", "AsyncModelsResource"] - - -class ModelsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ModelsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return ModelsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return ModelsResourceWithStreamingResponse(self) - - def list( - self, - agent_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListTuneModelsResponse: - """ - Retrieves a list of tuned models associated with the specified Agent. - - Args: - agent_id: ID of the Agent from which to retrieve tuned models - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return self._get( - f"/agents/{agent_id}/tune/models", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ListTuneModelsResponse, - ) - - -class AsyncModelsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return AsyncModelsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return AsyncModelsResourceWithStreamingResponse(self) - - async def list( - self, - agent_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListTuneModelsResponse: - """ - Retrieves a list of tuned models associated with the specified Agent. 
- - Args: - agent_id: ID of the Agent from which to retrieve tuned models - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - return await self._get( - f"/agents/{agent_id}/tune/models", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ListTuneModelsResponse, - ) - - -class ModelsResourceWithRawResponse: - def __init__(self, models: ModelsResource) -> None: - self._models = models - - self.list = to_raw_response_wrapper( - models.list, - ) - - -class AsyncModelsResourceWithRawResponse: - def __init__(self, models: AsyncModelsResource) -> None: - self._models = models - - self.list = async_to_raw_response_wrapper( - models.list, - ) - - -class ModelsResourceWithStreamingResponse: - def __init__(self, models: ModelsResource) -> None: - self._models = models - - self.list = to_streamed_response_wrapper( - models.list, - ) - - -class AsyncModelsResourceWithStreamingResponse: - def __init__(self, models: AsyncModelsResource) -> None: - self._models = models - - self.list = async_to_streamed_response_wrapper( - models.list, - ) diff --git a/src/contextual/resources/agents/tune/tune.py b/src/contextual/resources/agents/tune/tune.py deleted file mode 100644 index 10bbb9d..0000000 --- a/src/contextual/resources/agents/tune/tune.py +++ /dev/null @@ -1,498 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Mapping, Optional, cast -from typing_extensions import Literal - -import httpx - -from .jobs import ( - JobsResource, - AsyncJobsResource, - JobsResourceWithRawResponse, - AsyncJobsResourceWithRawResponse, - JobsResourceWithStreamingResponse, - AsyncJobsResourceWithStreamingResponse, -) -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.agents import tune_create_params -from ....types.agents.create_tune_response import CreateTuneResponse - -__all__ = ["TuneResource", "AsyncTuneResource"] - - -class TuneResource(SyncAPIResource): - @cached_property - def jobs(self) -> JobsResource: - return JobsResource(self._client) - - @cached_property - def models(self) -> ModelsResource: - return ModelsResource(self._client) - - @cached_property - def with_raw_response(self) -> TuneResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return TuneResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> TuneResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return TuneResourceWithStreamingResponse(self) - - def create( - self, - agent_id: str, - *, - hyperparams_learning_rate: float | NotGiven = NOT_GIVEN, - hyperparams_lora_alpha: Literal[8, 16, 32, 64, 128] | NotGiven = NOT_GIVEN, - hyperparams_lora_dropout: float | NotGiven = NOT_GIVEN, - hyperparams_lora_rank: Literal[8, 16, 32, 64] | NotGiven = NOT_GIVEN, - hyperparams_num_epochs: int | NotGiven = NOT_GIVEN, - hyperparams_warmup_ratio: float | NotGiven = NOT_GIVEN, - metadata_file: FileTypes | NotGiven = NOT_GIVEN, - sdp_only: bool | NotGiven = NOT_GIVEN, - synth_data: bool | NotGiven = NOT_GIVEN, - test_dataset_name: Optional[str] | NotGiven = NOT_GIVEN, - test_file: Optional[FileTypes] | NotGiven = NOT_GIVEN, - train_dataset_name: Optional[str] | NotGiven = NOT_GIVEN, - training_file: Optional[FileTypes] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateTuneResponse: - """ - Create a tuning job for the specified `Agent` to specialize it to your specific - domain or use case. - - This API initiates an asynchronous tuning task. 
You can provide the required - data through one of two ways: - - - Provide a `training_file` and an optional `test_file`. If no `test_file` is - provided, a portion of the `training_file` will be held out as the test set. - For easy reusability, the `training_file` is automatically saved as a `Tuning` - `Dataset`, and the `test_file` as an `Evaluation` `Dataset`. You can manage - them via the `/datasets/tune` and `/datasets/evaluation` endpoints. - - - Provide a `Tuning` `Dataset` and an optional `Evaluation` `Dataset`. You can - create a `Tuning` `Dataset` and `Evaluation` `Dataset` using the - `/datasets/tune` and `/datasets/evaluation` endpoints respectively. - - The API returns a tune job `id` which can be used to check on the status of your - tuning task through the `GET /tune/jobs/{job_id}/metadata` endpoint. - - After the tuning job is complete, the metadata associated with the tune job will - include evaluation results and a model ID. You can then deploy the tuned model - to the agent by editing its config with the tuned model ID and the "Edit Agent" - API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you - will need to edit the Agent's config again and set the `llm_model_id` field to - "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`. - - Args: - agent_id: ID of the Agent to list tuning jobs for - - hyperparams_learning_rate: Controls how quickly the model adapts to the training data. Must be greater than - 0 and less than or equal to 0.1. - - hyperparams_lora_alpha: Scaling factor that controls the magnitude of LoRA updates. Higher values lead - to stronger adaptation effects. The effective learning strength is determined by - the ratio of lora_alpha/lora_rank. Must be one of: 8, 16, 32, 64 or 128 - - hyperparams_lora_dropout: LoRA dropout randomly disables connections during training to prevent - overfitting and improve generalization when fine-tuning language models with - Low-Rank Adaptation. 
Must be between 0 and 1 (exclusive). - - hyperparams_lora_rank: Controls the capacity of the LoRA adapters. Must be one of: 8, 16, 32, or 64. - - hyperparams_num_epochs: Number of complete passes through the training dataset. - - hyperparams_warmup_ratio: Fraction of training steps used for learning rate warmup. Must be between 0 and - 1 (exclusive). - - metadata_file: Optional. Metadata file to use for synthetic data pipeline. - - sdp_only: Runs the SDP pipeline only if set to True. - - synth_data: Optional. Whether to generate synthetic data for training - - test_dataset_name: Optional. `Dataset` to use for testing model checkpoints, created through the - `/datasets/evaluate` API. - - test_file: Optional. Local path to the test data file. The test file should follow the same - format as the training data file. - - train_dataset_name: `Dataset` to use for training, created through the `/datasets/tune` API. Either - `train_dataset_name` or `training_file` must be provided, but not both. - - training_file: Local path to the training data file. - - The file should be in JSON array format, where each element of the array is a - JSON object represents a single training example. The four required fields are - `guideline`, `prompt`, `reference`, and `knowledge`. - - - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference - answer. `knowledge` is a list of retrieved text chunks. - - - `reference` (`str`): The gold-standard answer to the prompt. - - - `guideline` (`str`): Guidelines for model output. If you do not have special - guidelines for the model's output, you can use the `System Prompt` defined in - your Agent configuration as the `guideline`. - - - `prompt` (`str`): Question for the model to respond to. 
- - Example: - - ```json - [ - { - "guideline": "The answer should be accurate.", - "prompt": "What was last quarter's revenue?", - "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.", - "knowledge": [ - "Quarterly report: Q3 revenue was $1.2 million.", - "Quarterly report: Q2 revenue was $1.1 million.", - ... - ], - }, - ... - ] - ``` - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - body = deepcopy_minimal( - { - "hyperparams_learning_rate": hyperparams_learning_rate, - "hyperparams_lora_alpha": hyperparams_lora_alpha, - "hyperparams_lora_dropout": hyperparams_lora_dropout, - "hyperparams_lora_rank": hyperparams_lora_rank, - "hyperparams_num_epochs": hyperparams_num_epochs, - "hyperparams_warmup_ratio": hyperparams_warmup_ratio, - "metadata_file": metadata_file, - "sdp_only": sdp_only, - "synth_data": synth_data, - "test_dataset_name": test_dataset_name, - "test_file": test_file, - "train_dataset_name": train_dataset_name, - "training_file": training_file, - } - ) - files = extract_files( - cast(Mapping[str, object], body), paths=[["training_file"], ["test_file"], ["metadata_file"]] - ) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. 
- # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - f"/agents/{agent_id}/tune", - body=maybe_transform(body, tune_create_params.TuneCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateTuneResponse, - ) - - -class AsyncTuneResource(AsyncAPIResource): - @cached_property - def jobs(self) -> AsyncJobsResource: - return AsyncJobsResource(self._client) - - @cached_property - def models(self) -> AsyncModelsResource: - return AsyncModelsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncTuneResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers - """ - return AsyncTuneResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncTuneResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response - """ - return AsyncTuneResourceWithStreamingResponse(self) - - async def create( - self, - agent_id: str, - *, - hyperparams_learning_rate: float | NotGiven = NOT_GIVEN, - hyperparams_lora_alpha: Literal[8, 16, 32, 64, 128] | NotGiven = NOT_GIVEN, - hyperparams_lora_dropout: float | NotGiven = NOT_GIVEN, - hyperparams_lora_rank: Literal[8, 16, 32, 64] | NotGiven = NOT_GIVEN, - hyperparams_num_epochs: int | NotGiven = NOT_GIVEN, - hyperparams_warmup_ratio: float | NotGiven = NOT_GIVEN, - metadata_file: FileTypes | NotGiven = NOT_GIVEN, - sdp_only: bool | NotGiven = NOT_GIVEN, - synth_data: bool | NotGiven = NOT_GIVEN, - test_dataset_name: Optional[str] | NotGiven = NOT_GIVEN, - test_file: Optional[FileTypes] | NotGiven = NOT_GIVEN, - train_dataset_name: Optional[str] | NotGiven = NOT_GIVEN, - training_file: Optional[FileTypes] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateTuneResponse: - """ - Create a tuning job for the specified `Agent` to specialize it to your specific - domain or use case. - - This API initiates an asynchronous tuning task. You can provide the required - data through one of two ways: - - - Provide a `training_file` and an optional `test_file`. If no `test_file` is - provided, a portion of the `training_file` will be held out as the test set. - For easy reusability, the `training_file` is automatically saved as a `Tuning` - `Dataset`, and the `test_file` as an `Evaluation` `Dataset`. 
You can manage - them via the `/datasets/tune` and `/datasets/evaluation` endpoints. - - - Provide a `Tuning` `Dataset` and an optional `Evaluation` `Dataset`. You can - create a `Tuning` `Dataset` and `Evaluation` `Dataset` using the - `/datasets/tune` and `/datasets/evaluation` endpoints respectively. - - The API returns a tune job `id` which can be used to check on the status of your - tuning task through the `GET /tune/jobs/{job_id}/metadata` endpoint. - - After the tuning job is complete, the metadata associated with the tune job will - include evaluation results and a model ID. You can then deploy the tuned model - to the agent by editing its config with the tuned model ID and the "Edit Agent" - API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you - will need to edit the Agent's config again and set the `llm_model_id` field to - "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`. - - Args: - agent_id: ID of the Agent to list tuning jobs for - - hyperparams_learning_rate: Controls how quickly the model adapts to the training data. Must be greater than - 0 and less than or equal to 0.1. - - hyperparams_lora_alpha: Scaling factor that controls the magnitude of LoRA updates. Higher values lead - to stronger adaptation effects. The effective learning strength is determined by - the ratio of lora_alpha/lora_rank. Must be one of: 8, 16, 32, 64 or 128 - - hyperparams_lora_dropout: LoRA dropout randomly disables connections during training to prevent - overfitting and improve generalization when fine-tuning language models with - Low-Rank Adaptation. Must be between 0 and 1 (exclusive). - - hyperparams_lora_rank: Controls the capacity of the LoRA adapters. Must be one of: 8, 16, 32, or 64. - - hyperparams_num_epochs: Number of complete passes through the training dataset. - - hyperparams_warmup_ratio: Fraction of training steps used for learning rate warmup. Must be between 0 and - 1 (exclusive). 
- - metadata_file: Optional. Metadata file to use for synthetic data pipeline. - - sdp_only: Runs the SDP pipeline only if set to True. - - synth_data: Optional. Whether to generate synthetic data for training - - test_dataset_name: Optional. `Dataset` to use for testing model checkpoints, created through the - `/datasets/evaluate` API. - - test_file: Optional. Local path to the test data file. The test file should follow the same - format as the training data file. - - train_dataset_name: `Dataset` to use for training, created through the `/datasets/tune` API. Either - `train_dataset_name` or `training_file` must be provided, but not both. - - training_file: Local path to the training data file. - - The file should be in JSON array format, where each element of the array is a - JSON object represents a single training example. The four required fields are - `guideline`, `prompt`, `reference`, and `knowledge`. - - - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference - answer. `knowledge` is a list of retrieved text chunks. - - - `reference` (`str`): The gold-standard answer to the prompt. - - - `guideline` (`str`): Guidelines for model output. If you do not have special - guidelines for the model's output, you can use the `System Prompt` defined in - your Agent configuration as the `guideline`. - - - `prompt` (`str`): Question for the model to respond to. - - Example: - - ```json - [ - { - "guideline": "The answer should be accurate.", - "prompt": "What was last quarter's revenue?", - "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.", - "knowledge": [ - "Quarterly report: Q3 revenue was $1.2 million.", - "Quarterly report: Q2 revenue was $1.1 million.", - ... - ], - }, - ... 
- ] - ``` - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_id: - raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") - body = deepcopy_minimal( - { - "hyperparams_learning_rate": hyperparams_learning_rate, - "hyperparams_lora_alpha": hyperparams_lora_alpha, - "hyperparams_lora_dropout": hyperparams_lora_dropout, - "hyperparams_lora_rank": hyperparams_lora_rank, - "hyperparams_num_epochs": hyperparams_num_epochs, - "hyperparams_warmup_ratio": hyperparams_warmup_ratio, - "metadata_file": metadata_file, - "sdp_only": sdp_only, - "synth_data": synth_data, - "test_dataset_name": test_dataset_name, - "test_file": test_file, - "train_dataset_name": train_dataset_name, - "training_file": training_file, - } - ) - files = extract_files( - cast(Mapping[str, object], body), paths=[["training_file"], ["test_file"], ["metadata_file"]] - ) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. 
- # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - f"/agents/{agent_id}/tune", - body=await async_maybe_transform(body, tune_create_params.TuneCreateParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateTuneResponse, - ) - - -class TuneResourceWithRawResponse: - def __init__(self, tune: TuneResource) -> None: - self._tune = tune - - self.create = to_raw_response_wrapper( - tune.create, - ) - - @cached_property - def jobs(self) -> JobsResourceWithRawResponse: - return JobsResourceWithRawResponse(self._tune.jobs) - - @cached_property - def models(self) -> ModelsResourceWithRawResponse: - return ModelsResourceWithRawResponse(self._tune.models) - - -class AsyncTuneResourceWithRawResponse: - def __init__(self, tune: AsyncTuneResource) -> None: - self._tune = tune - - self.create = async_to_raw_response_wrapper( - tune.create, - ) - - @cached_property - def jobs(self) -> AsyncJobsResourceWithRawResponse: - return AsyncJobsResourceWithRawResponse(self._tune.jobs) - - @cached_property - def models(self) -> AsyncModelsResourceWithRawResponse: - return AsyncModelsResourceWithRawResponse(self._tune.models) - - -class TuneResourceWithStreamingResponse: - def __init__(self, tune: TuneResource) -> None: - self._tune = tune - - self.create = to_streamed_response_wrapper( - tune.create, - ) - - @cached_property - def jobs(self) -> JobsResourceWithStreamingResponse: - return JobsResourceWithStreamingResponse(self._tune.jobs) - - @cached_property - def models(self) -> ModelsResourceWithStreamingResponse: - return ModelsResourceWithStreamingResponse(self._tune.models) - - -class AsyncTuneResourceWithStreamingResponse: - def __init__(self, tune: AsyncTuneResource) -> None: - self._tune = tune - - self.create = async_to_streamed_response_wrapper( - tune.create, - ) - 
- @cached_property - def jobs(self) -> AsyncJobsResourceWithStreamingResponse: - return AsyncJobsResourceWithStreamingResponse(self._tune.jobs) - - @cached_property - def models(self) -> AsyncModelsResourceWithStreamingResponse: - return AsyncModelsResourceWithStreamingResponse(self._tune.models) diff --git a/src/contextual/resources/datastores/datastores.py b/src/contextual/resources/datastores/datastores.py index 0eed851..12d3468 100644 --- a/src/contextual/resources/datastores/datastores.py +++ b/src/contextual/resources/datastores/datastores.py @@ -4,7 +4,7 @@ import httpx -from ...types import datastore_list_params, datastore_create_params +from ...types import datastore_list_params, datastore_create_params, datastore_update_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property @@ -28,6 +28,7 @@ from ...types.datastore import Datastore from ...types.datastore_metadata import DatastoreMetadata from ...types.create_datastore_response import CreateDatastoreResponse +from ...types.datastore_update_response import DatastoreUpdateResponse __all__ = ["DatastoresResource", "AsyncDatastoresResource"] @@ -60,6 +61,7 @@ def create( self, *, name: str, + configuration: datastore_create_params.Configuration | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -88,6 +90,8 @@ def create( Args: name: Name of the datastore + configuration: Configuration of the datastore. If not provided, default configuration is used. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -98,13 +102,68 @@ def create( """ return self._post( "/datastores", - body=maybe_transform({"name": name}, datastore_create_params.DatastoreCreateParams), + body=maybe_transform( + { + "name": name, + "configuration": configuration, + }, + datastore_create_params.DatastoreCreateParams, + ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=CreateDatastoreResponse, ) + def update( + self, + datastore_id: str, + *, + configuration: datastore_update_params.Configuration | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DatastoreUpdateResponse: + """ + Edit Datastore Configuration + + Args: + datastore_id: ID of the datastore to edit + + configuration: Configuration of the datastore. If not provided, current configuration is + retained. 
+ + name: Name of the datastore + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not datastore_id: + raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}") + return self._put( + f"/datastores/{datastore_id}", + body=maybe_transform( + { + "configuration": configuration, + "name": name, + }, + datastore_update_params.DatastoreUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DatastoreUpdateResponse, + ) + def list( self, *, @@ -304,6 +363,7 @@ async def create( self, *, name: str, + configuration: datastore_create_params.Configuration | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -332,6 +392,8 @@ async def create( Args: name: Name of the datastore + configuration: Configuration of the datastore. If not provided, default configuration is used. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -342,13 +404,68 @@ async def create( """ return await self._post( "/datastores", - body=await async_maybe_transform({"name": name}, datastore_create_params.DatastoreCreateParams), + body=await async_maybe_transform( + { + "name": name, + "configuration": configuration, + }, + datastore_create_params.DatastoreCreateParams, + ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=CreateDatastoreResponse, ) + async def update( + self, + datastore_id: str, + *, + configuration: datastore_update_params.Configuration | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DatastoreUpdateResponse: + """ + Edit Datastore Configuration + + Args: + datastore_id: ID of the datastore to edit + + configuration: Configuration of the datastore. If not provided, current configuration is + retained. 
+ + name: Name of the datastore + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not datastore_id: + raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}") + return await self._put( + f"/datastores/{datastore_id}", + body=await async_maybe_transform( + { + "configuration": configuration, + "name": name, + }, + datastore_update_params.DatastoreUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DatastoreUpdateResponse, + ) + def list( self, *, @@ -527,6 +644,9 @@ def __init__(self, datastores: DatastoresResource) -> None: self.create = to_raw_response_wrapper( datastores.create, ) + self.update = to_raw_response_wrapper( + datastores.update, + ) self.list = to_raw_response_wrapper( datastores.list, ) @@ -552,6 +672,9 @@ def __init__(self, datastores: AsyncDatastoresResource) -> None: self.create = async_to_raw_response_wrapper( datastores.create, ) + self.update = async_to_raw_response_wrapper( + datastores.update, + ) self.list = async_to_raw_response_wrapper( datastores.list, ) @@ -577,6 +700,9 @@ def __init__(self, datastores: DatastoresResource) -> None: self.create = to_streamed_response_wrapper( datastores.create, ) + self.update = to_streamed_response_wrapper( + datastores.update, + ) self.list = to_streamed_response_wrapper( datastores.list, ) @@ -602,6 +728,9 @@ def __init__(self, datastores: AsyncDatastoresResource) -> None: self.create = async_to_streamed_response_wrapper( datastores.create, ) + self.update = async_to_streamed_response_wrapper( + datastores.update, + ) self.list = async_to_streamed_response_wrapper( datastores.list, ) diff --git a/src/contextual/resources/datastores/documents.py 
b/src/contextual/resources/datastores/documents.py index 5129491..5fb3604 100644 --- a/src/contextual/resources/datastores/documents.py +++ b/src/contextual/resources/datastores/documents.py @@ -20,9 +20,15 @@ ) from ...pagination import SyncDocumentsPage, AsyncDocumentsPage from ..._base_client import AsyncPaginator, make_request_options -from ...types.datastores import document_list_params, document_ingest_params, document_set_metadata_params +from ...types.datastores import ( + document_list_params, + document_ingest_params, + document_set_metadata_params, + document_get_parse_result_params, +) from ...types.datastores.document_metadata import DocumentMetadata from ...types.datastores.ingestion_response import IngestionResponse +from ...types.datastores.document_get_parse_result_response import DocumentGetParseResultResponse __all__ = ["DocumentsResource", "AsyncDocumentsResource"] @@ -52,21 +58,8 @@ def list( datastore_id: str, *, cursor: str | NotGiven = NOT_GIVEN, - ingestion_job_status: List[ - Literal[ - "pending", - "processing", - "retrying", - "completed", - "failed", - "cancelled", - "failed_to_provision", - "generating_data", - "training_in_progress", - "failed_to_generate_data", - "provisioning", - ] - ] + document_name_prefix: str | NotGiven = NOT_GIVEN, + ingestion_job_status: List[Literal["pending", "processing", "retrying", "completed", "failed", "cancelled"]] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, uploaded_after: Union[str, datetime] | NotGiven = NOT_GIVEN, @@ -93,6 +86,8 @@ def list( cursor: Cursor from the previous call to list documents, used to retrieve the next set of results + document_name_prefix: Filters documents with the given prefix. + ingestion_job_status: Filters documents whose ingestion job status matches (one of) the provided status(es). 
@@ -123,6 +118,7 @@ def list( query=maybe_transform( { "cursor": cursor, + "document_name_prefix": document_name_prefix, "ingestion_job_status": ingestion_job_status, "limit": limit, "uploaded_after": uploaded_after, @@ -175,6 +171,62 @@ def delete( cast_to=object, ) + def get_parse_result( + self, + document_id: str, + *, + datastore_id: str, + output_types: List[Literal["markdown-document", "markdown-per-page", "blocks-per-page"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DocumentGetParseResultResponse: + """ + Get the parse results that are generated during ingestion for a given document. + Retrieving parse results for existing documents ingested before the release of + this endpoint is not supported and will return a 404 error. + + Args: + datastore_id: Datastore ID of the datastore from which to retrieve the document + + document_id: Document ID of the document to retrieve details for + + output_types: The desired output format(s) of the parsed file. Must be `markdown-document`, + `markdown-per-page`, and/or `blocks-per-page`. Specify multiple values to get + multiple formats in the response. `markdown-document` parses the whole document + into a single concatenated markdown output. `markdown-per-page` provides + markdown output per page. `blocks-per-page` provides a structured JSON + representation of the content blocks on each page, sorted by reading order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not datastore_id: + raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}") + if not document_id: + raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}") + return self._get( + f"/datastores/{datastore_id}/documents/{document_id}/parse", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + {"output_types": output_types}, document_get_parse_result_params.DocumentGetParseResultParams + ), + ), + cast_to=DocumentGetParseResultResponse, + ) + def ingest( self, datastore_id: str, @@ -208,21 +260,23 @@ def ingest( file: File to ingest. - metadata: Metadata in `JSON` format. Metadata should be passed as a nested dictionary - structure where: + metadata: Metadata request in JSON format. `custom_metadata` is a flat dictionary + containing one or more key-value pairs, where each value must be a primitive + type (`str`, `bool`, `float`, or `int`). The default maximum metadata fields + that can be used is 15, contact support if more is needed.The combined size of + the metadata must not exceed **2 KB** when encoded as JSON.The strings with date + format must stay in date format or be avoided if not in date format.The + `custom_metadata.url` field is automatically included in returned attributions + during query time, if provided. - - The **metadata type** `custom_metadata` is mapped to a dictionary. - The - **dictionary keys** represent metadata attributes. - The **values** can be of - type `str`, `bool`, `float`, or `int`. 
- - **Example Metadata JSON:** + **Example Request Body:** ```json - metadata = { - "custom_metadata": { - "field1": "value1", - "field2": "value2" - } + { + "custom_metadata": { + "topic": "science", + "difficulty": 3 + } } ``` @@ -304,6 +358,7 @@ def set_metadata( *, datastore_id: str, custom_metadata: Dict[str, Union[bool, float, str]] | NotGiven = NOT_GIVEN, + custom_metadata_config: Dict[str, document_set_metadata_params.CustomMetadataConfig] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -321,6 +376,27 @@ def set_metadata( document_id: Document ID of the document to retrieve details for + custom_metadata: Custom metadata for the document, provided by the user at ingestion time.Must be + a JSON-serializable dictionary with string keys and simple primitive values + (str, int, float, bool). The total size must not exceed 2 KB.The strings with + date format must stay in date format or be avodied if not in date format.The + 'custom_metadata.url' field is automatically included in returned attributions + during query time, if provided.The default maximum metadata fields that can be + used is 15, contact support if more is needed. + + custom_metadata_config: A dictionary mapping metadata field names to the configuration to use for each + field. + + - If a metadata field is not present in the dictionary, the default configuration will be used. + + - If the dictionary is not provided, metadata will be added in chunks but will not be retrievable. + + Limits: - Maximum characters per metadata field (for prompt or rerank): 400 + + - Maximum number of metadata fields (for prompt or retrieval): 10 + + Contact support@contextual.ai to request quota increases. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -336,7 +412,11 @@ def set_metadata( return self._post( f"/datastores/{datastore_id}/documents/{document_id}/metadata", body=maybe_transform( - {"custom_metadata": custom_metadata}, document_set_metadata_params.DocumentSetMetadataParams + { + "custom_metadata": custom_metadata, + "custom_metadata_config": custom_metadata_config, + }, + document_set_metadata_params.DocumentSetMetadataParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout @@ -370,21 +450,8 @@ def list( datastore_id: str, *, cursor: str | NotGiven = NOT_GIVEN, - ingestion_job_status: List[ - Literal[ - "pending", - "processing", - "retrying", - "completed", - "failed", - "cancelled", - "failed_to_provision", - "generating_data", - "training_in_progress", - "failed_to_generate_data", - "provisioning", - ] - ] + document_name_prefix: str | NotGiven = NOT_GIVEN, + ingestion_job_status: List[Literal["pending", "processing", "retrying", "completed", "failed", "cancelled"]] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, uploaded_after: Union[str, datetime] | NotGiven = NOT_GIVEN, @@ -411,6 +478,8 @@ def list( cursor: Cursor from the previous call to list documents, used to retrieve the next set of results + document_name_prefix: Filters documents with the given prefix. + ingestion_job_status: Filters documents whose ingestion job status matches (one of) the provided status(es). 
@@ -441,6 +510,7 @@ def list( query=maybe_transform( { "cursor": cursor, + "document_name_prefix": document_name_prefix, "ingestion_job_status": ingestion_job_status, "limit": limit, "uploaded_after": uploaded_after, @@ -493,6 +563,62 @@ async def delete( cast_to=object, ) + async def get_parse_result( + self, + document_id: str, + *, + datastore_id: str, + output_types: List[Literal["markdown-document", "markdown-per-page", "blocks-per-page"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DocumentGetParseResultResponse: + """ + Get the parse results that are generated during ingestion for a given document. + Retrieving parse results for existing documents ingested before the release of + this endpoint is not supported and will return a 404 error. + + Args: + datastore_id: Datastore ID of the datastore from which to retrieve the document + + document_id: Document ID of the document to retrieve details for + + output_types: The desired output format(s) of the parsed file. Must be `markdown-document`, + `markdown-per-page`, and/or `blocks-per-page`. Specify multiple values to get + multiple formats in the response. `markdown-document` parses the whole document + into a single concatenated markdown output. `markdown-per-page` provides + markdown output per page. `blocks-per-page` provides a structured JSON + representation of the content blocks on each page, sorted by reading order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not datastore_id: + raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}") + if not document_id: + raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}") + return await self._get( + f"/datastores/{datastore_id}/documents/{document_id}/parse", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"output_types": output_types}, document_get_parse_result_params.DocumentGetParseResultParams + ), + ), + cast_to=DocumentGetParseResultResponse, + ) + async def ingest( self, datastore_id: str, @@ -526,21 +652,23 @@ async def ingest( file: File to ingest. - metadata: Metadata in `JSON` format. Metadata should be passed as a nested dictionary - structure where: - - - The **metadata type** `custom_metadata` is mapped to a dictionary. - The - **dictionary keys** represent metadata attributes. - The **values** can be of - type `str`, `bool`, `float`, or `int`. + metadata: Metadata request in JSON format. `custom_metadata` is a flat dictionary + containing one or more key-value pairs, where each value must be a primitive + type (`str`, `bool`, `float`, or `int`). The default maximum metadata fields + that can be used is 15, contact support if more is needed.The combined size of + the metadata must not exceed **2 KB** when encoded as JSON.The strings with date + format must stay in date format or be avoided if not in date format.The + `custom_metadata.url` field is automatically included in returned attributions + during query time, if provided. 
- **Example Metadata JSON:** + **Example Request Body:** ```json - metadata = { - "custom_metadata": { - "field1": "value1", - "field2": "value2" - } + { + "custom_metadata": { + "topic": "science", + "difficulty": 3 + } } ``` @@ -622,6 +750,7 @@ async def set_metadata( *, datastore_id: str, custom_metadata: Dict[str, Union[bool, float, str]] | NotGiven = NOT_GIVEN, + custom_metadata_config: Dict[str, document_set_metadata_params.CustomMetadataConfig] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -639,6 +768,27 @@ async def set_metadata( document_id: Document ID of the document to retrieve details for + custom_metadata: Custom metadata for the document, provided by the user at ingestion time.Must be + a JSON-serializable dictionary with string keys and simple primitive values + (str, int, float, bool). The total size must not exceed 2 KB.The strings with + date format must stay in date format or be avodied if not in date format.The + 'custom_metadata.url' field is automatically included in returned attributions + during query time, if provided.The default maximum metadata fields that can be + used is 15, contact support if more is needed. + + custom_metadata_config: A dictionary mapping metadata field names to the configuration to use for each + field. + + - If a metadata field is not present in the dictionary, the default configuration will be used. + + - If the dictionary is not provided, metadata will be added in chunks but will not be retrievable. + + Limits: - Maximum characters per metadata field (for prompt or rerank): 400 + + - Maximum number of metadata fields (for prompt or retrieval): 10 + + Contact support@contextual.ai to request quota increases. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -654,7 +804,11 @@ async def set_metadata( return await self._post( f"/datastores/{datastore_id}/documents/{document_id}/metadata", body=await async_maybe_transform( - {"custom_metadata": custom_metadata}, document_set_metadata_params.DocumentSetMetadataParams + { + "custom_metadata": custom_metadata, + "custom_metadata_config": custom_metadata_config, + }, + document_set_metadata_params.DocumentSetMetadataParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout @@ -673,6 +827,9 @@ def __init__(self, documents: DocumentsResource) -> None: self.delete = to_raw_response_wrapper( documents.delete, ) + self.get_parse_result = to_raw_response_wrapper( + documents.get_parse_result, + ) self.ingest = to_raw_response_wrapper( documents.ingest, ) @@ -694,6 +851,9 @@ def __init__(self, documents: AsyncDocumentsResource) -> None: self.delete = async_to_raw_response_wrapper( documents.delete, ) + self.get_parse_result = async_to_raw_response_wrapper( + documents.get_parse_result, + ) self.ingest = async_to_raw_response_wrapper( documents.ingest, ) @@ -715,6 +875,9 @@ def __init__(self, documents: DocumentsResource) -> None: self.delete = to_streamed_response_wrapper( documents.delete, ) + self.get_parse_result = to_streamed_response_wrapper( + documents.get_parse_result, + ) self.ingest = to_streamed_response_wrapper( documents.ingest, ) @@ -736,6 +899,9 @@ def __init__(self, documents: AsyncDocumentsResource) -> None: self.delete = async_to_streamed_response_wrapper( documents.delete, ) + self.get_parse_result = async_to_streamed_response_wrapper( + documents.get_parse_result, + ) self.ingest = async_to_streamed_response_wrapper( documents.ingest, ) diff --git a/src/contextual/resources/generate.py b/src/contextual/resources/generate.py index 388f81d..3dae617 100644 --- 
a/src/contextual/resources/generate.py +++ b/src/contextual/resources/generate.py @@ -81,7 +81,7 @@ def create( messages: List of messages in the conversation so far. The last message must be from the user. - model: The version of the Contextual's GLM to use. Currently, we just have "v1". + model: The version of the Contextual's GLM to use. Currently, we have `v1` and `v2`. avoid_commentary: Flag to indicate whether the model should avoid providing additional commentary in responses. Commentary is conversational in nature and does not contain @@ -189,7 +189,7 @@ async def create( messages: List of messages in the conversation so far. The last message must be from the user. - model: The version of the Contextual's GLM to use. Currently, we just have "v1". + model: The version of the Contextual's GLM to use. Currently, we have `v1` and `v2`. avoid_commentary: Flag to indicate whether the model should avoid providing additional commentary in responses. Commentary is conversational in nature and does not contain diff --git a/src/contextual/resources/parse.py b/src/contextual/resources/parse.py index 24806cf..2485b22 100644 --- a/src/contextual/resources/parse.py +++ b/src/contextual/resources/parse.py @@ -93,7 +93,7 @@ def create( commentary; this mode is in beta. Not permitted in `basic` parsing_mode. max_split_table_cells: Threshold number of table cells beyond which large tables are split if - `enable_split_tables` is True. Not permitted in `basic` parsing_mode. + `enable_split_tables` is True. Must be null if `enable_split_tables` is False. page_range: Optional string representing page range to be parsed. Format: comma-separated indexes (0-based, e.g. `0,1,2,5,6`), or ranges inclusive of both ends (e.g. 
@@ -228,6 +228,8 @@ def job_status( def jobs( self, *, + cursor: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, uploaded_after: Union[str, datetime] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -243,6 +245,16 @@ def jobs( timestamp. Args: + cursor: Cursor from the previous call to list parse jobs, used to retrieve the next set + of results + + limit: Maximum number of parse jobs to return + + uploaded_after: Filters to only documents uploaded to `/parse` at or after specified UTC + timestamp. If not provided, or if the provided timestamp is before the maximum + parse job retention period (30 days), the maximum retention period will be used + instead. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -258,7 +270,14 @@ def jobs( extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=maybe_transform({"uploaded_after": uploaded_after}, parse_jobs_params.ParseJobsParams), + query=maybe_transform( + { + "cursor": cursor, + "limit": limit, + "uploaded_after": uploaded_after, + }, + parse_jobs_params.ParseJobsParams, + ), ), cast_to=ParseJobsResponse, ) @@ -329,7 +348,7 @@ async def create( commentary; this mode is in beta. Not permitted in `basic` parsing_mode. max_split_table_cells: Threshold number of table cells beyond which large tables are split if - `enable_split_tables` is True. Not permitted in `basic` parsing_mode. + `enable_split_tables` is True. Must be null if `enable_split_tables` is False. page_range: Optional string representing page range to be parsed. Format: comma-separated indexes (0-based, e.g. `0,1,2,5,6`), or ranges inclusive of both ends (e.g. 
@@ -466,6 +485,8 @@ async def job_status( async def jobs( self, *, + cursor: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, uploaded_after: Union[str, datetime] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -481,6 +502,16 @@ async def jobs( timestamp. Args: + cursor: Cursor from the previous call to list parse jobs, used to retrieve the next set + of results + + limit: Maximum number of parse jobs to return + + uploaded_after: Filters to only documents uploaded to `/parse` at or after specified UTC + timestamp. If not provided, or if the provided timestamp is before the maximum + parse job retention period (30 days), the maximum retention period will be used + instead. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -497,7 +528,12 @@ async def jobs( extra_body=extra_body, timeout=timeout, query=await async_maybe_transform( - {"uploaded_after": uploaded_after}, parse_jobs_params.ParseJobsParams + { + "cursor": cursor, + "limit": limit, + "uploaded_after": uploaded_after, + }, + parse_jobs_params.ParseJobsParams, ), ), cast_to=ParseJobsResponse, diff --git a/src/contextual/resources/rerank.py b/src/contextual/resources/rerank.py index 425fc35..81d48cc 100644 --- a/src/contextual/resources/rerank.py +++ b/src/contextual/resources/rerank.py @@ -63,7 +63,7 @@ def create( Rank a list of documents according to their relevance to a query primarily and your custom instructions secondarily. We evaluated the model on instructions for recency, document type, source, and metadata, and it can generalize to other - instructions as well. + instructions as well. The reranker supports multilinguality. The total request cannot exceed 400,000 tokens. 
The combined length of the query, instruction and any document with its metadata must not exceed 8,000 @@ -80,8 +80,10 @@ def create( documents: The texts to be reranked according to their relevance to the query and the optional instruction - model: The version of the reranker to use. Currently, we just have - "ctxl-rerank-en-v1-instruct". + model: + The version of the reranker to use. Currently, we have: + "ctxl-rerank-v2-instruct-multilingual", + "ctxl-rerank-v2-instruct-multilingual-mini", "ctxl-rerank-v1-instruct". query: The string against which documents will be ranked for relevance @@ -167,7 +169,7 @@ async def create( Rank a list of documents according to their relevance to a query primarily and your custom instructions secondarily. We evaluated the model on instructions for recency, document type, source, and metadata, and it can generalize to other - instructions as well. + instructions as well. The reranker supports multilinguality. The total request cannot exceed 400,000 tokens. The combined length of the query, instruction and any document with its metadata must not exceed 8,000 @@ -184,8 +186,10 @@ async def create( documents: The texts to be reranked according to their relevance to the query and the optional instruction - model: The version of the reranker to use. Currently, we just have - "ctxl-rerank-en-v1-instruct". + model: + The version of the reranker to use. Currently, we have: + "ctxl-rerank-v2-instruct-multilingual", + "ctxl-rerank-v2-instruct-multilingual-mini", "ctxl-rerank-v1-instruct". 
query: The string against which documents will be ranked for relevance diff --git a/src/contextual/resources/users.py b/src/contextual/resources/users.py index 74dd740..e699f9c 100644 --- a/src/contextual/resources/users.py +++ b/src/contextual/resources/users.py @@ -51,7 +51,9 @@ def update( self, *, email: str, + agent_level_roles: List[Literal["AGENT_LEVEL_USER"]] | NotGiven = NOT_GIVEN, is_tenant_admin: bool | NotGiven = NOT_GIVEN, + per_agent_roles: Iterable[user_update_params.PerAgentRole] | NotGiven = NOT_GIVEN, roles: List[ Literal[ "VISITOR", @@ -62,6 +64,7 @@ def update( "CONTEXTUAL_INTERNAL_STAFF_USER", "TENANT_ADMIN", "SUPER_ADMIN", + "SERVICE_ACCOUNT", ] ] | NotGiven = NOT_GIVEN, @@ -80,8 +83,14 @@ def update( Args: email: The email of the user + agent_level_roles: The user level roles of the user for agent level roles. + is_tenant_admin: Flag indicating if the user is a tenant admin + per_agent_roles: Per agent level roles for the user. If a user is granted any role under + `agent_level_roles`, then the user has that role for all the agents. Only the + roles that need to be updated should be part of this. + roles: The user level roles of the user. 
extra_headers: Send extra headers @@ -97,7 +106,9 @@ def update( body=maybe_transform( { "email": email, + "agent_level_roles": agent_level_roles, "is_tenant_admin": is_tenant_admin, + "per_agent_roles": per_agent_roles, "roles": roles, }, user_update_params.UserUpdateParams, @@ -268,7 +279,9 @@ async def update( self, *, email: str, + agent_level_roles: List[Literal["AGENT_LEVEL_USER"]] | NotGiven = NOT_GIVEN, is_tenant_admin: bool | NotGiven = NOT_GIVEN, + per_agent_roles: Iterable[user_update_params.PerAgentRole] | NotGiven = NOT_GIVEN, roles: List[ Literal[ "VISITOR", @@ -279,6 +292,7 @@ async def update( "CONTEXTUAL_INTERNAL_STAFF_USER", "TENANT_ADMIN", "SUPER_ADMIN", + "SERVICE_ACCOUNT", ] ] | NotGiven = NOT_GIVEN, @@ -297,8 +311,14 @@ async def update( Args: email: The email of the user + agent_level_roles: The user level roles of the user for agent level roles. + is_tenant_admin: Flag indicating if the user is a tenant admin + per_agent_roles: Per agent level roles for the user. If a user is granted any role under + `agent_level_roles`, then the user has that role for all the agents. Only the + roles that need to be updated should be part of this. + roles: The user level roles of the user. extra_headers: Send extra headers @@ -314,7 +334,9 @@ async def update( body=await async_maybe_transform( { "email": email, + "agent_level_roles": agent_level_roles, "is_tenant_admin": is_tenant_admin, + "per_agent_roles": per_agent_roles, "roles": roles, }, user_update_params.UserUpdateParams, diff --git a/src/contextual/types/__init__.py b/src/contextual/types/__init__.py index a57d67b..6ac75ea 100644 --- a/src/contextual/types/__init__.py +++ b/src/contextual/types/__init__.py @@ -2,6 +2,8 @@ from __future__ import annotations +from . import datastores, agent_configs, agent_metadata, filter_and_rerank_config +from .. 
import _compat from .agent import Agent as Agent from .datastore import Datastore as Datastore from .agent_configs import AgentConfigs as AgentConfigs @@ -36,13 +38,30 @@ from .user_deactivate_params import UserDeactivateParams as UserDeactivateParams from .agent_metadata_response import AgentMetadataResponse as AgentMetadataResponse from .datastore_create_params import DatastoreCreateParams as DatastoreCreateParams +from .datastore_update_params import DatastoreUpdateParams as DatastoreUpdateParams from .filter_and_rerank_config import FilterAndRerankConfig as FilterAndRerankConfig from .generate_create_response import GenerateCreateResponse as GenerateCreateResponse from .generate_response_config import GenerateResponseConfig as GenerateResponseConfig from .list_datastores_response import ListDatastoresResponse as ListDatastoresResponse from .parse_job_results_params import ParseJobResultsParams as ParseJobResultsParams from .create_datastore_response import CreateDatastoreResponse as CreateDatastoreResponse +from .datastore_update_response import DatastoreUpdateResponse as DatastoreUpdateResponse from .parse_job_status_response import ParseJobStatusResponse as ParseJobStatusResponse from .parse_job_results_response import ParseJobResultsResponse as ParseJobResultsResponse from .filter_and_rerank_config_param import FilterAndRerankConfigParam as FilterAndRerankConfigParam from .generate_response_config_param import GenerateResponseConfigParam as GenerateResponseConfigParam + +# Rebuild cyclical models only after all modules are imported. +# This ensures that, when building the deferred (due to cyclical references) model schema, +# Pydantic can resolve the necessary references. +# See: https://github.com/pydantic/pydantic/issues/11250 for more context. 
+if _compat.PYDANTIC_V2: + datastores.composite_metadata_filter.CompositeMetadataFilter.model_rebuild(_parent_namespace_depth=0) + agent_configs.AgentConfigs.model_rebuild(_parent_namespace_depth=0) + agent_metadata.AgentMetadata.model_rebuild(_parent_namespace_depth=0) + filter_and_rerank_config.FilterAndRerankConfig.model_rebuild(_parent_namespace_depth=0) +else: + datastores.composite_metadata_filter.CompositeMetadataFilter.update_forward_refs() # type: ignore + agent_configs.AgentConfigs.update_forward_refs() # type: ignore + agent_metadata.AgentMetadata.update_forward_refs() # type: ignore + filter_and_rerank_config.FilterAndRerankConfig.update_forward_refs() # type: ignore diff --git a/src/contextual/types/agent_configs.py b/src/contextual/types/agent_configs.py index fcc4397..6acecb7 100644 --- a/src/contextual/types/agent_configs.py +++ b/src/contextual/types/agent_configs.py @@ -1,18 +1,33 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from __future__ import annotations + from typing import Optional from .._models import BaseModel from .global_config import GlobalConfig from .retrieval_config import RetrievalConfig -from .filter_and_rerank_config import FilterAndRerankConfig from .generate_response_config import GenerateResponseConfig -__all__ = ["AgentConfigs"] +__all__ = ["AgentConfigs", "ReformulationConfig"] + + +class ReformulationConfig(BaseModel): + enable_query_decomposition: Optional[bool] = None + """Whether to enable query decomposition.""" + + enable_query_expansion: Optional[bool] = None + """Whether to enable query expansion.""" + + query_decomposition_prompt: Optional[str] = None + """The prompt to use for query decomposition.""" + + query_expansion_prompt: Optional[str] = None + """The prompt to use for query expansion.""" class AgentConfigs(BaseModel): - filter_and_rerank_config: Optional[FilterAndRerankConfig] = None + filter_and_rerank_config: Optional["FilterAndRerankConfig"] = None 
"""Parameters that affect filtering and reranking of retrieved knowledge""" generate_response_config: Optional[GenerateResponseConfig] = None @@ -21,5 +36,11 @@ class AgentConfigs(BaseModel): global_config: Optional[GlobalConfig] = None """Parameters that affect the agent's overall RAG workflow""" + reformulation_config: Optional[ReformulationConfig] = None + """Parameters that affect the agent's query reformulation""" + retrieval_config: Optional[RetrievalConfig] = None """Parameters that affect how the agent retrieves from datastore(s)""" + + +from .filter_and_rerank_config import FilterAndRerankConfig diff --git a/src/contextual/types/agent_configs_param.py b/src/contextual/types/agent_configs_param.py index 75b329c..909c860 100644 --- a/src/contextual/types/agent_configs_param.py +++ b/src/contextual/types/agent_configs_param.py @@ -6,14 +6,27 @@ from .global_config_param import GlobalConfigParam from .retrieval_config_param import RetrievalConfigParam -from .filter_and_rerank_config_param import FilterAndRerankConfigParam from .generate_response_config_param import GenerateResponseConfigParam -__all__ = ["AgentConfigsParam"] +__all__ = ["AgentConfigsParam", "ReformulationConfig"] + + +class ReformulationConfig(TypedDict, total=False): + enable_query_decomposition: bool + """Whether to enable query decomposition.""" + + enable_query_expansion: bool + """Whether to enable query expansion.""" + + query_decomposition_prompt: str + """The prompt to use for query decomposition.""" + + query_expansion_prompt: str + """The prompt to use for query expansion.""" class AgentConfigsParam(TypedDict, total=False): - filter_and_rerank_config: FilterAndRerankConfigParam + filter_and_rerank_config: "FilterAndRerankConfigParam" """Parameters that affect filtering and reranking of retrieved knowledge""" generate_response_config: GenerateResponseConfigParam @@ -22,5 +35,11 @@ class AgentConfigsParam(TypedDict, total=False): global_config: GlobalConfigParam """Parameters that 
affect the agent's overall RAG workflow""" + reformulation_config: ReformulationConfig + """Parameters that affect the agent's query reformulation""" + retrieval_config: RetrievalConfigParam """Parameters that affect how the agent retrieves from datastore(s)""" + + +from .filter_and_rerank_config_param import FilterAndRerankConfigParam diff --git a/src/contextual/types/agent_create_params.py b/src/contextual/types/agent_create_params.py index b08ecfa..f6613de 100644 --- a/src/contextual/types/agent_create_params.py +++ b/src/contextual/types/agent_create_params.py @@ -5,8 +5,6 @@ from typing import List from typing_extensions import Required, TypedDict -from .agent_configs_param import AgentConfigsParam - __all__ = ["AgentCreateParams"] @@ -14,7 +12,7 @@ class AgentCreateParams(TypedDict, total=False): name: Required[str] """Name of the agent""" - agent_configs: AgentConfigsParam + agent_configs: "AgentConfigsParam" """The following advanced parameters are experimental and subject to change.""" datastore_ids: List[str] @@ -29,6 +27,9 @@ class AgentCreateParams(TypedDict, total=False): given query and filters out irrelevant chunks. """ + multiturn_system_prompt: str + """Instructions on how the agent should handle multi-turn conversations.""" + no_retrieval_system_prompt: str """ Instructions on how the agent should respond when there are no relevant @@ -49,3 +50,6 @@ class AgentCreateParams(TypedDict, total=False): Note that we do not guarantee that the system will follow these instructions exactly. """ + + +from .agent_configs_param import AgentConfigsParam diff --git a/src/contextual/types/agent_metadata.py b/src/contextual/types/agent_metadata.py index f85393d..a6b12e8 100644 --- a/src/contextual/types/agent_metadata.py +++ b/src/contextual/types/agent_metadata.py @@ -1,9 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from __future__ import annotations + from typing import List, Optional from .._models import BaseModel -from .agent_configs import AgentConfigs __all__ = ["AgentMetadata", "AgentUsages"] @@ -26,7 +27,10 @@ class AgentMetadata(BaseModel): name: str """Name of the agent""" - agent_configs: Optional[AgentConfigs] = None + template_name: str + """The template used to create this agent.""" + + agent_configs: Optional["AgentConfigs"] = None """The following advanced parameters are experimental and subject to change.""" agent_usages: Optional[AgentUsages] = None @@ -49,6 +53,9 @@ class AgentMetadata(BaseModel): tuned model to the default model. """ + multiturn_system_prompt: Optional[str] = None + """Instructions on how the agent should handle multi-turn conversations.""" + no_retrieval_system_prompt: Optional[str] = None """ Instructions on how the agent should respond when there are no relevant @@ -69,3 +76,6 @@ class AgentMetadata(BaseModel): Note that we do not guarantee that the system will follow these instructions exactly. """ + + +from .agent_configs import AgentConfigs diff --git a/src/contextual/types/agent_metadata_response.py b/src/contextual/types/agent_metadata_response.py index 9ad80e8..e514568 100644 --- a/src/contextual/types/agent_metadata_response.py +++ b/src/contextual/types/agent_metadata_response.py @@ -1,10 +1,11 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from __future__ import annotations + from typing import List, Union, Optional from typing_extensions import TypeAlias from .._models import BaseModel -from .agent_metadata import AgentMetadata __all__ = ["AgentMetadataResponse", "GetTwilightAgentResponse", "GetTwilightAgentResponseAgentUsages"] @@ -27,6 +28,8 @@ class GetTwilightAgentResponse(BaseModel): name: str """Name of the agent""" + template_name: str + agent_configs: Optional[object] = None """The following advanced parameters are experimental and subject to change.""" @@ -37,4 +40,9 @@ class GetTwilightAgentResponse(BaseModel): """Description of the agent""" +from .agent_metadata import AgentMetadata + +# Made a one-time change here to import the AgentMetadata before union, instead of using forward reference. +# Forward reference here violates the Pydantic type system, so it doesn't quite work. +# If there is any issue (circular import, etc) regarding this in the future, we can then find another solution. AgentMetadataResponse: TypeAlias = Union[AgentMetadata, GetTwilightAgentResponse] diff --git a/src/contextual/types/agent_update_params.py b/src/contextual/types/agent_update_params.py index 35b156a..9f58a1c 100644 --- a/src/contextual/types/agent_update_params.py +++ b/src/contextual/types/agent_update_params.py @@ -5,13 +5,11 @@ from typing import List from typing_extensions import TypedDict -from .agent_configs_param import AgentConfigsParam - __all__ = ["AgentUpdateParams"] class AgentUpdateParams(TypedDict, total=False): - agent_configs: AgentConfigsParam + agent_configs: "AgentConfigsParam" """The following advanced parameters are experimental and subject to change.""" datastore_ids: List[str] @@ -31,6 +29,9 @@ class AgentUpdateParams(TypedDict, total=False): tuned model to the default model. 
""" + multiturn_system_prompt: str + """Instructions on how the agent should handle multi-turn conversations.""" + no_retrieval_system_prompt: str """ Instructions on how the agent should respond when there are no relevant @@ -51,3 +52,6 @@ class AgentUpdateParams(TypedDict, total=False): Note that we do not guarantee that the system will follow these instructions exactly. """ + + +from .agent_configs_param import AgentConfigsParam diff --git a/src/contextual/types/agents/__init__.py b/src/contextual/types/agents/__init__.py index dd29556..561c07d 100644 --- a/src/contextual/types/agents/__init__.py +++ b/src/contextual/types/agents/__init__.py @@ -3,16 +3,9 @@ from __future__ import annotations from .query_response import QueryResponse as QueryResponse -from .dataset_metadata import DatasetMetadata as DatasetMetadata -from .tune_create_params import TuneCreateParams as TuneCreateParams from .query_create_params import QueryCreateParams as QueryCreateParams -from .create_tune_response import CreateTuneResponse as CreateTuneResponse from .query_metrics_params import QueryMetricsParams as QueryMetricsParams from .query_feedback_params import QueryFeedbackParams as QueryFeedbackParams -from .evaluate_create_params import EvaluateCreateParams as EvaluateCreateParams -from .list_datasets_response import ListDatasetsResponse as ListDatasetsResponse from .query_metrics_response import QueryMetricsResponse as QueryMetricsResponse -from .create_dataset_response import CreateDatasetResponse as CreateDatasetResponse from .retrieval_info_response import RetrievalInfoResponse as RetrievalInfoResponse -from .create_evaluation_response import CreateEvaluationResponse as CreateEvaluationResponse from .query_retrieval_info_params import QueryRetrievalInfoParams as QueryRetrievalInfoParams diff --git a/src/contextual/types/agents/create_dataset_response.py b/src/contextual/types/agents/create_dataset_response.py deleted file mode 100644 index b245f27..0000000 --- 
a/src/contextual/types/agents/create_dataset_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["CreateDatasetResponse"] - - -class CreateDatasetResponse(BaseModel): - name: str - """Name of the dataset""" - - type: Literal["tuning_set", "evaluation_set", "evaluation_set_prediction", "evaluation_run_result"] - """Type of the dataset""" - - version: str - """Version number of the dataset""" diff --git a/src/contextual/types/agents/create_evaluation_response.py b/src/contextual/types/agents/create_evaluation_response.py deleted file mode 100644 index 0117583..0000000 --- a/src/contextual/types/agents/create_evaluation_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from ..._models import BaseModel - -__all__ = ["CreateEvaluationResponse"] - - -class CreateEvaluationResponse(BaseModel): - id: str - """ID of the launched evaluation""" diff --git a/src/contextual/types/agents/create_tune_response.py b/src/contextual/types/agents/create_tune_response.py deleted file mode 100644 index 9ab6762..0000000 --- a/src/contextual/types/agents/create_tune_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from ..._models import BaseModel - -__all__ = ["CreateTuneResponse"] - - -class CreateTuneResponse(BaseModel): - id: str - """ID of the created tune job""" diff --git a/src/contextual/types/agents/dataset_metadata.py b/src/contextual/types/agents/dataset_metadata.py deleted file mode 100644 index 62ca759..0000000 --- a/src/contextual/types/agents/dataset_metadata.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from datetime import datetime -from typing_extensions import Literal - -from pydantic import Field as FieldInfo - -from ..._models import BaseModel - -__all__ = ["DatasetMetadata"] - - -class DatasetMetadata(BaseModel): - created_at: datetime - """Timestamp indicating when the dataset was created""" - - num_samples: int - """Number of samples in the dataset""" - - schema_: object = FieldInfo(alias="schema") - """Schema of the dataset""" - - status: Literal["validated", "validating", "failed"] - """Validation status of the dataset""" - - type: Literal["tuning_set", "evaluation_set", "evaluation_set_prediction", "evaluation_run_result"] - """Type of the dataset""" - - version: str - """Version of the dataset""" diff --git a/src/contextual/types/agents/datasets/__init__.py b/src/contextual/types/agents/datasets/__init__.py deleted file mode 100644 index 56eabef..0000000 --- a/src/contextual/types/agents/datasets/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .tune_list_params import TuneListParams as TuneListParams -from .tune_create_params import TuneCreateParams as TuneCreateParams -from .tune_update_params import TuneUpdateParams as TuneUpdateParams -from .evaluate_list_params import EvaluateListParams as EvaluateListParams -from .tune_metadata_params import TuneMetadataParams as TuneMetadataParams -from .tune_retrieve_params import TuneRetrieveParams as TuneRetrieveParams -from .evaluate_create_params import EvaluateCreateParams as EvaluateCreateParams -from .evaluate_update_params import EvaluateUpdateParams as EvaluateUpdateParams -from .evaluate_metadata_params import EvaluateMetadataParams as EvaluateMetadataParams -from .evaluate_retrieve_params import EvaluateRetrieveParams as EvaluateRetrieveParams diff --git a/src/contextual/types/agents/datasets/evaluate_create_params.py b/src/contextual/types/agents/datasets/evaluate_create_params.py deleted file mode 100644 index 04d8dab..0000000 --- a/src/contextual/types/agents/datasets/evaluate_create_params.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -from ...._types import FileTypes - -__all__ = ["EvaluateCreateParams"] - - -class EvaluateCreateParams(TypedDict, total=False): - dataset_name: Required[str] - """Name of the evaluation dataset""" - - dataset_type: Required[Literal["evaluation_set"]] - """Type of evaluation dataset which determines its schema and validation rules.""" - - file: Required[FileTypes] - """JSONL or CSV file containing the evaluation dataset""" diff --git a/src/contextual/types/agents/datasets/evaluate_list_params.py b/src/contextual/types/agents/datasets/evaluate_list_params.py deleted file mode 100644 index 611f6d0..0000000 --- a/src/contextual/types/agents/datasets/evaluate_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["EvaluateListParams"] - - -class EvaluateListParams(TypedDict, total=False): - dataset_name: str - """Optional dataset name to filter the results by. - - If provided, only versions from that dataset are listed. - """ diff --git a/src/contextual/types/agents/datasets/evaluate_metadata_params.py b/src/contextual/types/agents/datasets/evaluate_metadata_params.py deleted file mode 100644 index 003df1e..0000000 --- a/src/contextual/types/agents/datasets/evaluate_metadata_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["EvaluateMetadataParams"] - - -class EvaluateMetadataParams(TypedDict, total=False): - agent_id: Required[str] - """Agent ID associated with the evaluation dataset""" - - version: str - """Version number of the dataset. 
Defaults to the latest version if not specified.""" diff --git a/src/contextual/types/agents/datasets/evaluate_retrieve_params.py b/src/contextual/types/agents/datasets/evaluate_retrieve_params.py deleted file mode 100644 index b980137..0000000 --- a/src/contextual/types/agents/datasets/evaluate_retrieve_params.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["EvaluateRetrieveParams"] - - -class EvaluateRetrieveParams(TypedDict, total=False): - agent_id: Required[str] - """Agent ID associated with the evaluation dataset""" - - batch_size: int - """Batch size for processing""" - - version: str - """Version number of the evaluation dataset to retrieve. - - Defaults to the latest version if not specified. - """ diff --git a/src/contextual/types/agents/datasets/evaluate_update_params.py b/src/contextual/types/agents/datasets/evaluate_update_params.py deleted file mode 100644 index 7dedca3..0000000 --- a/src/contextual/types/agents/datasets/evaluate_update_params.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -from ...._types import FileTypes - -__all__ = ["EvaluateUpdateParams"] - - -class EvaluateUpdateParams(TypedDict, total=False): - agent_id: Required[str] - """Agent ID associated with the evaluation dataset""" - - dataset_type: Required[Literal["evaluation_set"]] - """Type of evaluation dataset which determines its schema and validation rules. - - Must match the `dataset_type` used at dataset creation time. 
- """ - - file: Required[FileTypes] - """JSONL or CSV file containing the entries to append to the evaluation dataset""" diff --git a/src/contextual/types/agents/datasets/tune_create_params.py b/src/contextual/types/agents/datasets/tune_create_params.py deleted file mode 100644 index a2504a5..0000000 --- a/src/contextual/types/agents/datasets/tune_create_params.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -from ...._types import FileTypes - -__all__ = ["TuneCreateParams"] - - -class TuneCreateParams(TypedDict, total=False): - dataset_name: Required[str] - """Name of the tune dataset""" - - dataset_type: Required[Literal["tuning_set"]] - """Type of tune dataset which determines its schema and validation rules.""" - - file: Required[FileTypes] - """JSONL or CSV file containing the tune dataset""" diff --git a/src/contextual/types/agents/datasets/tune_list_params.py b/src/contextual/types/agents/datasets/tune_list_params.py deleted file mode 100644 index 59702ad..0000000 --- a/src/contextual/types/agents/datasets/tune_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["TuneListParams"] - - -class TuneListParams(TypedDict, total=False): - dataset_name: str - """Optional dataset name to filter the results by. - - If provided, only versions from that dataset are listed. - """ diff --git a/src/contextual/types/agents/datasets/tune_metadata_params.py b/src/contextual/types/agents/datasets/tune_metadata_params.py deleted file mode 100644 index a935263..0000000 --- a/src/contextual/types/agents/datasets/tune_metadata_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["TuneMetadataParams"] - - -class TuneMetadataParams(TypedDict, total=False): - agent_id: Required[str] - """Agent ID associated with the tune dataset""" - - version: str - """Version number of the dataset. Defaults to the latest version if not specified.""" diff --git a/src/contextual/types/agents/datasets/tune_retrieve_params.py b/src/contextual/types/agents/datasets/tune_retrieve_params.py deleted file mode 100644 index 2192e7e..0000000 --- a/src/contextual/types/agents/datasets/tune_retrieve_params.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["TuneRetrieveParams"] - - -class TuneRetrieveParams(TypedDict, total=False): - agent_id: Required[str] - """Agent ID associated with the tune dataset""" - - batch_size: int - """Batch size for processing""" - - version: str - """Version number of the tune dataset to retrieve. - - Defaults to the latest version if not specified. - """ diff --git a/src/contextual/types/agents/datasets/tune_update_params.py b/src/contextual/types/agents/datasets/tune_update_params.py deleted file mode 100644 index 8e08aea..0000000 --- a/src/contextual/types/agents/datasets/tune_update_params.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -from ...._types import FileTypes - -__all__ = ["TuneUpdateParams"] - - -class TuneUpdateParams(TypedDict, total=False): - agent_id: Required[str] - """Agent ID associated with the tune dataset""" - - dataset_type: Required[Literal["tuning_set"]] - """Type of tune dataset which determines its schema and validation rules. 
- - Must match the `dataset_type` used at dataset creation time. - """ - - file: Required[FileTypes] - """JSONL or CSV file containing the entries to append to the tune dataset""" diff --git a/src/contextual/types/agents/evaluate/__init__.py b/src/contextual/types/agents/evaluate/__init__.py deleted file mode 100644 index 7fc13a2..0000000 --- a/src/contextual/types/agents/evaluate/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .evaluation_job_metadata import EvaluationJobMetadata as EvaluationJobMetadata -from .list_evaluation_jobs_response import ListEvaluationJobsResponse as ListEvaluationJobsResponse diff --git a/src/contextual/types/agents/evaluate/evaluation_job_metadata.py b/src/contextual/types/agents/evaluate/evaluation_job_metadata.py deleted file mode 100644 index c0d9706..0000000 --- a/src/contextual/types/agents/evaluate/evaluation_job_metadata.py +++ /dev/null @@ -1,51 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["EvaluationJobMetadata", "JobMetadata"] - - -class JobMetadata(BaseModel): - num_failed_predictions: Optional[int] = None - """Number of predictions that failed during the evaluation job""" - - num_predictions: Optional[int] = None - """Total number of predictions made during the evaluation job""" - - num_processed_predictions: Optional[int] = None - """Number of predictions that were processed during the evaluation job""" - - num_successful_predictions: Optional[int] = None - """Number of predictions that were successful during the evaluation job""" - - -class EvaluationJobMetadata(BaseModel): - dataset_name: str - """Dataset name containing the individual results of the evaluation round""" - - job_metadata: JobMetadata - """ - Metadata of the evaluation round with the number of predictions, failed - predictions, and successful predictions. - """ - - metrics: object - """Results of the evaluation round, grouped by each metric""" - - status: Literal[ - "pending", - "processing", - "retrying", - "completed", - "failed", - "cancelled", - "failed_to_provision", - "generating_data", - "training_in_progress", - "failed_to_generate_data", - "provisioning", - ] - """Status of the evaluation round""" diff --git a/src/contextual/types/agents/evaluate/list_evaluation_jobs_response.py b/src/contextual/types/agents/evaluate/list_evaluation_jobs_response.py deleted file mode 100644 index 7b07c37..0000000 --- a/src/contextual/types/agents/evaluate/list_evaluation_jobs_response.py +++ /dev/null @@ -1,67 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["ListEvaluationJobsResponse", "EvaluationRound"] - - -class EvaluationRound(BaseModel): - id: str - """ID of the evaluation round""" - - created_at: datetime - """Timestamp indicating when the evaluation round was created""" - - status: Literal[ - "pending", - "processing", - "retrying", - "completed", - "failed", - "cancelled", - "failed_to_provision", - "generating_data", - "training_in_progress", - "failed_to_generate_data", - "provisioning", - ] - """Status of the evaluation round""" - - user_email: str - """Email of the user who launched the evaluation round""" - - finished_at: Optional[datetime] = None - """Timestamp indicating when the evaluation round finished processing""" - - notes: Optional[str] = None - """User notes for the evaluation job""" - - num_failed_predictions: Optional[int] = None - """Number of predictions that failed during the evaluation round""" - - num_predictions: Optional[int] = None - """Total number of predictions made during the evaluation round""" - - num_processed_predictions: Optional[int] = None - """Number of predictions that have been processed during the evaluation round""" - - num_successful_predictions: Optional[int] = None - """Number of predictions that were successful during the evaluation round""" - - processing_started_at: Optional[datetime] = None - """Timestamp indicating when the evaluation round started processing""" - - results_dataset_name: Optional[str] = None - """Name of the dataset with the evaluation results""" - - summary_results: Optional[object] = None - """Score of the evaluation round""" - - -class ListEvaluationJobsResponse(BaseModel): - evaluation_rounds: List[EvaluationRound] - """List of evaluation results""" diff --git a/src/contextual/types/agents/evaluate_create_params.py b/src/contextual/types/agents/evaluate_create_params.py deleted file 
mode 100644 index 42d3e8f..0000000 --- a/src/contextual/types/agents/evaluate_create_params.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -from ..._types import FileTypes - -__all__ = ["EvaluateCreateParams"] - - -class EvaluateCreateParams(TypedDict, total=False): - metrics: Required[List[Literal["equivalence", "groundedness"]]] - """List of metrics to use. Supported metrics are `equivalence` and `groundedness`.""" - - evalset_file: FileTypes - """Evalset file (CSV) to use for evaluation, containing the columns `prompt` (i.e. - - question) and `reference` (i.e. ground truth response). Either `evalset_name` or - `evalset_file` must be provided, but not both. - """ - - evalset_name: str - """ - Name of the Dataset to use for evaluation, created through the - `/datasets/evaluate` API. Either `evalset_name` or `evalset_file` must be - provided, but not both. - """ - - llm_model_id: str - """ID of the model to evaluate. Uses the default model if not specified.""" - - notes: str - """User notes for the evaluation job.""" - - override_configuration: str - """Override the configuration for the query. - - This will override the configuration for the agent during evaluation. - """ diff --git a/src/contextual/types/agents/list_datasets_response.py b/src/contextual/types/agents/list_datasets_response.py deleted file mode 100644 index 182f265..0000000 --- a/src/contextual/types/agents/list_datasets_response.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List -from datetime import datetime -from typing_extensions import Literal - -from pydantic import Field as FieldInfo - -from ..._models import BaseModel - -__all__ = ["ListDatasetsResponse", "DatasetSummary"] - - -class DatasetSummary(BaseModel): - created_at: datetime - """Timestamp indicating when the dataset was created""" - - name: str - """Name of the dataset""" - - num_samples: int - """Number of samples in the dataset""" - - schema_: object = FieldInfo(alias="schema") - """Schema of the dataset""" - - status: Literal["validated", "validating", "failed"] - """Validation status of the dataset""" - - type: Literal["tuning_set", "evaluation_set", "evaluation_set_prediction", "evaluation_run_result"] - """Type of the dataset""" - - version: str - """Version of the dataset""" - - -class ListDatasetsResponse(BaseModel): - dataset_summaries: List[DatasetSummary] - - total_count: int - """Total number of datasets""" diff --git a/src/contextual/types/agents/query_create_params.py b/src/contextual/types/agents/query_create_params.py index 71a023f..550b0f1 100644 --- a/src/contextual/types/agents/query_create_params.py +++ b/src/contextual/types/agents/query_create_params.py @@ -2,10 +2,12 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import Union, Iterable from typing_extensions import Literal, Required, TypeAlias, TypedDict -__all__ = ["QueryCreateParams", "Message", "DocumentsFilters", "DocumentsFiltersBaseMetadataFilter", "StructuredOutput"] +from ..datastores.base_metadata_filter_param import BaseMetadataFilterParam + +__all__ = ["QueryCreateParams", "Message", "DocumentsFilters", "OverrideConfiguration", "StructuredOutput"] class QueryCreateParams(TypedDict, total=False): @@ -83,6 +85,12 @@ class QueryCreateParams(TypedDict, total=False): Defaults to base model if not specified. 
""" + override_configuration: OverrideConfiguration + """ + This will modify select configuration parameters for the agent during the + response generation. + """ + stream: bool """Set to `true` to receive a streamed response""" @@ -98,23 +106,75 @@ class Message(TypedDict, total=False): """Role of the sender""" -class DocumentsFiltersBaseMetadataFilter(TypedDict, total=False): - field: Required[str] - """Field name to search for in the metadata""" +DocumentsFilters: TypeAlias = Union[BaseMetadataFilterParam, "CompositeMetadataFilterParam"] + + +class OverrideConfiguration(TypedDict, total=False): + enable_filter: bool + """Override the filter_retrievals for the query. + + This will override the filter_retrievals for the agent during evaluation. + """ - operator: Required[ - Literal["equals", "containsany", "exists", "startswith", "gt", "gte", "lt", "lte", "notequals", "between"] - ] - """Operator to be used for the filter.""" + enable_rerank: bool + """Override the rerank_retrievals for the agent during evaluation.""" - value: Union[str, float, bool, List[Union[str, float, bool]], None] - """The value to be searched for in the field. + filter_model: str + """Override the filter_model for the query. - In case of exists operator, it is not needed. + This will override the filter_model for the agent during evaluation. 
""" + filter_prompt: str + """Override the filter prompt for the agent during evaluation.""" + + lexical_alpha: float + """Override the lexical_alpha for the agent during evaluation.""" + + max_new_tokens: int + """Override the max new tokens for the agent during evaluation.""" + + model: str + """Override the model for the agent during evaluation.""" + + rerank_instructions: str + """Override the rerank_instructions for the agent during evaluation.""" + + reranker: str + """Override the reranker for the agent during evaluation.""" + + reranker_score_filter_threshold: float + """Override the reranker_score_filter_threshold for the agent during evaluation.""" -DocumentsFilters: TypeAlias = Union[DocumentsFiltersBaseMetadataFilter, "CompositeMetadataFilterParam"] + semantic_alpha: float + """Override the semantic_alpha for the agent during evaluation.""" + + system_prompt: str + """Override the system prompt for the agent during evaluation.""" + + temperature: float + """Override the temperature for the query. + + This will override the temperature for the agent during evaluation. + """ + + top_k_reranked_chunks: int + """Override the rerank_top_k for the query. + + This will override the rerank_top_k for the agent during evaluation. + """ + + top_k_retrieved_chunks: int + """Override the top_k for the query. + + This will override the top_k for the agent during evaluation. + """ + + top_p: float + """Override the top_p for the query. + + This will override the top_p for the agent during evaluation. + """ class StructuredOutput(TypedDict, total=False): diff --git a/src/contextual/types/agents/query_response.py b/src/contextual/types/agents/query_response.py index 2e45b09..aca2d22 100644 --- a/src/contextual/types/agents/query_response.py +++ b/src/contextual/types/agents/query_response.py @@ -1,11 +1,77 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import List, Optional +from typing import TYPE_CHECKING, Dict, List, Union, Optional from typing_extensions import Literal +from pydantic import Field as FieldInfo + from ..._models import BaseModel -__all__ = ["QueryResponse", "RetrievalContent", "Attribution", "GroundednessScore", "Message"] +__all__ = [ + "QueryResponse", + "RetrievalContent", + "RetrievalContentCtxlMetadata", + "RetrievalContentCustomMetadataConfig", + "Attribution", + "GroundednessScore", + "Message", +] + + +class RetrievalContentCtxlMetadata(BaseModel): + chunk_id: Optional[str] = None + """Unique identifier for the chunk.""" + + chunk_size: Optional[int] = None + """Size of the chunk in tokens or characters.""" + + date_created: Optional[str] = None + """Date when the document or chunk was created.""" + + document_title: Optional[str] = None + """Title of the document.""" + + file_format: Optional[str] = None + """Format of the file (e.g., PDF, DOCX).""" + + file_name: Optional[str] = None + """Name of the source file.""" + + is_figure: Optional[bool] = None + """Whether this chunk represents a figure.""" + + page: Optional[int] = None + """Page number in the source document.""" + + section_id: Optional[str] = None + """The HTML id of the nearest element of the chunk""" + + section_title: Optional[str] = None + """Title of the section.""" + + __pydantic_extra__: Dict[str, object] = FieldInfo(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + if TYPE_CHECKING: + # Stub to indicate that arbitrary properties are accepted. + # To access properties that are not valid identifiers you can use `getattr`, e.g. + # `getattr(obj, '$type')` + def __getattr__(self, attr: str) -> object: ... + + +class RetrievalContentCustomMetadataConfig(BaseModel): + filterable: Optional[bool] = None + """Whether to use in filtering. Defaults to True""" + + in_chunks: Optional[bool] = None + """Whether to add in chunks. + + Defaults to True. 
The maximum amount of characters per metadata field that can + be added to the prompt or rerank is 400. The maximum amount of metadata fields + that can be added for prompt or retrieval is 10. Contact support@contextual.ai + to request quota increases. + """ + + returned_in_response: Optional[bool] = None + """Whether to add in response. Defaults to False""" class RetrievalContent(BaseModel): @@ -33,6 +99,38 @@ class RetrievalContent(BaseModel): Included in response to a query if `include_retrieval_content_text` is True """ + ctxl_metadata: Optional[RetrievalContentCtxlMetadata] = None + """Default metadata from the retrieval""" + + custom_metadata: Optional[Dict[str, Union[bool, float, str]]] = None + """ + Custom metadata for the document, provided by the user at ingestion time. Must be + a JSON-serializable dictionary with string keys and simple primitive values + (str, int, float, bool). The total size must not exceed 2 KB. The strings with + date format must stay in date format or be avoided if not in date format. The + 'custom_metadata.url' field is automatically included in returned attributions + during query time, if provided. The default maximum metadata fields that can be + used is 15, contact support if more is needed. + """ + + custom_metadata_config: Optional[Dict[str, RetrievalContentCustomMetadataConfig]] = None + """ + A dictionary mapping metadata field names to the configuration to use for each + field. + + - If a metadata field is not present in the dictionary, the default configuration will be used. + + - If the dictionary is not provided, metadata will be added in chunks but will not be retrievable. + + + Limits: - Maximum characters per metadata field (for prompt or rerank): 400 + + - Maximum number of metadata fields (for prompt or retrieval): 10 + + + Contact support@contextual.ai to request quota increases. 
+ """ + number: Optional[int] = None """Index of the retrieved item in the retrieval_contents list (starting from 1)""" diff --git a/src/contextual/types/agents/retrieval_info_response.py b/src/contextual/types/agents/retrieval_info_response.py index 56cb51d..0735892 100644 --- a/src/contextual/types/agents/retrieval_info_response.py +++ b/src/contextual/types/agents/retrieval_info_response.py @@ -21,6 +21,9 @@ class ContentMetadataUnstructuredContentMetadata(BaseModel): content_text: str """Text of the content.""" + document_id: str + """Id of the document which the content belongs to.""" + height: float """Height of the image.""" diff --git a/src/contextual/types/agents/tune/__init__.py b/src/contextual/types/agents/tune/__init__.py deleted file mode 100644 index 15bee22..0000000 --- a/src/contextual/types/agents/tune/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .tune_job_metadata import TuneJobMetadata as TuneJobMetadata -from .list_tune_jobs_response import ListTuneJobsResponse as ListTuneJobsResponse -from .list_tune_models_response import ListTuneModelsResponse as ListTuneModelsResponse diff --git a/src/contextual/types/agents/tune/list_tune_jobs_response.py b/src/contextual/types/agents/tune/list_tune_jobs_response.py deleted file mode 100644 index 1248eb5..0000000 --- a/src/contextual/types/agents/tune/list_tune_jobs_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List - -from ...._models import BaseModel -from .tune_job_metadata import TuneJobMetadata - -__all__ = ["ListTuneJobsResponse"] - - -class ListTuneJobsResponse(BaseModel): - jobs: List[TuneJobMetadata] - """List of fine-tuning jobs for the agent""" - - total_count: int - """Total number of jobs associated with the agent""" diff --git a/src/contextual/types/agents/tune/list_tune_models_response.py b/src/contextual/types/agents/tune/list_tune_models_response.py deleted file mode 100644 index ffc5136..0000000 --- a/src/contextual/types/agents/tune/list_tune_models_response.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from datetime import datetime -from typing_extensions import Literal - -from ...._compat import PYDANTIC_V2, ConfigDict -from ...._models import BaseModel - -__all__ = ["ListTuneModelsResponse", "Model"] - - -class Model(BaseModel): - application_id: str - """ID of the associated agent""" - - created_at: datetime - """Timestamp indicating when the model was created""" - - job_id: str - """ID of the tuning job that produced the model""" - - model_id: str - """ID of the registered model""" - - state: Literal["active", "inactive", "pending"] - """State of the model""" - - if PYDANTIC_V2: - # allow fields with a `model_` prefix - model_config = ConfigDict(protected_namespaces=tuple()) - - -class ListTuneModelsResponse(BaseModel): - models: List[Model] - """List of registered models for the agent""" - - total_count: int - """Total number of models associated with the agent""" diff --git a/src/contextual/types/agents/tune/tune_job_metadata.py b/src/contextual/types/agents/tune/tune_job_metadata.py deleted file mode 100644 index ddf9394..0000000 --- a/src/contextual/types/agents/tune/tune_job_metadata.py +++ /dev/null @@ -1,29 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ...._compat import PYDANTIC_V2, ConfigDict -from ...._models import BaseModel - -__all__ = ["TuneJobMetadata"] - - -class TuneJobMetadata(BaseModel): - id: str - """ID of the tune job""" - - job_status: str - """Status of the tune job""" - - evaluation_metadata: Optional[List[object]] = None - """Metadata about the model evaluation, including status and results if completed.""" - - model_id: Optional[str] = None - """ID of the tuned model. - - Omitted if the tuning job failed or is still in progress. - """ - - if PYDANTIC_V2: - # allow fields with a `model_` prefix - model_config = ConfigDict(protected_namespaces=tuple()) diff --git a/src/contextual/types/agents/tune_create_params.py b/src/contextual/types/agents/tune_create_params.py deleted file mode 100644 index 2c5dc14..0000000 --- a/src/contextual/types/agents/tune_create_params.py +++ /dev/null @@ -1,112 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional -from typing_extensions import Literal, Annotated, TypedDict - -from ..._types import FileTypes -from ..._utils import PropertyInfo - -__all__ = ["TuneCreateParams"] - - -class TuneCreateParams(TypedDict, total=False): - hyperparams_learning_rate: Annotated[float, PropertyInfo(alias="hyperparams[learning_rate]")] - """Controls how quickly the model adapts to the training data. - - Must be greater than 0 and less than or equal to 0.1. - """ - - hyperparams_lora_alpha: Annotated[Literal[8, 16, 32, 64, 128], PropertyInfo(alias="hyperparams[lora_alpha]")] - """Scaling factor that controls the magnitude of LoRA updates. - - Higher values lead to stronger adaptation effects. The effective learning - strength is determined by the ratio of lora_alpha/lora_rank. 
Must be one of: 8, - 16, 32, 64 or 128 - """ - - hyperparams_lora_dropout: Annotated[float, PropertyInfo(alias="hyperparams[lora_dropout]")] - """ - LoRA dropout randomly disables connections during training to prevent - overfitting and improve generalization when fine-tuning language models with - Low-Rank Adaptation. Must be between 0 and 1 (exclusive). - """ - - hyperparams_lora_rank: Annotated[Literal[8, 16, 32, 64], PropertyInfo(alias="hyperparams[lora_rank]")] - """Controls the capacity of the LoRA adapters. Must be one of: 8, 16, 32, or 64.""" - - hyperparams_num_epochs: Annotated[int, PropertyInfo(alias="hyperparams[num_epochs]")] - """Number of complete passes through the training dataset.""" - - hyperparams_warmup_ratio: Annotated[float, PropertyInfo(alias="hyperparams[warmup_ratio]")] - """Fraction of training steps used for learning rate warmup. - - Must be between 0 and 1 (exclusive). - """ - - metadata_file: FileTypes - """Optional. Metadata file to use for synthetic data pipeline.""" - - sdp_only: bool - """Runs the SDP pipeline only if set to True.""" - - synth_data: bool - """Optional. Whether to generate synthetic data for training""" - - test_dataset_name: Optional[str] - """Optional. - - `Dataset` to use for testing model checkpoints, created through the - `/datasets/evaluate` API. - """ - - test_file: Optional[FileTypes] - """Optional. - - Local path to the test data file. The test file should follow the same format as - the training data file. - """ - - train_dataset_name: Optional[str] - """`Dataset` to use for training, created through the `/datasets/tune` API. - - Either `train_dataset_name` or `training_file` must be provided, but not both. - """ - - training_file: Optional[FileTypes] - """Local path to the training data file. - - The file should be in JSON array format, where each element of the array is a - JSON object represents a single training example. 
The four required fields are - `guideline`, `prompt`, `reference`, and `knowledge`. - - - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference - answer. `knowledge` is a list of retrieved text chunks. - - - `reference` (`str`): The gold-standard answer to the prompt. - - - `guideline` (`str`): Guidelines for model output. If you do not have special - guidelines for the model's output, you can use the `System Prompt` defined in - your Agent configuration as the `guideline`. - - - `prompt` (`str`): Question for the model to respond to. - - Example: - - ```json - [ - { - "guideline": "The answer should be accurate.", - "prompt": "What was last quarter's revenue?", - "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.", - "knowledge": [ - "Quarterly report: Q3 revenue was $1.2 million.", - "Quarterly report: Q2 revenue was $1.1 million.", - ... - ], - }, - ... - ] - ``` - """ diff --git a/src/contextual/types/datastore.py b/src/contextual/types/datastore.py index d04d0b4..d440903 100644 --- a/src/contextual/types/datastore.py +++ b/src/contextual/types/datastore.py @@ -1,10 +1,92 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from datetime import datetime +from typing_extensions import Literal from .._models import BaseModel -__all__ = ["Datastore"] +__all__ = ["Datastore", "Configuration", "ConfigurationChunking", "ConfigurationHTMLConfig", "ConfigurationParsing"] + + +class ConfigurationChunking(BaseModel): + chunking_mode: Optional[Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"]] = None + """Chunking mode to use. + + Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`, + `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or + below, additionally merging or splitting based on length constraints. 
+ `hierarchy_heading` splits chunks at every heading in the document hierarchy, + additionally merging or splitting based on length constraints. `static_length` + creates chunks of a fixed length. `page_level` creates chunks that cannot run + over page boundaries. + """ + + enable_hierarchy_based_contextualization: Optional[bool] = None + """Whether to enable section-based contextualization for chunking""" + + max_chunk_length_tokens: Optional[int] = None + """Target maximum length of text tokens chunks for chunking. + + Chunk length may exceed this value in some edge cases. + """ + + min_chunk_length_tokens: Optional[int] = None + """Target minimum length of chunks in tokens. + + Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length + may be shorter than this value in some edge cases. Ignored if `chunking_mode` is + `page_level`. + """ + + +class ConfigurationHTMLConfig(BaseModel): + max_chunk_length_tokens: Optional[int] = None + """Target maximum length of text tokens chunks for chunking. + + Chunk length may exceed this value in some edge cases. + """ + + +class ConfigurationParsing(BaseModel): + enable_split_tables: Optional[bool] = None + """ + Whether to enable table splitting, which splits large tables into smaller tables + with at most `max_split_table_cells` cells each. In each split table, the table + headers are reproduced as the first row(s). This is useful for preserving + context when tables are too large to fit into one chunk. + """ + + figure_caption_mode: Optional[Literal["default", "custom", "ignore"]] = None + """Mode for figure captioning. + + Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure + captioning. Set to `default` to use the default figure prompt, which generates a + detailed caption for each figure. Set to `custom` to use a custom prompt. + """ + + figure_captioning_prompt: Optional[str] = None + """Prompt to use for generating image captions. 
+ + Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null. + """ + + max_split_table_cells: Optional[int] = None + """Maximum number of cells for split tables. + + Ignored if `enable_split_tables` is False. + """ + + +class Configuration(BaseModel): + chunking: Optional[ConfigurationChunking] = None + """Configuration for document chunking""" + + html_config: Optional[ConfigurationHTMLConfig] = None + """Configuration for HTML Extraction""" + + parsing: Optional[ConfigurationParsing] = None + """Configuration for document parsing""" class Datastore(BaseModel): @@ -12,7 +94,13 @@ class Datastore(BaseModel): """ID of the datastore""" created_at: datetime - """Timestamp of when the datastore was created""" + """Timestamp of when the datastore was created, in ISO format""" + + datastore_type: Literal["UNSTRUCTURED"] + """Type of the datastore""" name: str """Name of the datastore""" + + configuration: Optional[Configuration] = None + """Configuration of the datastore""" diff --git a/src/contextual/types/datastore_create_params.py b/src/contextual/types/datastore_create_params.py index 46863eb..0ab8d94 100644 --- a/src/contextual/types/datastore_create_params.py +++ b/src/contextual/types/datastore_create_params.py @@ -2,11 +2,100 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing_extensions import Literal, Required, TypedDict -__all__ = ["DatastoreCreateParams"] +__all__ = [ + "DatastoreCreateParams", + "Configuration", + "ConfigurationChunking", + "ConfigurationHTMLConfig", + "ConfigurationParsing", +] class DatastoreCreateParams(TypedDict, total=False): name: Required[str] """Name of the datastore""" + + configuration: Configuration + """Configuration of the datastore. 
If not provided, default configuration is used.""" + + +class ConfigurationChunking(TypedDict, total=False): + chunking_mode: Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"] + """Chunking mode to use. + + Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`, + `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or + below, additionally merging or splitting based on length constraints. + `hierarchy_heading` splits chunks at every heading in the document hierarchy, + additionally merging or splitting based on length constraints. `static_length` + creates chunks of a fixed length. `page_level` creates chunks that cannot run + over page boundaries. + """ + + enable_hierarchy_based_contextualization: bool + """Whether to enable section-based contextualization for chunking""" + + max_chunk_length_tokens: int + """Target maximum length of text tokens chunks for chunking. + + Chunk length may exceed this value in some edge cases. + """ + + min_chunk_length_tokens: int + """Target minimum length of chunks in tokens. + + Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length + may be shorter than this value in some edge cases. Ignored if `chunking_mode` is + `page_level`. + """ + + +class ConfigurationHTMLConfig(TypedDict, total=False): + max_chunk_length_tokens: int + """Target maximum length of text tokens chunks for chunking. + + Chunk length may exceed this value in some edge cases. + """ + + +class ConfigurationParsing(TypedDict, total=False): + enable_split_tables: bool + """ + Whether to enable table splitting, which splits large tables into smaller tables + with at most `max_split_table_cells` cells each. In each split table, the table + headers are reproduced as the first row(s). This is useful for preserving + context when tables are too large to fit into one chunk. + """ + + figure_caption_mode: Literal["default", "custom", "ignore"] + """Mode for figure captioning. 
+ + Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure + captioning. Set to `default` to use the default figure prompt, which generates a + detailed caption for each figure. Set to `custom` to use a custom prompt. + """ + + figure_captioning_prompt: str + """Prompt to use for generating image captions. + + Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null. + """ + + max_split_table_cells: int + """Maximum number of cells for split tables. + + Ignored if `enable_split_tables` is False. + """ + + +class Configuration(TypedDict, total=False): + chunking: ConfigurationChunking + """Configuration for document chunking""" + + html_config: ConfigurationHTMLConfig + """Configuration for HTML Extraction""" + + parsing: ConfigurationParsing + """Configuration for document parsing""" diff --git a/src/contextual/types/datastore_metadata.py b/src/contextual/types/datastore_metadata.py index 39208c0..bacf5d9 100644 --- a/src/contextual/types/datastore_metadata.py +++ b/src/contextual/types/datastore_metadata.py @@ -2,10 +2,98 @@ from typing import List, Optional from datetime import datetime +from typing_extensions import Literal from .._models import BaseModel -__all__ = ["DatastoreMetadata", "DatastoreUsages"] +__all__ = [ + "DatastoreMetadata", + "Configuration", + "ConfigurationChunking", + "ConfigurationHTMLConfig", + "ConfigurationParsing", + "DatastoreUsages", +] + + +class ConfigurationChunking(BaseModel): + chunking_mode: Optional[Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"]] = None + """Chunking mode to use. + + Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`, + `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or + below, additionally merging or splitting based on length constraints. + `hierarchy_heading` splits chunks at every heading in the document hierarchy, + additionally merging or splitting based on length constraints. 
`static_length` + creates chunks of a fixed length. `page_level` creates chunks that cannot run + over page boundaries. + """ + + enable_hierarchy_based_contextualization: Optional[bool] = None + """Whether to enable section-based contextualization for chunking""" + + max_chunk_length_tokens: Optional[int] = None + """Target maximum length of text tokens chunks for chunking. + + Chunk length may exceed this value in some edge cases. + """ + + min_chunk_length_tokens: Optional[int] = None + """Target minimum length of chunks in tokens. + + Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length + may be shorter than this value in some edge cases. Ignored if `chunking_mode` is + `page_level`. + """ + + +class ConfigurationHTMLConfig(BaseModel): + max_chunk_length_tokens: Optional[int] = None + """Target maximum length of text tokens chunks for chunking. + + Chunk length may exceed this value in some edge cases. + """ + + +class ConfigurationParsing(BaseModel): + enable_split_tables: Optional[bool] = None + """ + Whether to enable table splitting, which splits large tables into smaller tables + with at most `max_split_table_cells` cells each. In each split table, the table + headers are reproduced as the first row(s). This is useful for preserving + context when tables are too large to fit into one chunk. + """ + + figure_caption_mode: Optional[Literal["default", "custom", "ignore"]] = None + """Mode for figure captioning. + + Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure + captioning. Set to `default` to use the default figure prompt, which generates a + detailed caption for each figure. Set to `custom` to use a custom prompt. + """ + + figure_captioning_prompt: Optional[str] = None + """Prompt to use for generating image captions. + + Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null. 
+ """ + + max_split_table_cells: Optional[int] = None + """Maximum number of cells for split tables. + + Ignored if `enable_split_tables` is False. + """ + + +class Configuration(BaseModel): + chunking: Optional[ConfigurationChunking] = None + """Configuration for document chunking""" + + html_config: Optional[ConfigurationHTMLConfig] = None + """Configuration for HTML Extraction""" + + parsing: Optional[ConfigurationParsing] = None + """Configuration for document parsing""" class DatastoreUsages(BaseModel): @@ -23,5 +111,11 @@ class DatastoreMetadata(BaseModel): name: str """Name of the datastore""" + configuration: Optional[Configuration] = None + """Configuration of the datastore. Not set if default configuration is in use.""" + + datastore_type: Optional[Literal["UNSTRUCTURED"]] = None + """Type of the datastore""" + datastore_usages: Optional[DatastoreUsages] = None """Datastore usage""" diff --git a/src/contextual/types/datastore_update_params.py b/src/contextual/types/datastore_update_params.py new file mode 100644 index 0000000..8a4e448 --- /dev/null +++ b/src/contextual/types/datastore_update_params.py @@ -0,0 +1,104 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = [ + "DatastoreUpdateParams", + "Configuration", + "ConfigurationChunking", + "ConfigurationHTMLConfig", + "ConfigurationParsing", +] + + +class DatastoreUpdateParams(TypedDict, total=False): + configuration: Configuration + """Configuration of the datastore. + + If not provided, current configuration is retained. + """ + + name: str + """Name of the datastore""" + + +class ConfigurationChunking(TypedDict, total=False): + chunking_mode: Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"] + """Chunking mode to use. + + Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`, + `page_level`. 
`hierarchy_depth` groups chunks of the same hierarchy level or + below, additionally merging or splitting based on length constraints. + `hierarchy_heading` splits chunks at every heading in the document hierarchy, + additionally merging or splitting based on length constraints. `static_length` + creates chunks of a fixed length. `page_level` creates chunks that cannot run + over page boundaries. + """ + + enable_hierarchy_based_contextualization: bool + """Whether to enable section-based contextualization for chunking""" + + max_chunk_length_tokens: int + """Target maximum length of text tokens chunks for chunking. + + Chunk length may exceed this value in some edge cases. + """ + + min_chunk_length_tokens: int + """Target minimum length of chunks in tokens. + + Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length + may be shorter than this value in some edge cases. Ignored if `chunking_mode` is + `page_level`. + """ + + +class ConfigurationHTMLConfig(TypedDict, total=False): + max_chunk_length_tokens: int + """Target maximum length of text tokens chunks for chunking. + + Chunk length may exceed this value in some edge cases. + """ + + +class ConfigurationParsing(TypedDict, total=False): + enable_split_tables: bool + """ + Whether to enable table splitting, which splits large tables into smaller tables + with at most `max_split_table_cells` cells each. In each split table, the table + headers are reproduced as the first row(s). This is useful for preserving + context when tables are too large to fit into one chunk. + """ + + figure_caption_mode: Literal["default", "custom", "ignore"] + """Mode for figure captioning. + + Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure + captioning. Set to `default` to use the default figure prompt, which generates a + detailed caption for each figure. Set to `custom` to use a custom prompt. 
+ """ + + figure_captioning_prompt: str + """Prompt to use for generating image captions. + + Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null. + """ + + max_split_table_cells: int + """Maximum number of cells for split tables. + + Ignored if `enable_split_tables` is False. + """ + + +class Configuration(TypedDict, total=False): + chunking: ConfigurationChunking + """Configuration for document chunking""" + + html_config: ConfigurationHTMLConfig + """Configuration for HTML Extraction""" + + parsing: ConfigurationParsing + """Configuration for document parsing""" diff --git a/src/contextual/types/datastore_update_response.py b/src/contextual/types/datastore_update_response.py new file mode 100644 index 0000000..1e5a48d --- /dev/null +++ b/src/contextual/types/datastore_update_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["DatastoreUpdateResponse"] + + +class DatastoreUpdateResponse(BaseModel): + id: str + """ID of the datastore""" diff --git a/src/contextual/types/datastores/__init__.py b/src/contextual/types/datastores/__init__.py index 23e7d4b..5a18d8e 100644 --- a/src/contextual/types/datastores/__init__.py +++ b/src/contextual/types/datastores/__init__.py @@ -4,8 +4,13 @@ from .document_metadata import DocumentMetadata as DocumentMetadata from .ingestion_response import IngestionResponse as IngestionResponse +from .base_metadata_filter import BaseMetadataFilter as BaseMetadataFilter from .document_list_params import DocumentListParams as DocumentListParams from .document_ingest_params import DocumentIngestParams as DocumentIngestParams from .list_documents_response import ListDocumentsResponse as ListDocumentsResponse +from .composite_metadata_filter import CompositeMetadataFilter as CompositeMetadataFilter +from .base_metadata_filter_param import BaseMetadataFilterParam as BaseMetadataFilterParam from 
.document_set_metadata_params import DocumentSetMetadataParams as DocumentSetMetadataParams from .composite_metadata_filter_param import CompositeMetadataFilterParam as CompositeMetadataFilterParam +from .document_get_parse_result_params import DocumentGetParseResultParams as DocumentGetParseResultParams +from .document_get_parse_result_response import DocumentGetParseResultResponse as DocumentGetParseResultResponse diff --git a/src/contextual/types/datastores/base_metadata_filter.py b/src/contextual/types/datastores/base_metadata_filter.py new file mode 100644 index 0000000..8843ee5 --- /dev/null +++ b/src/contextual/types/datastores/base_metadata_filter.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["BaseMetadataFilter"] + + +class BaseMetadataFilter(BaseModel): + field: str + """Field name to search for in the metadata""" + + operator: Literal[ + "equals", "containsany", "exists", "startswith", "gt", "gte", "lt", "lte", "notequals", "between", "wildcard" + ] + """Operator to be used for the filter.""" + + value: Union[str, float, bool, List[Union[str, float, bool]], None] = None + """The value to be searched for in the field. + + In case of exists operator, it is not needed. + """ diff --git a/src/contextual/types/datastores/base_metadata_filter_param.py b/src/contextual/types/datastores/base_metadata_filter_param.py new file mode 100644 index 0000000..66ab145 --- /dev/null +++ b/src/contextual/types/datastores/base_metadata_filter_param.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Union +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["BaseMetadataFilterParam"] + + +class BaseMetadataFilterParam(TypedDict, total=False): + field: Required[str] + """Field name to search for in the metadata""" + + operator: Required[ + Literal[ + "equals", + "containsany", + "exists", + "startswith", + "gt", + "gte", + "lt", + "lte", + "notequals", + "between", + "wildcard", + ] + ] + """Operator to be used for the filter.""" + + value: Union[str, float, bool, List[Union[str, float, bool]], None] + """The value to be searched for in the field. + + In case of exists operator, it is not needed. + """ diff --git a/src/contextual/types/datastores/composite_metadata_filter.py b/src/contextual/types/datastores/composite_metadata_filter.py new file mode 100644 index 0000000..513d2fc --- /dev/null +++ b/src/contextual/types/datastores/composite_metadata_filter.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import TYPE_CHECKING, List, Union, Optional +from typing_extensions import Literal, TypeAlias, TypeAliasType + +from ..._compat import PYDANTIC_V2 +from ..._models import BaseModel +from .base_metadata_filter import BaseMetadataFilter + +__all__ = ["CompositeMetadataFilter", "Filter"] + +if TYPE_CHECKING or PYDANTIC_V2: + Filter = TypeAliasType("Filter", Union[BaseMetadataFilter, "CompositeMetadataFilter"]) +else: + Filter: TypeAlias = Union[BaseMetadataFilter, "CompositeMetadataFilter"] + + +class CompositeMetadataFilter(BaseModel): + filters: List[Filter] + """Filters added to the query for filtering docs""" + + operator: Optional[Literal["AND", "OR", "AND_NOT"]] = None + """Composite operator to be used to combine filters""" diff --git a/src/contextual/types/datastores/composite_metadata_filter_param.py b/src/contextual/types/datastores/composite_metadata_filter_param.py index eaf0e55..20a30cd 100644 --- a/src/contextual/types/datastores/composite_metadata_filter_param.py +++ b/src/contextual/types/datastores/composite_metadata_filter_param.py @@ -2,34 +2,18 @@ from __future__ import annotations -from typing import TYPE_CHECKING, List, Union, Iterable, Optional +from typing import TYPE_CHECKING, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict, TypeAliasType from ..._compat import PYDANTIC_V2 +from .base_metadata_filter_param import BaseMetadataFilterParam -__all__ = ["CompositeMetadataFilterParam", "Filter", "FilterBaseMetadataFilter"] - - -class FilterBaseMetadataFilter(TypedDict, total=False): - field: Required[str] - """Field name to search for in the metadata""" - - operator: Required[ - Literal["equals", "containsany", "exists", "startswith", "gt", "gte", "lt", "lte", "notequals", "between"] - ] - """Operator to be used for the filter.""" - - value: Union[str, float, bool, List[Union[str, float, bool]], None] - """The value to be searched for in the field. 
- - In case of exists operator, it is not needed. - """ - +__all__ = ["CompositeMetadataFilterParam", "Filter"] if TYPE_CHECKING or PYDANTIC_V2: - Filter = TypeAliasType("Filter", Union[FilterBaseMetadataFilter, "CompositeMetadataFilterParam"]) + Filter = TypeAliasType("Filter", Union[BaseMetadataFilterParam, "CompositeMetadataFilterParam"]) else: - Filter: TypeAlias = Union[FilterBaseMetadataFilter, "CompositeMetadataFilterParam"] + Filter: TypeAlias = Union[BaseMetadataFilterParam, "CompositeMetadataFilterParam"] class CompositeMetadataFilterParam(TypedDict, total=False): diff --git a/src/contextual/types/datastores/document_get_parse_result_params.py b/src/contextual/types/datastores/document_get_parse_result_params.py new file mode 100644 index 0000000..031649b --- /dev/null +++ b/src/contextual/types/datastores/document_get_parse_result_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["DocumentGetParseResultParams"] + + +class DocumentGetParseResultParams(TypedDict, total=False): + datastore_id: Required[str] + """Datastore ID of the datastore from which to retrieve the document""" + + output_types: List[Literal["markdown-document", "markdown-per-page", "blocks-per-page"]] + """The desired output format(s) of the parsed file. + + Must be `markdown-document`, `markdown-per-page`, and/or `blocks-per-page`. + Specify multiple values to get multiple formats in the response. + `markdown-document` parses the whole document into a single concatenated + markdown output. `markdown-per-page` provides markdown output per page. + `blocks-per-page` provides a structured JSON representation of the content + blocks on each page, sorted by reading order. 
+ """ diff --git a/src/contextual/types/datastores/document_get_parse_result_response.py b/src/contextual/types/datastores/document_get_parse_result_response.py new file mode 100644 index 0000000..18e64c8 --- /dev/null +++ b/src/contextual/types/datastores/document_get_parse_result_response.py @@ -0,0 +1,187 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = [ + "DocumentGetParseResultResponse", + "DocumentMetadata", + "DocumentMetadataHierarchy", + "DocumentMetadataHierarchyBlock", + "DocumentMetadataHierarchyBlockBoundingBox", + "Page", + "PageBlock", + "PageBlockBoundingBox", +] + + +class DocumentMetadataHierarchyBlockBoundingBox(BaseModel): + x0: float + """The x-coordinate of the top-left corner of the bounding box""" + + x1: float + """The x-coordinate of the bottom-right corner of the bounding box""" + + y0: float + """The y-coordinate of the top-left corner of the bounding box""" + + y1: float + """The y-coordinate of the bottom-right corner of the bounding box""" + + +class DocumentMetadataHierarchyBlock(BaseModel): + id: str + """Unique ID of the block""" + + bounding_box: DocumentMetadataHierarchyBlockBoundingBox + """ + The normalized bounding box of the block, as relative percentages of the page + width and height + """ + + markdown: str + """The Markdown representation of the block""" + + type: Literal["heading", "text", "table", "figure"] + """The type of the block""" + + confidence_level: Optional[Literal["low", "medium", "high"]] = None + """The confidence level of this block categorized as 'low', 'medium', or 'high'. + + Only available for blocks of type 'table' currently. + """ + + hierarchy_level: Optional[int] = None + """ + The level of the block in the document hierarchy, starting at 0 for the + root-level title block. 
Only present if `enable_document_hierarchy` was set to + true in the request. + """ + + page_index: Optional[int] = None + """The page (0-indexed) that this block belongs to. + + Only set for heading blocks that are returned in the table of contents. + """ + + parent_ids: Optional[List[str]] = None + """ + The IDs of the parent in the document hierarchy, sorted from root-level to + bottom. For root-level heading blocks, this will be an empty list. Only present + if `enable_document_hierarchy` was set to true in the request. + """ + + +class DocumentMetadataHierarchy(BaseModel): + blocks: Optional[List[DocumentMetadataHierarchyBlock]] = None + """Heading blocks which define the hierarchy of the document""" + + table_of_contents: Optional[str] = None + """Markdown representation of the table of contents for this document""" + + +class DocumentMetadata(BaseModel): + hierarchy: Optional[DocumentMetadataHierarchy] = None + """ + Hierarchy of the document, as both heading blocks and a markdown table of + contents + """ + + +class PageBlockBoundingBox(BaseModel): + x0: float + """The x-coordinate of the top-left corner of the bounding box""" + + x1: float + """The x-coordinate of the bottom-right corner of the bounding box""" + + y0: float + """The y-coordinate of the top-left corner of the bounding box""" + + y1: float + """The y-coordinate of the bottom-right corner of the bounding box""" + + +class PageBlock(BaseModel): + id: str + """Unique ID of the block""" + + bounding_box: PageBlockBoundingBox + """ + The normalized bounding box of the block, as relative percentages of the page + width and height + """ + + markdown: str + """The Markdown representation of the block""" + + type: Literal["heading", "text", "table", "figure"] + """The type of the block""" + + confidence_level: Optional[Literal["low", "medium", "high"]] = None + """The confidence level of this block categorized as 'low', 'medium', or 'high'. + + Only available for blocks of type 'table' currently. 
+ """ + + hierarchy_level: Optional[int] = None + """ + The level of the block in the document hierarchy, starting at 0 for the + root-level title block. Only present if `enable_document_hierarchy` was set to + true in the request. + """ + + page_index: Optional[int] = None + """The page (0-indexed) that this block belongs to. + + Only set for heading blocks that are returned in the table of contents. + """ + + parent_ids: Optional[List[str]] = None + """ + The IDs of the parent in the document hierarchy, sorted from root-level to + bottom. For root-level heading blocks, this will be an empty list. Only present + if `enable_document_hierarchy` was set to true in the request. + """ + + +class Page(BaseModel): + index: int + """The index of the parsed page (zero-indexed)""" + + blocks: Optional[List[PageBlock]] = None + """The parsed, structured blocks of this page. + + Present if `blocks-per-page` was among the requested output types. + """ + + markdown: Optional[str] = None + """The parsed, structured Markdown of this page. + + Present if `markdown-per-page` was among the requested output types. + """ + + +class DocumentGetParseResultResponse(BaseModel): + file_name: str + """The name of the file that was uploaded for parsing""" + + status: Literal["pending", "processing", "retrying", "completed", "failed", "cancelled"] + """The current status of the parse job""" + + document_metadata: Optional[DocumentMetadata] = None + """Document-level metadata parsed from the document""" + + markdown_document: Optional[str] = None + """The parsed, structured Markdown of the input file. + + Only present if `markdown-document` was among the requested output types. + """ + + pages: Optional[List[Page]] = None + """ + Per-page parse results, containing per-page Markdown (if `markdown-per-page` was + requested) and/or per-page `ParsedBlock`s (if `blocks-per-page` was requested). 
+ """ diff --git a/src/contextual/types/datastores/document_ingest_params.py b/src/contextual/types/datastores/document_ingest_params.py index 92b0ab4..5179f7d 100644 --- a/src/contextual/types/datastores/document_ingest_params.py +++ b/src/contextual/types/datastores/document_ingest_params.py @@ -14,22 +14,24 @@ class DocumentIngestParams(TypedDict, total=False): """File to ingest.""" metadata: str - """Metadata in `JSON` format. + """Metadata request in JSON format. - Metadata should be passed as a nested dictionary structure where: + `custom_metadata` is a flat dictionary containing one or more key-value pairs, + where each value must be a primitive type (`str`, `bool`, `float`, or `int`). + The default maximum metadata fields that can be used is 15, contact support if + more is needed.The combined size of the metadata must not exceed **2 KB** when + encoded as JSON.The strings with date format must stay in date format or be + avoided if not in date format.The `custom_metadata.url` field is automatically + included in returned attributions during query time, if provided. - - The **metadata type** `custom_metadata` is mapped to a dictionary. - The - **dictionary keys** represent metadata attributes. - The **values** can be of - type `str`, `bool`, `float`, or `int`. 
- - **Example Metadata JSON:** + **Example Request Body:** ```json - metadata = { - "custom_metadata": { - "field1": "value1", - "field2": "value2" - } + { + "custom_metadata": { + "topic": "science", + "difficulty": 3 + } } ``` """ diff --git a/src/contextual/types/datastores/document_list_params.py b/src/contextual/types/datastores/document_list_params.py index 596f428..86f816f 100644 --- a/src/contextual/types/datastores/document_list_params.py +++ b/src/contextual/types/datastores/document_list_params.py @@ -18,21 +18,10 @@ class DocumentListParams(TypedDict, total=False): of results """ - ingestion_job_status: List[ - Literal[ - "pending", - "processing", - "retrying", - "completed", - "failed", - "cancelled", - "failed_to_provision", - "generating_data", - "training_in_progress", - "failed_to_generate_data", - "provisioning", - ] - ] + document_name_prefix: str + """Filters documents with the given prefix.""" + + ingestion_job_status: List[Literal["pending", "processing", "retrying", "completed", "failed", "cancelled"]] """ Filters documents whose ingestion job status matches (one of) the provided status(es). diff --git a/src/contextual/types/datastores/document_metadata.py b/src/contextual/types/datastores/document_metadata.py index 6d8dbda..790f0ad 100644 --- a/src/contextual/types/datastores/document_metadata.py +++ b/src/contextual/types/datastores/document_metadata.py @@ -5,7 +5,24 @@ from ..._models import BaseModel -__all__ = ["DocumentMetadata"] +__all__ = ["DocumentMetadata", "CustomMetadataConfig"] + + +class CustomMetadataConfig(BaseModel): + filterable: Optional[bool] = None + """Whether to use in filtering. Defaults to True""" + + in_chunks: Optional[bool] = None + """Whether to add in chunks. + + Defaults to True. The maximum amount of characters per metadata field that can + be added to the prompt or rerank is 400. The maximum amount of metadata fields + that can be added for prompt or retrieval is 10. 
Contact support@contextual.ai + to request quota increases. + """ + + returned_in_response: Optional[bool] = None + """Whether to add in response. Defaults to False""" class DocumentMetadata(BaseModel): @@ -22,6 +39,42 @@ class DocumentMetadata(BaseModel): """Status of this document's ingestion job""" custom_metadata: Optional[Dict[str, Union[bool, float, str]]] = None + """ + Custom metadata for the document, provided by the user at ingestion time.Must be + a JSON-serializable dictionary with string keys and simple primitive values + (str, int, float, bool). The total size must not exceed 2 KB.The strings with + date format must stay in date format or be avoided if not in date format.The + 'custom_metadata.url' field is automatically included in returned attributions + during query time, if provided.The default maximum metadata fields that can be + used is 15, contact support if more is needed. + """ + + custom_metadata_config: Optional[Dict[str, CustomMetadataConfig]] = None + """ + A dictionary mapping metadata field names to the configuration to use for each + field. + + - If a metadata field is not present in the dictionary, the default configuration will be used. + + - If the dictionary is not provided, metadata will be added in chunks but will not be retrievable. + + + Limits: - Maximum characters per metadata field (for prompt or rerank): 400 + + - Maximum number of metadata fields (for prompt or retrieval): 10 + + + Contact support@contextual.ai to request quota increases. + """ + + has_access: Optional[bool] = None + """Whether the user has access to this document.""" + + ingestion_config: Optional[object] = None + """Ingestion configuration for the document when the document was ingested. + + It may be different from the current datastore configuration.
+ """ updated_at: Optional[str] = None """Timestamp of when the document was modified in ISO format.""" diff --git a/src/contextual/types/datastores/document_set_metadata_params.py b/src/contextual/types/datastores/document_set_metadata_params.py index f92d6b6..4c21822 100644 --- a/src/contextual/types/datastores/document_set_metadata_params.py +++ b/src/contextual/types/datastores/document_set_metadata_params.py @@ -5,7 +5,7 @@ from typing import Dict, Union from typing_extensions import Required, TypedDict -__all__ = ["DocumentSetMetadataParams"] +__all__ = ["DocumentSetMetadataParams", "CustomMetadataConfig"] class DocumentSetMetadataParams(TypedDict, total=False): @@ -13,3 +13,47 @@ class DocumentSetMetadataParams(TypedDict, total=False): """Datastore ID of the datastore from which to retrieve the document""" custom_metadata: Dict[str, Union[bool, float, str]] + """ + Custom metadata for the document, provided by the user at ingestion time.Must be + a JSON-serializable dictionary with string keys and simple primitive values + (str, int, float, bool). The total size must not exceed 2 KB.The strings with + date format must stay in date format or be avodied if not in date format.The + 'custom_metadata.url' field is automatically included in returned attributions + during query time, if provided.The default maximum metadata fields that can be + used is 15, contact support if more is needed. + """ + + custom_metadata_config: Dict[str, CustomMetadataConfig] + """ + A dictionary mapping metadata field names to the configuration to use for each + field. + + - If a metadata field is not present in the dictionary, the default configuration will be used. + + - If the dictionary is not provided, metadata will be added in chunks but will not be retrievable. + + + Limits: - Maximum characters per metadata field (for prompt or rerank): 400 + + - Maximum number of metadata fields (for prompt or retrieval): 10 + + + Contact support@contextual.ai to request quota increases. 
+ """ + + +class CustomMetadataConfig(TypedDict, total=False): + filterable: bool + """Whether to use in filtering. Defaults to True""" + + in_chunks: bool + """Whether to add in chunks. + + Defaults to True. The maximum amount of characters per metadata field that can + be added to the prompt or rerank is 400. The maximum amount of metadata fields + that can be added for prompt or retrieval is 10. Contact support@contextual.ai + to request quota increases. + """ + + returned_in_response: bool + """Whether to add in response. Defaults to False""" diff --git a/src/contextual/types/filter_and_rerank_config.py b/src/contextual/types/filter_and_rerank_config.py index 0507174..5057861 100644 --- a/src/contextual/types/filter_and_rerank_config.py +++ b/src/contextual/types/filter_and_rerank_config.py @@ -1,13 +1,34 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import TypeAlias from .._models import BaseModel +from .datastores.base_metadata_filter import BaseMetadataFilter + +__all__ = ["FilterAndRerankConfig", "DefaultMetadataFilters"] -__all__ = ["FilterAndRerankConfig"] +DefaultMetadataFilters: TypeAlias = Union[BaseMetadataFilter, "CompositeMetadataFilter"] class FilterAndRerankConfig(BaseModel): + default_metadata_filters: Optional[DefaultMetadataFilters] = None + """ + Optional metadata filter which is applied while retrieving from every datastore + linked to this agent. + """ + + per_datastore_metadata_filters: Optional[Dict[str, "CompositeMetadataFilter"]] = None + """Defines an optional custom metadata filter per datastore ID. + + Each entry in the dictionary should have a datastore UUID as the key, and the + value should be a metadata filter definition. 
The filter will be applied in + addition to filter(s) specified in `default_metadata_filters` and in the + `documents_filters` field in the `/query` request during retrieval. + """ + rerank_instructions: Optional[str] = None """Instructions that the reranker references when ranking retrievals. @@ -29,3 +50,6 @@ class FilterAndRerankConfig(BaseModel): top_k_reranked_chunks: Optional[int] = None """The number of highest ranked chunks after reranking to be used""" + + +from .datastores.composite_metadata_filter import CompositeMetadataFilter diff --git a/src/contextual/types/filter_and_rerank_config_param.py b/src/contextual/types/filter_and_rerank_config_param.py index d06d6ce..415293e 100644 --- a/src/contextual/types/filter_and_rerank_config_param.py +++ b/src/contextual/types/filter_and_rerank_config_param.py @@ -2,12 +2,32 @@ from __future__ import annotations -from typing_extensions import TypedDict +from typing import Dict, Union +from typing_extensions import TypeAlias, TypedDict -__all__ = ["FilterAndRerankConfigParam"] +from .datastores.base_metadata_filter_param import BaseMetadataFilterParam + +__all__ = ["FilterAndRerankConfigParam", "DefaultMetadataFilters"] + +DefaultMetadataFilters: TypeAlias = Union[BaseMetadataFilterParam, "CompositeMetadataFilterParam"] class FilterAndRerankConfigParam(TypedDict, total=False): + default_metadata_filters: DefaultMetadataFilters + """ + Optional metadata filter which is applied while retrieving from every datastore + linked to this agent. + """ + + per_datastore_metadata_filters: Dict[str, "CompositeMetadataFilterParam"] + """Defines an optional custom metadata filter per datastore ID. + + Each entry in the dictionary should have a datastore UUID as the key, and the + value should be a metadata filter definition. The filter will be applied in + addition to filter(s) specified in `default_metadata_filters` and in the + `documents_filters` field in the `/query` request during retrieval. 
+ """ + rerank_instructions: str """Instructions that the reranker references when ranking retrievals. @@ -29,3 +49,6 @@ class FilterAndRerankConfigParam(TypedDict, total=False): top_k_reranked_chunks: int """The number of highest ranked chunks after reranking to be used""" + + +from .datastores.composite_metadata_filter_param import CompositeMetadataFilterParam diff --git a/src/contextual/types/generate_create_params.py b/src/contextual/types/generate_create_params.py index c75e86c..61dafc8 100644 --- a/src/contextual/types/generate_create_params.py +++ b/src/contextual/types/generate_create_params.py @@ -19,7 +19,7 @@ class GenerateCreateParams(TypedDict, total=False): """ model: Required[str] - """The version of the Contextual's GLM to use. Currently, we just have "v1".""" + """The version of the Contextual's GLM to use. Currently, we have `v1` and `v2`.""" avoid_commentary: bool """ diff --git a/src/contextual/types/list_users_response.py b/src/contextual/types/list_users_response.py index 80d8cb2..aac19ee 100644 --- a/src/contextual/types/list_users_response.py +++ b/src/contextual/types/list_users_response.py @@ -5,7 +5,18 @@ from .._models import BaseModel -__all__ = ["ListUsersResponse", "User"] +__all__ = ["ListUsersResponse", "User", "UserPerAgentRole"] + + +class UserPerAgentRole(BaseModel): + agent_id: str + """ID of the agent on which to grant/revoke the role.""" + + grant: bool + """When set to true, the roles will be granted o/w revoked.""" + + roles: List[Literal["AGENT_LEVEL_USER"]] + """The roles that are granted/revoked""" class User(BaseModel): @@ -14,6 +25,9 @@ class User(BaseModel): email: str """The email of the user""" + agent_level_roles: Optional[List[Literal["AGENT_LEVEL_USER"]]] = None + """The user level roles of the user for agent level roles.""" + effective_roles: Optional[ List[ Literal[ @@ -25,6 +39,7 @@ class User(BaseModel): "CONTEXTUAL_INTERNAL_STAFF_USER", "TENANT_ADMIN", "SUPER_ADMIN", + "SERVICE_ACCOUNT", ] ] ] = None @@ -33,6 
+48,14 @@ class User(BaseModel): is_tenant_admin: Optional[bool] = None """Flag indicating if the user is a tenant admin""" + per_agent_roles: Optional[List[UserPerAgentRole]] = None + """Per agent level roles for the user. + + If a user is granted any role under `agent_level_roles`, then the user has that + role for all the agents. Only the roles that need to be updated should be part + of this. + """ + roles: Optional[ List[ Literal[ @@ -44,6 +67,7 @@ class User(BaseModel): "CONTEXTUAL_INTERNAL_STAFF_USER", "TENANT_ADMIN", "SUPER_ADMIN", + "SERVICE_ACCOUNT", ] ] ] = None diff --git a/src/contextual/types/new_user_param.py b/src/contextual/types/new_user_param.py index d809bb7..09bc557 100644 --- a/src/contextual/types/new_user_param.py +++ b/src/contextual/types/new_user_param.py @@ -2,19 +2,41 @@ from __future__ import annotations -from typing import List +from typing import List, Iterable from typing_extensions import Literal, Required, TypedDict -__all__ = ["NewUserParam"] +__all__ = ["NewUserParam", "PerAgentRole"] + + +class PerAgentRole(TypedDict, total=False): + agent_id: Required[str] + """ID of the agent on which to grant/revoke the role.""" + + grant: Required[bool] + """When set to true, the roles will be granted o/w revoked.""" + + roles: Required[List[Literal["AGENT_LEVEL_USER"]]] + """The roles that are granted/revoked""" class NewUserParam(TypedDict, total=False): email: Required[str] """The email of the user""" + agent_level_roles: List[Literal["AGENT_LEVEL_USER"]] + """The user level roles of the user for agent level roles.""" + is_tenant_admin: bool """Flag indicating if the user is a tenant admin""" + per_agent_roles: Iterable[PerAgentRole] + """Per agent level roles for the user. + + If a user is granted any role under `agent_level_roles`, then the user has that + role for all the agents. Only the roles that need to be updated should be part + of this. 
+ """ + roles: List[ Literal[ "VISITOR", @@ -25,6 +47,7 @@ class NewUserParam(TypedDict, total=False): "CONTEXTUAL_INTERNAL_STAFF_USER", "TENANT_ADMIN", "SUPER_ADMIN", + "SERVICE_ACCOUNT", ] ] """The user level roles of the user.""" diff --git a/src/contextual/types/parse_create_params.py b/src/contextual/types/parse_create_params.py index 84c33b0..2d236c3 100644 --- a/src/contextual/types/parse_create_params.py +++ b/src/contextual/types/parse_create_params.py @@ -39,7 +39,7 @@ class ParseCreateParams(TypedDict, total=False): max_split_table_cells: int """ Threshold number of table cells beyond which large tables are split if - `enable_split_tables` is True. Not permitted in `basic` parsing_mode. + `enable_split_tables` is True. Must be null if `enable_split_tables` is False. """ page_range: str diff --git a/src/contextual/types/parse_jobs_params.py b/src/contextual/types/parse_jobs_params.py index f44f301..64b92a0 100644 --- a/src/contextual/types/parse_jobs_params.py +++ b/src/contextual/types/parse_jobs_params.py @@ -12,4 +12,19 @@ class ParseJobsParams(TypedDict, total=False): + cursor: str + """ + Cursor from the previous call to list parse jobs, used to retrieve the next set + of results + """ + + limit: int + """Maximum number of parse jobs to return""" + uploaded_after: Annotated[Union[str, datetime], PropertyInfo(format="iso8601")] + """ + Filters to only documents uploaded to `/parse` at or after specified UTC + timestamp. If not provided, or if the provided timestamp is before the maximum + parse job retention period (30 days), the maximum retention period will be used + instead. + """ diff --git a/src/contextual/types/parse_jobs_response.py b/src/contextual/types/parse_jobs_response.py index 01b87c0..d511f7f 100644 --- a/src/contextual/types/parse_jobs_response.py +++ b/src/contextual/types/parse_jobs_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import List +from typing import List, Optional from typing_extensions import Literal from .._models import BaseModel @@ -25,3 +25,9 @@ class ParseJobsResponse(BaseModel): total_jobs: int """Total number of parse jobs""" + + next_cursor: Optional[str] = None + """Next cursor to continue pagination. + + Omitted if there are no more parse jobs after these ones. + """ diff --git a/src/contextual/types/rerank_create_params.py b/src/contextual/types/rerank_create_params.py index cd218b3..5e59ef6 100644 --- a/src/contextual/types/rerank_create_params.py +++ b/src/contextual/types/rerank_create_params.py @@ -18,7 +18,8 @@ class RerankCreateParams(TypedDict, total=False): model: Required[str] """The version of the reranker to use. - Currently, we just have "ctxl-rerank-en-v1-instruct". + Currently, we have: "ctxl-rerank-v2-instruct-multilingual", + "ctxl-rerank-v2-instruct-multilingual-mini", "ctxl-rerank-v1-instruct". """ query: Required[str] diff --git a/src/contextual/types/user_update_params.py b/src/contextual/types/user_update_params.py index be6ee2e..8656183 100644 --- a/src/contextual/types/user_update_params.py +++ b/src/contextual/types/user_update_params.py @@ -2,19 +2,30 @@ from __future__ import annotations -from typing import List +from typing import List, Iterable from typing_extensions import Literal, Required, TypedDict -__all__ = ["UserUpdateParams"] +__all__ = ["UserUpdateParams", "PerAgentRole"] class UserUpdateParams(TypedDict, total=False): email: Required[str] """The email of the user""" + agent_level_roles: List[Literal["AGENT_LEVEL_USER"]] + """The user level roles of the user for agent level roles.""" + is_tenant_admin: bool """Flag indicating if the user is a tenant admin""" + per_agent_roles: Iterable[PerAgentRole] + """Per agent level roles for the user. + + If a user is granted any role under `agent_level_roles`, then the user has that + role for all the agents. Only the roles that need to be updated should be part + of this. 
+ """ + roles: List[ Literal[ "VISITOR", @@ -25,6 +36,18 @@ class UserUpdateParams(TypedDict, total=False): "CONTEXTUAL_INTERNAL_STAFF_USER", "TENANT_ADMIN", "SUPER_ADMIN", + "SERVICE_ACCOUNT", ] ] """The user level roles of the user.""" + + +class PerAgentRole(TypedDict, total=False): + agent_id: Required[str] + """ID of the agent on which to grant/revoke the role.""" + + grant: Required[bool] + """When set to true, the roles will be granted o/w revoked.""" + + roles: Required[List[Literal["AGENT_LEVEL_USER"]]] + """The roles that are granted/revoked""" diff --git a/tests/api_resources/agents/datasets/__init__.py b/tests/api_resources/agents/datasets/__init__.py deleted file mode 100644 index fd8019a..0000000 --- a/tests/api_resources/agents/datasets/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/datasets/test_evaluate.py b/tests/api_resources/agents/datasets/test_evaluate.py deleted file mode 100644 index 752fcd6..0000000 --- a/tests/api_resources/agents/datasets/test_evaluate.py +++ /dev/null @@ -1,716 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import httpx -import pytest -from respx import MockRouter - -from contextual import ContextualAI, AsyncContextualAI -from tests.utils import assert_matches_type -from contextual._response import ( - BinaryAPIResponse, - AsyncBinaryAPIResponse, - StreamedBinaryAPIResponse, - AsyncStreamedBinaryAPIResponse, -) -from contextual.types.agents import DatasetMetadata, ListDatasetsResponse, CreateDatasetResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEvaluate: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_create(self, client: ContextualAI) -> None: - evaluate = client.agents.datasets.evaluate.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - @parametrize - def test_raw_response_create(self, client: ContextualAI) -> None: - response = client.agents.datasets.evaluate.with_raw_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = response.parse() - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - @parametrize - def test_streaming_response_create(self, client: ContextualAI) -> None: - with client.agents.datasets.evaluate.with_streaming_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="evaluation_set", - file=b"raw file contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - - evaluate = response.parse() - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_create(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.evaluate.with_raw_response.create( - agent_id="", - dataset_name="dataset_name", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_method_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/evaluate/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - evaluate = client.agents.datasets.evaluate.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert evaluate.is_closed - assert evaluate.json() == {"foo": "bar"} - assert cast(Any, evaluate.is_closed) is True - assert isinstance(evaluate, BinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_method_retrieve_with_all_params(self, client: ContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/evaluate/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - evaluate = client.agents.datasets.evaluate.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - batch_size=1, - version="version", - ) - assert evaluate.is_closed - assert evaluate.json() == {"foo": "bar"} - assert cast(Any, evaluate.is_closed) is True - assert isinstance(evaluate, BinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_raw_response_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None: - 
respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/evaluate/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - - evaluate = client.agents.datasets.evaluate.with_raw_response.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert evaluate.is_closed is True - assert evaluate.http_request.headers.get("X-Stainless-Lang") == "python" - assert evaluate.json() == {"foo": "bar"} - assert isinstance(evaluate, BinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_streaming_response_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/evaluate/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - with client.agents.datasets.evaluate.with_streaming_response.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as evaluate: - assert not evaluate.is_closed - assert evaluate.http_request.headers.get("X-Stainless-Lang") == "python" - - assert evaluate.json() == {"foo": "bar"} - assert cast(Any, evaluate.is_closed) is True - assert isinstance(evaluate, StreamedBinaryAPIResponse) - - assert cast(Any, evaluate.is_closed) is True - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_path_params_retrieve(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.evaluate.with_raw_response.retrieve( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - client.agents.datasets.evaluate.with_raw_response.retrieve( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - def test_method_update(self, client: ContextualAI) -> None: - evaluate = 
client.agents.datasets.evaluate.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - @parametrize - def test_raw_response_update(self, client: ContextualAI) -> None: - response = client.agents.datasets.evaluate.with_raw_response.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = response.parse() - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - @parametrize - def test_streaming_response_update(self, client: ContextualAI) -> None: - with client.agents.datasets.evaluate.with_streaming_response.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="evaluation_set", - file=b"raw file contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = response.parse() - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_update(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.evaluate.with_raw_response.update( - dataset_name="dataset_name", - agent_id="", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - client.agents.datasets.evaluate.with_raw_response.update( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="evaluation_set", - 
file=b"raw file contents", - ) - - @parametrize - def test_method_list(self, client: ContextualAI) -> None: - evaluate = client.agents.datasets.evaluate.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListDatasetsResponse, evaluate, path=["response"]) - - @parametrize - def test_method_list_with_all_params(self, client: ContextualAI) -> None: - evaluate = client.agents.datasets.evaluate.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - ) - assert_matches_type(ListDatasetsResponse, evaluate, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: ContextualAI) -> None: - response = client.agents.datasets.evaluate.with_raw_response.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = response.parse() - assert_matches_type(ListDatasetsResponse, evaluate, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: ContextualAI) -> None: - with client.agents.datasets.evaluate.with_streaming_response.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = response.parse() - assert_matches_type(ListDatasetsResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_list(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.evaluate.with_raw_response.list( - agent_id="", - ) - - @parametrize - def test_method_delete(self, client: ContextualAI) -> None: - evaluate = client.agents.datasets.evaluate.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - 
assert_matches_type(object, evaluate, path=["response"]) - - @parametrize - def test_raw_response_delete(self, client: ContextualAI) -> None: - response = client.agents.datasets.evaluate.with_raw_response.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = response.parse() - assert_matches_type(object, evaluate, path=["response"]) - - @parametrize - def test_streaming_response_delete(self, client: ContextualAI) -> None: - with client.agents.datasets.evaluate.with_streaming_response.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = response.parse() - assert_matches_type(object, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_delete(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.evaluate.with_raw_response.delete( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - client.agents.datasets.evaluate.with_raw_response.delete( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - def test_method_metadata(self, client: ContextualAI) -> None: - evaluate = client.agents.datasets.evaluate.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(DatasetMetadata, evaluate, path=["response"]) - - @parametrize - def test_method_metadata_with_all_params(self, client: ContextualAI) -> None: - evaluate = client.agents.datasets.evaluate.metadata( - 
dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - version="version", - ) - assert_matches_type(DatasetMetadata, evaluate, path=["response"]) - - @parametrize - def test_raw_response_metadata(self, client: ContextualAI) -> None: - response = client.agents.datasets.evaluate.with_raw_response.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = response.parse() - assert_matches_type(DatasetMetadata, evaluate, path=["response"]) - - @parametrize - def test_streaming_response_metadata(self, client: ContextualAI) -> None: - with client.agents.datasets.evaluate.with_streaming_response.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = response.parse() - assert_matches_type(DatasetMetadata, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_metadata(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.evaluate.with_raw_response.metadata( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - client.agents.datasets.evaluate.with_raw_response.metadata( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - -class TestAsyncEvaluate: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_create(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.datasets.evaluate.create( - 
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - @parametrize - async def test_raw_response_create(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.evaluate.with_raw_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = await response.parse() - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - @parametrize - async def test_streaming_response_create(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.evaluate.with_streaming_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="evaluation_set", - file=b"raw file contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = await response.parse() - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_create(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.create( - agent_id="", - dataset_name="dataset_name", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_method_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None: - 
respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/evaluate/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - evaluate = await async_client.agents.datasets.evaluate.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert evaluate.is_closed - assert await evaluate.json() == {"foo": "bar"} - assert cast(Any, evaluate.is_closed) is True - assert isinstance(evaluate, AsyncBinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_method_retrieve_with_all_params( - self, async_client: AsyncContextualAI, respx_mock: MockRouter - ) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/evaluate/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - evaluate = await async_client.agents.datasets.evaluate.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - batch_size=1, - version="version", - ) - assert evaluate.is_closed - assert await evaluate.json() == {"foo": "bar"} - assert cast(Any, evaluate.is_closed) is True - assert isinstance(evaluate, AsyncBinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_raw_response_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/evaluate/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - - evaluate = await async_client.agents.datasets.evaluate.with_raw_response.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert evaluate.is_closed is True - assert evaluate.http_request.headers.get("X-Stainless-Lang") == "python" - assert await evaluate.json() == {"foo": "bar"} - assert isinstance(evaluate, AsyncBinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def 
test_streaming_response_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/evaluate/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - async with async_client.agents.datasets.evaluate.with_streaming_response.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as evaluate: - assert not evaluate.is_closed - assert evaluate.http_request.headers.get("X-Stainless-Lang") == "python" - - assert await evaluate.json() == {"foo": "bar"} - assert cast(Any, evaluate.is_closed) is True - assert isinstance(evaluate, AsyncStreamedBinaryAPIResponse) - - assert cast(Any, evaluate.is_closed) is True - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_path_params_retrieve(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.retrieve( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.retrieve( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - async def test_method_update(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.datasets.evaluate.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - @parametrize - async def test_raw_response_update(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.evaluate.with_raw_response.update( - dataset_name="dataset_name", - 
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = await response.parse() - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - @parametrize - async def test_streaming_response_update(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.evaluate.with_streaming_response.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="evaluation_set", - file=b"raw file contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = await response.parse() - assert_matches_type(CreateDatasetResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_update(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.update( - dataset_name="dataset_name", - agent_id="", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.update( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="evaluation_set", - file=b"raw file contents", - ) - - @parametrize - async def test_method_list(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.datasets.evaluate.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListDatasetsResponse, evaluate, path=["response"]) - - @parametrize - async def 
test_method_list_with_all_params(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.datasets.evaluate.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - ) - assert_matches_type(ListDatasetsResponse, evaluate, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.evaluate.with_raw_response.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = await response.parse() - assert_matches_type(ListDatasetsResponse, evaluate, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.evaluate.with_streaming_response.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = await response.parse() - assert_matches_type(ListDatasetsResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_list(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.list( - agent_id="", - ) - - @parametrize - async def test_method_delete(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.datasets.evaluate.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(object, evaluate, path=["response"]) - - @parametrize - async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> None: - response = await 
async_client.agents.datasets.evaluate.with_raw_response.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = await response.parse() - assert_matches_type(object, evaluate, path=["response"]) - - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.evaluate.with_streaming_response.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = await response.parse() - assert_matches_type(object, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_delete(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.delete( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.delete( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - async def test_method_metadata(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.datasets.evaluate.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(DatasetMetadata, evaluate, path=["response"]) - - @parametrize - async def test_method_metadata_with_all_params(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.datasets.evaluate.metadata( - 
dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - version="version", - ) - assert_matches_type(DatasetMetadata, evaluate, path=["response"]) - - @parametrize - async def test_raw_response_metadata(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.evaluate.with_raw_response.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = await response.parse() - assert_matches_type(DatasetMetadata, evaluate, path=["response"]) - - @parametrize - async def test_streaming_response_metadata(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.evaluate.with_streaming_response.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = await response.parse() - assert_matches_type(DatasetMetadata, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_metadata(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.metadata( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - await async_client.agents.datasets.evaluate.with_raw_response.metadata( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) diff --git a/tests/api_resources/agents/datasets/test_tune.py b/tests/api_resources/agents/datasets/test_tune.py deleted file mode 100644 index 704dac4..0000000 --- 
a/tests/api_resources/agents/datasets/test_tune.py +++ /dev/null @@ -1,716 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import httpx -import pytest -from respx import MockRouter - -from contextual import ContextualAI, AsyncContextualAI -from tests.utils import assert_matches_type -from contextual._response import ( - BinaryAPIResponse, - AsyncBinaryAPIResponse, - StreamedBinaryAPIResponse, - AsyncStreamedBinaryAPIResponse, -) -from contextual.types.agents import DatasetMetadata, ListDatasetsResponse, CreateDatasetResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestTune: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_create(self, client: ContextualAI) -> None: - tune = client.agents.datasets.tune.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="tuning_set", - file=b"raw file contents", - ) - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - @parametrize - def test_raw_response_create(self, client: ContextualAI) -> None: - response = client.agents.datasets.tune.with_raw_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = response.parse() - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - @parametrize - def test_streaming_response_create(self, client: ContextualAI) -> None: - with client.agents.datasets.tune.with_streaming_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="tuning_set", - file=b"raw file 
contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = response.parse() - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_create(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.tune.with_raw_response.create( - agent_id="", - dataset_name="dataset_name", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_method_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - tune = client.agents.datasets.tune.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert tune.is_closed - assert tune.json() == {"foo": "bar"} - assert cast(Any, tune.is_closed) is True - assert isinstance(tune, BinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_method_retrieve_with_all_params(self, client: ContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - tune = client.agents.datasets.tune.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - batch_size=1, - version="version", - ) - assert tune.is_closed - assert tune.json() == {"foo": "bar"} - assert cast(Any, tune.is_closed) is True - assert isinstance(tune, BinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_raw_response_retrieve(self, client: ContextualAI, respx_mock: 
MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - - tune = client.agents.datasets.tune.with_raw_response.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert tune.is_closed is True - assert tune.http_request.headers.get("X-Stainless-Lang") == "python" - assert tune.json() == {"foo": "bar"} - assert isinstance(tune, BinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_streaming_response_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - with client.agents.datasets.tune.with_streaming_response.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as tune: - assert not tune.is_closed - assert tune.http_request.headers.get("X-Stainless-Lang") == "python" - - assert tune.json() == {"foo": "bar"} - assert cast(Any, tune.is_closed) is True - assert isinstance(tune, StreamedBinaryAPIResponse) - - assert cast(Any, tune.is_closed) is True - - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_path_params_retrieve(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.tune.with_raw_response.retrieve( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - client.agents.datasets.tune.with_raw_response.retrieve( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - def test_method_update(self, client: ContextualAI) -> None: - tune = client.agents.datasets.tune.update( - 
dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="tuning_set", - file=b"raw file contents", - ) - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - @parametrize - def test_raw_response_update(self, client: ContextualAI) -> None: - response = client.agents.datasets.tune.with_raw_response.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = response.parse() - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - @parametrize - def test_streaming_response_update(self, client: ContextualAI) -> None: - with client.agents.datasets.tune.with_streaming_response.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="tuning_set", - file=b"raw file contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = response.parse() - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_update(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.tune.with_raw_response.update( - dataset_name="dataset_name", - agent_id="", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - client.agents.datasets.tune.with_raw_response.update( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - @parametrize - def test_method_list(self, client: ContextualAI) 
-> None: - tune = client.agents.datasets.tune.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListDatasetsResponse, tune, path=["response"]) - - @parametrize - def test_method_list_with_all_params(self, client: ContextualAI) -> None: - tune = client.agents.datasets.tune.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - ) - assert_matches_type(ListDatasetsResponse, tune, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: ContextualAI) -> None: - response = client.agents.datasets.tune.with_raw_response.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = response.parse() - assert_matches_type(ListDatasetsResponse, tune, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: ContextualAI) -> None: - with client.agents.datasets.tune.with_streaming_response.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = response.parse() - assert_matches_type(ListDatasetsResponse, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_list(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.tune.with_raw_response.list( - agent_id="", - ) - - @parametrize - def test_method_delete(self, client: ContextualAI) -> None: - tune = client.agents.datasets.tune.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(object, tune, path=["response"]) - - @parametrize - def test_raw_response_delete(self, client: ContextualAI) -> None: - response = 
client.agents.datasets.tune.with_raw_response.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = response.parse() - assert_matches_type(object, tune, path=["response"]) - - @parametrize - def test_streaming_response_delete(self, client: ContextualAI) -> None: - with client.agents.datasets.tune.with_streaming_response.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = response.parse() - assert_matches_type(object, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_delete(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.tune.with_raw_response.delete( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - client.agents.datasets.tune.with_raw_response.delete( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - def test_method_metadata(self, client: ContextualAI) -> None: - tune = client.agents.datasets.tune.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(DatasetMetadata, tune, path=["response"]) - - @parametrize - def test_method_metadata_with_all_params(self, client: ContextualAI) -> None: - tune = client.agents.datasets.tune.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - version="version", - ) - assert_matches_type(DatasetMetadata, tune, path=["response"]) - - @parametrize - def 
test_raw_response_metadata(self, client: ContextualAI) -> None: - response = client.agents.datasets.tune.with_raw_response.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = response.parse() - assert_matches_type(DatasetMetadata, tune, path=["response"]) - - @parametrize - def test_streaming_response_metadata(self, client: ContextualAI) -> None: - with client.agents.datasets.tune.with_streaming_response.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = response.parse() - assert_matches_type(DatasetMetadata, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_metadata(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.datasets.tune.with_raw_response.metadata( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - client.agents.datasets.tune.with_raw_response.metadata( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - -class TestAsyncTune: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_create(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.datasets.tune.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="tuning_set", - file=b"raw file contents", - ) - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - @parametrize - async def 
test_raw_response_create(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.tune.with_raw_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = await response.parse() - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - @parametrize - async def test_streaming_response_create(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.tune.with_streaming_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - dataset_type="tuning_set", - file=b"raw file contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = await response.parse() - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_create(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.create( - agent_id="", - dataset_name="dataset_name", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_method_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - tune = await async_client.agents.datasets.tune.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert tune.is_closed - assert await 
tune.json() == {"foo": "bar"} - assert cast(Any, tune.is_closed) is True - assert isinstance(tune, AsyncBinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_method_retrieve_with_all_params( - self, async_client: AsyncContextualAI, respx_mock: MockRouter - ) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - tune = await async_client.agents.datasets.tune.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - batch_size=1, - version="version", - ) - assert tune.is_closed - assert await tune.json() == {"foo": "bar"} - assert cast(Any, tune.is_closed) is True - assert isinstance(tune, AsyncBinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_raw_response_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - - tune = await async_client.agents.datasets.tune.with_raw_response.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert tune.is_closed is True - assert tune.http_request.headers.get("X-Stainless-Lang") == "python" - assert await tune.json() == {"foo": "bar"} - assert isinstance(tune, AsyncBinaryAPIResponse) - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_streaming_response_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None: - respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock( - return_value=httpx.Response(200, json={"foo": "bar"}) - ) - async with async_client.agents.datasets.tune.with_streaming_response.retrieve( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) 
as tune: - assert not tune.is_closed - assert tune.http_request.headers.get("X-Stainless-Lang") == "python" - - assert await tune.json() == {"foo": "bar"} - assert cast(Any, tune.is_closed) is True - assert isinstance(tune, AsyncStreamedBinaryAPIResponse) - - assert cast(Any, tune.is_closed) is True - - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_path_params_retrieve(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.retrieve( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.retrieve( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - async def test_method_update(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.datasets.tune.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="tuning_set", - file=b"raw file contents", - ) - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - @parametrize - async def test_raw_response_update(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.tune.with_raw_response.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = await response.parse() - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - @parametrize - async def test_streaming_response_update(self, async_client: AsyncContextualAI) -> None: - async with 
async_client.agents.datasets.tune.with_streaming_response.update( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="tuning_set", - file=b"raw file contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = await response.parse() - assert_matches_type(CreateDatasetResponse, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_update(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.update( - dataset_name="dataset_name", - agent_id="", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.update( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_type="tuning_set", - file=b"raw file contents", - ) - - @parametrize - async def test_method_list(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.datasets.tune.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListDatasetsResponse, tune, path=["response"]) - - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.datasets.tune.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - dataset_name="dataset_name", - ) - assert_matches_type(ListDatasetsResponse, tune, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.tune.with_raw_response.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - 
assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = await response.parse() - assert_matches_type(ListDatasetsResponse, tune, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.tune.with_streaming_response.list( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = await response.parse() - assert_matches_type(ListDatasetsResponse, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_list(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.list( - agent_id="", - ) - - @parametrize - async def test_method_delete(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.datasets.tune.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(object, tune, path=["response"]) - - @parametrize - async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.tune.with_raw_response.delete( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = await response.parse() - assert_matches_type(object, tune, path=["response"]) - - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.tune.with_streaming_response.delete( - dataset_name="dataset_name", - 
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = await response.parse() - assert_matches_type(object, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_delete(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.delete( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.delete( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - async def test_method_metadata(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.datasets.tune.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(DatasetMetadata, tune, path=["response"]) - - @parametrize - async def test_method_metadata_with_all_params(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.datasets.tune.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - version="version", - ) - assert_matches_type(DatasetMetadata, tune, path=["response"]) - - @parametrize - async def test_raw_response_metadata(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.datasets.tune.with_raw_response.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = await response.parse() - assert_matches_type(DatasetMetadata, tune, 
path=["response"]) - - @parametrize - async def test_streaming_response_metadata(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.datasets.tune.with_streaming_response.metadata( - dataset_name="dataset_name", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = await response.parse() - assert_matches_type(DatasetMetadata, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_metadata(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.metadata( - dataset_name="dataset_name", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"): - await async_client.agents.datasets.tune.with_raw_response.metadata( - dataset_name="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) diff --git a/tests/api_resources/agents/evaluate/__init__.py b/tests/api_resources/agents/evaluate/__init__.py deleted file mode 100644 index fd8019a..0000000 --- a/tests/api_resources/agents/evaluate/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/evaluate/test_jobs.py b/tests/api_resources/agents/evaluate/test_jobs.py deleted file mode 100644 index 75f186f..0000000 --- a/tests/api_resources/agents/evaluate/test_jobs.py +++ /dev/null @@ -1,290 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from contextual import ContextualAI, AsyncContextualAI -from tests.utils import assert_matches_type -from contextual.types.agents.evaluate import EvaluationJobMetadata, ListEvaluationJobsResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestJobs: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_list(self, client: ContextualAI) -> None: - job = client.agents.evaluate.jobs.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListEvaluationJobsResponse, job, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: ContextualAI) -> None: - response = client.agents.evaluate.jobs.with_raw_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(ListEvaluationJobsResponse, job, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: ContextualAI) -> None: - with client.agents.evaluate.jobs.with_streaming_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(ListEvaluationJobsResponse, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_list(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.evaluate.jobs.with_raw_response.list( - "", - ) - - @parametrize - def test_method_cancel(self, client: ContextualAI) -> None: - job = client.agents.evaluate.jobs.cancel( - 
job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(object, job, path=["response"]) - - @parametrize - def test_raw_response_cancel(self, client: ContextualAI) -> None: - response = client.agents.evaluate.jobs.with_raw_response.cancel( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(object, job, path=["response"]) - - @parametrize - def test_streaming_response_cancel(self, client: ContextualAI) -> None: - with client.agents.evaluate.jobs.with_streaming_response.cancel( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(object, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_cancel(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.evaluate.jobs.with_raw_response.cancel( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): - client.agents.evaluate.jobs.with_raw_response.cancel( - job_id="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - def test_method_metadata(self, client: ContextualAI) -> None: - job = client.agents.evaluate.jobs.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(EvaluationJobMetadata, job, path=["response"]) - - @parametrize - def 
test_raw_response_metadata(self, client: ContextualAI) -> None: - response = client.agents.evaluate.jobs.with_raw_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(EvaluationJobMetadata, job, path=["response"]) - - @parametrize - def test_streaming_response_metadata(self, client: ContextualAI) -> None: - with client.agents.evaluate.jobs.with_streaming_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(EvaluationJobMetadata, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_metadata(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.evaluate.jobs.with_raw_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): - client.agents.evaluate.jobs.with_raw_response.metadata( - job_id="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - -class TestAsyncJobs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_list(self, async_client: AsyncContextualAI) -> None: - job = await async_client.agents.evaluate.jobs.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListEvaluationJobsResponse, job, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: 
AsyncContextualAI) -> None: - response = await async_client.agents.evaluate.jobs.with_raw_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(ListEvaluationJobsResponse, job, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.evaluate.jobs.with_streaming_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(ListEvaluationJobsResponse, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_list(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.evaluate.jobs.with_raw_response.list( - "", - ) - - @parametrize - async def test_method_cancel(self, async_client: AsyncContextualAI) -> None: - job = await async_client.agents.evaluate.jobs.cancel( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(object, job, path=["response"]) - - @parametrize - async def test_raw_response_cancel(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.evaluate.jobs.with_raw_response.cancel( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(object, job, path=["response"]) - - @parametrize - async def 
test_streaming_response_cancel(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.evaluate.jobs.with_streaming_response.cancel( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(object, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_cancel(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.evaluate.jobs.with_raw_response.cancel( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): - await async_client.agents.evaluate.jobs.with_raw_response.cancel( - job_id="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - async def test_method_metadata(self, async_client: AsyncContextualAI) -> None: - job = await async_client.agents.evaluate.jobs.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(EvaluationJobMetadata, job, path=["response"]) - - @parametrize - async def test_raw_response_metadata(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.evaluate.jobs.with_raw_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(EvaluationJobMetadata, job, path=["response"]) - - @parametrize - async def test_streaming_response_metadata(self, 
async_client: AsyncContextualAI) -> None: - async with async_client.agents.evaluate.jobs.with_streaming_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(EvaluationJobMetadata, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_metadata(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.evaluate.jobs.with_raw_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): - await async_client.agents.evaluate.jobs.with_raw_response.metadata( - job_id="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) diff --git a/tests/api_resources/agents/test_evaluate.py b/tests/api_resources/agents/test_evaluate.py deleted file mode 100644 index ab28d72..0000000 --- a/tests/api_resources/agents/test_evaluate.py +++ /dev/null @@ -1,132 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from contextual import ContextualAI, AsyncContextualAI -from tests.utils import assert_matches_type -from contextual.types.agents import CreateEvaluationResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEvaluate: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_create(self, client: ContextualAI) -> None: - evaluate = client.agents.evaluate.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - metrics=["equivalence"], - ) - assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"]) - - @parametrize - def test_method_create_with_all_params(self, client: ContextualAI) -> None: - evaluate = client.agents.evaluate.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - metrics=["equivalence"], - evalset_file=b"raw file contents", - evalset_name="evalset_name", - llm_model_id="llm_model_id", - notes="notes", - override_configuration="override_configuration", - ) - assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"]) - - @parametrize - def test_raw_response_create(self, client: ContextualAI) -> None: - response = client.agents.evaluate.with_raw_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - metrics=["equivalence"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = response.parse() - assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"]) - - @parametrize - def test_streaming_response_create(self, client: ContextualAI) -> None: - with client.agents.evaluate.with_streaming_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - metrics=["equivalence"], - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = response.parse() - assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_create(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.evaluate.with_raw_response.create( - agent_id="", - metrics=["equivalence"], - ) - - -class TestAsyncEvaluate: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_create(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.evaluate.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - metrics=["equivalence"], - ) - assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"]) - - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncContextualAI) -> None: - evaluate = await async_client.agents.evaluate.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - metrics=["equivalence"], - evalset_file=b"raw file contents", - evalset_name="evalset_name", - llm_model_id="llm_model_id", - notes="notes", - override_configuration="override_configuration", - ) - assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"]) - - @parametrize - async def test_raw_response_create(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.evaluate.with_raw_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - metrics=["equivalence"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluate = await response.parse() - assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"]) - - @parametrize - async def 
test_streaming_response_create(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.evaluate.with_streaming_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - metrics=["equivalence"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluate = await response.parse() - assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_create(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.evaluate.with_raw_response.create( - agent_id="", - metrics=["equivalence"], - ) diff --git a/tests/api_resources/agents/test_query.py b/tests/api_resources/agents/test_query.py index 26b5b1d..d706d78 100644 --- a/tests/api_resources/agents/test_query.py +++ b/tests/api_resources/agents/test_query.py @@ -49,16 +49,28 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None: retrievals_only=True, conversation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", documents_filters={ - "filters": [ - { - "field": "field1", - "operator": "equals", - "value": "value1", - } - ], + "filters": [], "operator": "AND", }, llm_model_id="llm_model_id", + override_configuration={ + "enable_filter": True, + "enable_rerank": True, + "filter_model": "filter_model", + "filter_prompt": "filter_prompt", + "lexical_alpha": 0, + "max_new_tokens": 0, + "model": "model", + "rerank_instructions": "rerank_instructions", + "reranker": "reranker", + "reranker_score_filter_threshold": 0, + "semantic_alpha": 0, + "system_prompt": "system_prompt", + "temperature": 0, + "top_k_reranked_chunks": 0, + "top_k_retrieved_chunks": 0, + "top_p": 0, + }, stream=True, structured_output={ "json_schema": {}, @@ -279,7 +291,9 @@ def 
test_path_params_retrieval_info(self, client: ContextualAI) -> None: class TestAsyncQuery: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncContextualAI) -> None: @@ -308,16 +322,28 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual retrievals_only=True, conversation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", documents_filters={ - "filters": [ - { - "field": "field1", - "operator": "equals", - "value": "value1", - } - ], + "filters": [], "operator": "AND", }, llm_model_id="llm_model_id", + override_configuration={ + "enable_filter": True, + "enable_rerank": True, + "filter_model": "filter_model", + "filter_prompt": "filter_prompt", + "lexical_alpha": 0, + "max_new_tokens": 0, + "model": "model", + "rerank_instructions": "rerank_instructions", + "reranker": "reranker", + "reranker_score_filter_threshold": 0, + "semantic_alpha": 0, + "system_prompt": "system_prompt", + "temperature": 0, + "top_k_reranked_chunks": 0, + "top_k_retrieved_chunks": 0, + "top_p": 0, + }, stream=True, structured_output={ "json_schema": {}, diff --git a/tests/api_resources/agents/test_tune.py b/tests/api_resources/agents/test_tune.py deleted file mode 100644 index c9dacef..0000000 --- a/tests/api_resources/agents/test_tune.py +++ /dev/null @@ -1,138 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from contextual import ContextualAI, AsyncContextualAI -from tests.utils import assert_matches_type -from contextual.types.agents import CreateTuneResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestTune: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_create(self, client: ContextualAI) -> None: - tune = client.agents.tune.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(CreateTuneResponse, tune, path=["response"]) - - @parametrize - def test_method_create_with_all_params(self, client: ContextualAI) -> None: - tune = client.agents.tune.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - hyperparams_learning_rate=0.05, - hyperparams_lora_alpha=8, - hyperparams_lora_dropout=0, - hyperparams_lora_rank=8, - hyperparams_num_epochs=1, - hyperparams_warmup_ratio=0, - metadata_file=b"raw file contents", - sdp_only=True, - synth_data=True, - test_dataset_name="test_dataset_name", - test_file=b"raw file contents", - train_dataset_name="train_dataset_name", - training_file=b"raw file contents", - ) - assert_matches_type(CreateTuneResponse, tune, path=["response"]) - - @parametrize - def test_raw_response_create(self, client: ContextualAI) -> None: - response = client.agents.tune.with_raw_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = response.parse() - assert_matches_type(CreateTuneResponse, tune, path=["response"]) - - @parametrize - def test_streaming_response_create(self, client: ContextualAI) -> None: - with client.agents.tune.with_streaming_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not 
response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = response.parse() - assert_matches_type(CreateTuneResponse, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_create(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.tune.with_raw_response.create( - agent_id="", - ) - - -class TestAsyncTune: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_create(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.tune.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(CreateTuneResponse, tune, path=["response"]) - - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncContextualAI) -> None: - tune = await async_client.agents.tune.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - hyperparams_learning_rate=0.05, - hyperparams_lora_alpha=8, - hyperparams_lora_dropout=0, - hyperparams_lora_rank=8, - hyperparams_num_epochs=1, - hyperparams_warmup_ratio=0, - metadata_file=b"raw file contents", - sdp_only=True, - synth_data=True, - test_dataset_name="test_dataset_name", - test_file=b"raw file contents", - train_dataset_name="train_dataset_name", - training_file=b"raw file contents", - ) - assert_matches_type(CreateTuneResponse, tune, path=["response"]) - - @parametrize - async def test_raw_response_create(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.tune.with_raw_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tune = await response.parse() - assert_matches_type(CreateTuneResponse, 
tune, path=["response"]) - - @parametrize - async def test_streaming_response_create(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.tune.with_streaming_response.create( - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tune = await response.parse() - assert_matches_type(CreateTuneResponse, tune, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_create(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.tune.with_raw_response.create( - agent_id="", - ) diff --git a/tests/api_resources/agents/tune/__init__.py b/tests/api_resources/agents/tune/__init__.py deleted file mode 100644 index fd8019a..0000000 --- a/tests/api_resources/agents/tune/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/tune/test_jobs.py b/tests/api_resources/agents/tune/test_jobs.py deleted file mode 100644 index 0074de1..0000000 --- a/tests/api_resources/agents/tune/test_jobs.py +++ /dev/null @@ -1,290 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from contextual import ContextualAI, AsyncContextualAI -from tests.utils import assert_matches_type -from contextual.types.agents.tune import TuneJobMetadata, ListTuneJobsResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestJobs: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_list(self, client: ContextualAI) -> None: - job = client.agents.tune.jobs.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListTuneJobsResponse, job, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: ContextualAI) -> None: - response = client.agents.tune.jobs.with_raw_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(ListTuneJobsResponse, job, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: ContextualAI) -> None: - with client.agents.tune.jobs.with_streaming_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(ListTuneJobsResponse, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_list(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.tune.jobs.with_raw_response.list( - "", - ) - - @parametrize - def test_method_delete(self, client: ContextualAI) -> None: - job = client.agents.tune.jobs.delete( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - 
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(object, job, path=["response"]) - - @parametrize - def test_raw_response_delete(self, client: ContextualAI) -> None: - response = client.agents.tune.jobs.with_raw_response.delete( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(object, job, path=["response"]) - - @parametrize - def test_streaming_response_delete(self, client: ContextualAI) -> None: - with client.agents.tune.jobs.with_streaming_response.delete( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(object, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_delete(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.tune.jobs.with_raw_response.delete( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): - client.agents.tune.jobs.with_raw_response.delete( - job_id="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - def test_method_metadata(self, client: ContextualAI) -> None: - job = client.agents.tune.jobs.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(TuneJobMetadata, job, path=["response"]) - - @parametrize - def test_raw_response_metadata(self, client: ContextualAI) -> None: - response = 
client.agents.tune.jobs.with_raw_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = response.parse() - assert_matches_type(TuneJobMetadata, job, path=["response"]) - - @parametrize - def test_streaming_response_metadata(self, client: ContextualAI) -> None: - with client.agents.tune.jobs.with_streaming_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = response.parse() - assert_matches_type(TuneJobMetadata, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_metadata(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.tune.jobs.with_raw_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): - client.agents.tune.jobs.with_raw_response.metadata( - job_id="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - -class TestAsyncJobs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def test_method_list(self, async_client: AsyncContextualAI) -> None: - job = await async_client.agents.tune.jobs.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListTuneJobsResponse, job, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.tune.jobs.with_raw_response.list( - 
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(ListTuneJobsResponse, job, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.tune.jobs.with_streaming_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(ListTuneJobsResponse, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_list(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.tune.jobs.with_raw_response.list( - "", - ) - - @parametrize - async def test_method_delete(self, async_client: AsyncContextualAI) -> None: - job = await async_client.agents.tune.jobs.delete( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(object, job, path=["response"]) - - @parametrize - async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.tune.jobs.with_raw_response.delete( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(object, job, path=["response"]) - - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.tune.jobs.with_streaming_response.delete( - 
job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(object, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_delete(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.tune.jobs.with_raw_response.delete( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): - await async_client.agents.tune.jobs.with_raw_response.delete( - job_id="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - @parametrize - async def test_method_metadata(self, async_client: AsyncContextualAI) -> None: - job = await async_client.agents.tune.jobs.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(TuneJobMetadata, job, path=["response"]) - - @parametrize - async def test_raw_response_metadata(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.tune.jobs.with_raw_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - job = await response.parse() - assert_matches_type(TuneJobMetadata, job, path=["response"]) - - @parametrize - async def test_streaming_response_metadata(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.tune.jobs.with_streaming_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - 
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - job = await response.parse() - assert_matches_type(TuneJobMetadata, job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_metadata(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.tune.jobs.with_raw_response.metadata( - job_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - agent_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): - await async_client.agents.tune.jobs.with_raw_response.metadata( - job_id="", - agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) diff --git a/tests/api_resources/agents/tune/test_models.py b/tests/api_resources/agents/tune/test_models.py deleted file mode 100644 index f2918fe..0000000 --- a/tests/api_resources/agents/tune/test_models.py +++ /dev/null @@ -1,98 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from contextual import ContextualAI, AsyncContextualAI -from tests.utils import assert_matches_type -from contextual.types.agents.tune import ListTuneModelsResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestModels: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - def test_method_list(self, client: ContextualAI) -> None: - model = client.agents.tune.models.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListTuneModelsResponse, model, path=["response"]) - - @parametrize - def test_raw_response_list(self, client: ContextualAI) -> None: - response = client.agents.tune.models.with_raw_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(ListTuneModelsResponse, model, path=["response"]) - - @parametrize - def test_streaming_response_list(self, client: ContextualAI) -> None: - with client.agents.tune.models.with_streaming_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = response.parse() - assert_matches_type(ListTuneModelsResponse, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - def test_path_params_list(self, client: ContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - client.agents.tune.models.with_raw_response.list( - "", - ) - - -class TestAsyncModels: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @parametrize - async def 
test_method_list(self, async_client: AsyncContextualAI) -> None: - model = await async_client.agents.tune.models.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(ListTuneModelsResponse, model, path=["response"]) - - @parametrize - async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None: - response = await async_client.agents.tune.models.with_raw_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(ListTuneModelsResponse, model, path=["response"]) - - @parametrize - async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None: - async with async_client.agents.tune.models.with_streaming_response.list( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = await response.parse() - assert_matches_type(ListTuneModelsResponse, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @parametrize - async def test_path_params_list(self, async_client: AsyncContextualAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): - await async_client.agents.tune.models.with_raw_response.list( - "", - ) diff --git a/tests/api_resources/datastores/test_documents.py b/tests/api_resources/datastores/test_documents.py index 525988a..d18c95c 100644 --- a/tests/api_resources/datastores/test_documents.py +++ b/tests/api_resources/datastores/test_documents.py @@ -14,6 +14,7 @@ from contextual.types.datastores import ( DocumentMetadata, IngestionResponse, + DocumentGetParseResultResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -34,6 +35,7 @@ def test_method_list_with_all_params(self, client: ContextualAI) -> 
None: document = client.datastores.documents.list( datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", cursor="cursor", + document_name_prefix="document_name_prefix", ingestion_job_status=["pending"], limit=1, uploaded_after=parse_datetime("2019-12-27T18:11:19.117Z"), @@ -120,6 +122,63 @@ def test_path_params_delete(self, client: ContextualAI) -> None: datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) + @parametrize + def test_method_get_parse_result(self, client: ContextualAI) -> None: + document = client.datastores.documents.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(DocumentGetParseResultResponse, document, path=["response"]) + + @parametrize + def test_method_get_parse_result_with_all_params(self, client: ContextualAI) -> None: + document = client.datastores.documents.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + output_types=["markdown-document"], + ) + assert_matches_type(DocumentGetParseResultResponse, document, path=["response"]) + + @parametrize + def test_raw_response_get_parse_result(self, client: ContextualAI) -> None: + response = client.datastores.documents.with_raw_response.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + document = response.parse() + assert_matches_type(DocumentGetParseResultResponse, document, path=["response"]) + + @parametrize + def test_streaming_response_get_parse_result(self, client: ContextualAI) -> None: + with client.datastores.documents.with_streaming_response.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + document = response.parse() + assert_matches_type(DocumentGetParseResultResponse, document, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_get_parse_result(self, client: ContextualAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `datastore_id` but received ''"): + client.datastores.documents.with_raw_response.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `document_id` but received ''"): + client.datastores.documents.with_raw_response.get_parse_result( + document_id="", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + @parametrize def test_method_ingest(self, client: ContextualAI) -> None: document = client.datastores.documents.ingest( @@ -133,7 +192,7 @@ def test_method_ingest_with_all_params(self, client: ContextualAI) -> None: document = client.datastores.documents.ingest( datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", file=b"raw file contents", - metadata='{"field1": "value1", "field2": "value2"}}', + metadata="metadata", ) assert_matches_type(IngestionResponse, document, path=["response"]) @@ -233,6 +292,13 @@ def test_method_set_metadata_with_all_params(self, client: ContextualAI) -> None document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", custom_metadata={"foo": True}, + custom_metadata_config={ + "foo": { + "filterable": True, + "in_chunks": True, + "returned_in_response": True, + } + }, ) assert_matches_type(DocumentMetadata, document, path=["response"]) @@ -278,7 +344,9 @@ def test_path_params_set_metadata(self, client: ContextualAI) -> None: class TestAsyncDocuments: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = 
pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_list(self, async_client: AsyncContextualAI) -> None: @@ -292,6 +360,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncContextualAI document = await async_client.datastores.documents.list( datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", cursor="cursor", + document_name_prefix="document_name_prefix", ingestion_job_status=["pending"], limit=1, uploaded_after=parse_datetime("2019-12-27T18:11:19.117Z"), @@ -378,6 +447,63 @@ async def test_path_params_delete(self, async_client: AsyncContextualAI) -> None datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) + @parametrize + async def test_method_get_parse_result(self, async_client: AsyncContextualAI) -> None: + document = await async_client.datastores.documents.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(DocumentGetParseResultResponse, document, path=["response"]) + + @parametrize + async def test_method_get_parse_result_with_all_params(self, async_client: AsyncContextualAI) -> None: + document = await async_client.datastores.documents.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + output_types=["markdown-document"], + ) + assert_matches_type(DocumentGetParseResultResponse, document, path=["response"]) + + @parametrize + async def test_raw_response_get_parse_result(self, async_client: AsyncContextualAI) -> None: + response = await async_client.datastores.documents.with_raw_response.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + document = 
await response.parse() + assert_matches_type(DocumentGetParseResultResponse, document, path=["response"]) + + @parametrize + async def test_streaming_response_get_parse_result(self, async_client: AsyncContextualAI) -> None: + async with async_client.datastores.documents.with_streaming_response.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + document = await response.parse() + assert_matches_type(DocumentGetParseResultResponse, document, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_get_parse_result(self, async_client: AsyncContextualAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `datastore_id` but received ''"): + await async_client.datastores.documents.with_raw_response.get_parse_result( + document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + datastore_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `document_id` but received ''"): + await async_client.datastores.documents.with_raw_response.get_parse_result( + document_id="", + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + @parametrize async def test_method_ingest(self, async_client: AsyncContextualAI) -> None: document = await async_client.datastores.documents.ingest( @@ -391,7 +517,7 @@ async def test_method_ingest_with_all_params(self, async_client: AsyncContextual document = await async_client.datastores.documents.ingest( datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", file=b"raw file contents", - metadata='{"field1": "value1", "field2": "value2"}}', + metadata="metadata", ) assert_matches_type(IngestionResponse, document, path=["response"]) @@ -491,6 +617,13 @@ async def test_method_set_metadata_with_all_params(self, async_client: AsyncCont 
document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", custom_metadata={"foo": True}, + custom_metadata_config={ + "foo": { + "filterable": True, + "in_chunks": True, + "returned_in_response": True, + } + }, ) assert_matches_type(DocumentMetadata, document, path=["response"]) diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 9009cee..f9467ac 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -35,6 +35,16 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None: name="xxx", agent_configs={ "filter_and_rerank_config": { + "default_metadata_filters": { + "filters": [], + "operator": "AND", + }, + "per_datastore_metadata_filters": { + "d49609d9-61c3-4a67-b3bd-5196b10da560": { + "filters": [], + "operator": "AND", + } + }, "rerank_instructions": "rerank_instructions", "reranker_score_filter_threshold": 0, "top_k_reranked_chunks": 0, @@ -54,6 +64,12 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None: "enable_rerank": True, "should_check_retrieval_need": True, }, + "reformulation_config": { + "enable_query_decomposition": True, + "enable_query_expansion": True, + "query_decomposition_prompt": "query_decomposition_prompt", + "query_expansion_prompt": "query_expansion_prompt", + }, "retrieval_config": { "lexical_alpha": 0, "semantic_alpha": 0, @@ -63,6 +79,7 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None: datastore_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"], description="description", filter_prompt="filter_prompt", + multiturn_system_prompt="multiturn_system_prompt", no_retrieval_system_prompt="no_retrieval_system_prompt", suggested_queries=["string"], system_prompt="system_prompt", @@ -106,6 +123,16 @@ def test_method_update_with_all_params(self, client: ContextualAI) -> None: agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", agent_configs={ 
"filter_and_rerank_config": { + "default_metadata_filters": { + "filters": [], + "operator": "AND", + }, + "per_datastore_metadata_filters": { + "d49609d9-61c3-4a67-b3bd-5196b10da560": { + "filters": [], + "operator": "AND", + } + }, "rerank_instructions": "rerank_instructions", "reranker_score_filter_threshold": 0, "top_k_reranked_chunks": 0, @@ -125,6 +152,12 @@ def test_method_update_with_all_params(self, client: ContextualAI) -> None: "enable_rerank": True, "should_check_retrieval_need": True, }, + "reformulation_config": { + "enable_query_decomposition": True, + "enable_query_expansion": True, + "query_decomposition_prompt": "query_decomposition_prompt", + "query_expansion_prompt": "query_expansion_prompt", + }, "retrieval_config": { "lexical_alpha": 0, "semantic_alpha": 0, @@ -134,6 +167,7 @@ def test_method_update_with_all_params(self, client: ContextualAI) -> None: datastore_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"], filter_prompt="filter_prompt", llm_model_id="llm_model_id", + multiturn_system_prompt="multiturn_system_prompt", no_retrieval_system_prompt="no_retrieval_system_prompt", suggested_queries=["string"], system_prompt="system_prompt", @@ -242,6 +276,44 @@ def test_path_params_delete(self, client: ContextualAI) -> None: "", ) + @parametrize + def test_method_copy(self, client: ContextualAI) -> None: + agent = client.agents.copy( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(CreateAgentOutput, agent, path=["response"]) + + @parametrize + def test_raw_response_copy(self, client: ContextualAI) -> None: + response = client.agents.with_raw_response.copy( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(CreateAgentOutput, agent, path=["response"]) + + @parametrize + def test_streaming_response_copy(self, client: ContextualAI) -> None: + with 
client.agents.with_streaming_response.copy( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(CreateAgentOutput, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_copy(self, client: ContextualAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.with_raw_response.copy( + "", + ) + @parametrize def test_method_metadata(self, client: ContextualAI) -> None: agent = client.agents.metadata( @@ -320,7 +392,9 @@ def test_path_params_reset(self, client: ContextualAI) -> None: class TestAsyncAgents: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncContextualAI) -> None: @@ -335,6 +409,16 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual name="xxx", agent_configs={ "filter_and_rerank_config": { + "default_metadata_filters": { + "filters": [], + "operator": "AND", + }, + "per_datastore_metadata_filters": { + "d49609d9-61c3-4a67-b3bd-5196b10da560": { + "filters": [], + "operator": "AND", + } + }, "rerank_instructions": "rerank_instructions", "reranker_score_filter_threshold": 0, "top_k_reranked_chunks": 0, @@ -354,6 +438,12 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual "enable_rerank": True, "should_check_retrieval_need": True, }, + "reformulation_config": { + "enable_query_decomposition": True, + "enable_query_expansion": True, + "query_decomposition_prompt": "query_decomposition_prompt", + "query_expansion_prompt": 
"query_expansion_prompt", + }, "retrieval_config": { "lexical_alpha": 0, "semantic_alpha": 0, @@ -363,6 +453,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual datastore_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"], description="description", filter_prompt="filter_prompt", + multiturn_system_prompt="multiturn_system_prompt", no_retrieval_system_prompt="no_retrieval_system_prompt", suggested_queries=["string"], system_prompt="system_prompt", @@ -406,6 +497,16 @@ async def test_method_update_with_all_params(self, async_client: AsyncContextual agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", agent_configs={ "filter_and_rerank_config": { + "default_metadata_filters": { + "filters": [], + "operator": "AND", + }, + "per_datastore_metadata_filters": { + "d49609d9-61c3-4a67-b3bd-5196b10da560": { + "filters": [], + "operator": "AND", + } + }, "rerank_instructions": "rerank_instructions", "reranker_score_filter_threshold": 0, "top_k_reranked_chunks": 0, @@ -425,6 +526,12 @@ async def test_method_update_with_all_params(self, async_client: AsyncContextual "enable_rerank": True, "should_check_retrieval_need": True, }, + "reformulation_config": { + "enable_query_decomposition": True, + "enable_query_expansion": True, + "query_decomposition_prompt": "query_decomposition_prompt", + "query_expansion_prompt": "query_expansion_prompt", + }, "retrieval_config": { "lexical_alpha": 0, "semantic_alpha": 0, @@ -434,6 +541,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncContextual datastore_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"], filter_prompt="filter_prompt", llm_model_id="llm_model_id", + multiturn_system_prompt="multiturn_system_prompt", no_retrieval_system_prompt="no_retrieval_system_prompt", suggested_queries=["string"], system_prompt="system_prompt", @@ -542,6 +650,44 @@ async def test_path_params_delete(self, async_client: AsyncContextualAI) -> None "", ) + @parametrize + async def test_method_copy(self, 
async_client: AsyncContextualAI) -> None: + agent = await async_client.agents.copy( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(CreateAgentOutput, agent, path=["response"]) + + @parametrize + async def test_raw_response_copy(self, async_client: AsyncContextualAI) -> None: + response = await async_client.agents.with_raw_response.copy( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(CreateAgentOutput, agent, path=["response"]) + + @parametrize + async def test_streaming_response_copy(self, async_client: AsyncContextualAI) -> None: + async with async_client.agents.with_streaming_response.copy( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(CreateAgentOutput, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_copy(self, async_client: AsyncContextualAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.with_raw_response.copy( + "", + ) + @parametrize async def test_method_metadata(self, async_client: AsyncContextualAI) -> None: agent = await async_client.agents.metadata( diff --git a/tests/api_resources/test_datastores.py b/tests/api_resources/test_datastores.py index fe99724..053c3ed 100644 --- a/tests/api_resources/test_datastores.py +++ b/tests/api_resources/test_datastores.py @@ -13,6 +13,7 @@ Datastore, DatastoreMetadata, CreateDatastoreResponse, + DatastoreUpdateResponse, ) from contextual.pagination import SyncDatastoresPage, AsyncDatastoresPage @@ -29,6 +30,28 @@ def test_method_create(self, client: ContextualAI) -> None: ) 
assert_matches_type(CreateDatastoreResponse, datastore, path=["response"]) + @parametrize + def test_method_create_with_all_params(self, client: ContextualAI) -> None: + datastore = client.datastores.create( + name="name", + configuration={ + "chunking": { + "chunking_mode": "hierarchy_depth", + "enable_hierarchy_based_contextualization": True, + "max_chunk_length_tokens": 512, + "min_chunk_length_tokens": 128, + }, + "html_config": {"max_chunk_length_tokens": 512}, + "parsing": { + "enable_split_tables": True, + "figure_caption_mode": "default", + "figure_captioning_prompt": "figure_captioning_prompt", + "max_split_table_cells": 0, + }, + }, + ) + assert_matches_type(CreateDatastoreResponse, datastore, path=["response"]) + @parametrize def test_raw_response_create(self, client: ContextualAI) -> None: response = client.datastores.with_raw_response.create( @@ -53,6 +76,67 @@ def test_streaming_response_create(self, client: ContextualAI) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_method_update(self, client: ContextualAI) -> None: + datastore = client.datastores.update( + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(DatastoreUpdateResponse, datastore, path=["response"]) + + @parametrize + def test_method_update_with_all_params(self, client: ContextualAI) -> None: + datastore = client.datastores.update( + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + configuration={ + "chunking": { + "chunking_mode": "hierarchy_depth", + "enable_hierarchy_based_contextualization": True, + "max_chunk_length_tokens": 512, + "min_chunk_length_tokens": 128, + }, + "html_config": {"max_chunk_length_tokens": 512}, + "parsing": { + "enable_split_tables": True, + "figure_caption_mode": "default", + "figure_captioning_prompt": "figure_captioning_prompt", + "max_split_table_cells": 0, + }, + }, + name="name", + ) + assert_matches_type(DatastoreUpdateResponse, datastore, path=["response"]) + + @parametrize + def 
test_raw_response_update(self, client: ContextualAI) -> None: + response = client.datastores.with_raw_response.update( + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + datastore = response.parse() + assert_matches_type(DatastoreUpdateResponse, datastore, path=["response"]) + + @parametrize + def test_streaming_response_update(self, client: ContextualAI) -> None: + with client.datastores.with_streaming_response.update( + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + datastore = response.parse() + assert_matches_type(DatastoreUpdateResponse, datastore, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_update(self, client: ContextualAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `datastore_id` but received ''"): + client.datastores.with_raw_response.update( + datastore_id="", + ) + @parametrize def test_method_list(self, client: ContextualAI) -> None: datastore = client.datastores.list() @@ -203,7 +287,9 @@ def test_path_params_reset(self, client: ContextualAI) -> None: class TestAsyncDatastores: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncContextualAI) -> None: @@ -212,6 +298,28 @@ async def test_method_create(self, async_client: AsyncContextualAI) -> None: ) assert_matches_type(CreateDatastoreResponse, datastore, path=["response"]) + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncContextualAI) 
-> None: + datastore = await async_client.datastores.create( + name="name", + configuration={ + "chunking": { + "chunking_mode": "hierarchy_depth", + "enable_hierarchy_based_contextualization": True, + "max_chunk_length_tokens": 512, + "min_chunk_length_tokens": 128, + }, + "html_config": {"max_chunk_length_tokens": 512}, + "parsing": { + "enable_split_tables": True, + "figure_caption_mode": "default", + "figure_captioning_prompt": "figure_captioning_prompt", + "max_split_table_cells": 0, + }, + }, + ) + assert_matches_type(CreateDatastoreResponse, datastore, path=["response"]) + @parametrize async def test_raw_response_create(self, async_client: AsyncContextualAI) -> None: response = await async_client.datastores.with_raw_response.create( @@ -236,6 +344,67 @@ async def test_streaming_response_create(self, async_client: AsyncContextualAI) assert cast(Any, response.is_closed) is True + @parametrize + async def test_method_update(self, async_client: AsyncContextualAI) -> None: + datastore = await async_client.datastores.update( + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(DatastoreUpdateResponse, datastore, path=["response"]) + + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncContextualAI) -> None: + datastore = await async_client.datastores.update( + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + configuration={ + "chunking": { + "chunking_mode": "hierarchy_depth", + "enable_hierarchy_based_contextualization": True, + "max_chunk_length_tokens": 512, + "min_chunk_length_tokens": 128, + }, + "html_config": {"max_chunk_length_tokens": 512}, + "parsing": { + "enable_split_tables": True, + "figure_caption_mode": "default", + "figure_captioning_prompt": "figure_captioning_prompt", + "max_split_table_cells": 0, + }, + }, + name="name", + ) + assert_matches_type(DatastoreUpdateResponse, datastore, path=["response"]) + + @parametrize + async def test_raw_response_update(self, async_client: 
AsyncContextualAI) -> None: + response = await async_client.datastores.with_raw_response.update( + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + datastore = await response.parse() + assert_matches_type(DatastoreUpdateResponse, datastore, path=["response"]) + + @parametrize + async def test_streaming_response_update(self, async_client: AsyncContextualAI) -> None: + async with async_client.datastores.with_streaming_response.update( + datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + datastore = await response.parse() + assert_matches_type(DatastoreUpdateResponse, datastore, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_update(self, async_client: AsyncContextualAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `datastore_id` but received ''"): + await async_client.datastores.with_raw_response.update( + datastore_id="", + ) + @parametrize async def test_method_list(self, async_client: AsyncContextualAI) -> None: datastore = await async_client.datastores.list() diff --git a/tests/api_resources/test_generate.py b/tests/api_resources/test_generate.py index 44938ae..bd288b7 100644 --- a/tests/api_resources/test_generate.py +++ b/tests/api_resources/test_generate.py @@ -90,7 +90,9 @@ def test_streaming_response_create(self, client: ContextualAI) -> None: class TestAsyncGenerate: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncContextualAI) 
-> None: diff --git a/tests/api_resources/test_lmunit.py b/tests/api_resources/test_lmunit.py index 4c5a5a2..367b4bd 100644 --- a/tests/api_resources/test_lmunit.py +++ b/tests/api_resources/test_lmunit.py @@ -56,7 +56,9 @@ def test_streaming_response_create(self, client: ContextualAI) -> None: class TestAsyncLMUnit: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncContextualAI) -> None: diff --git a/tests/api_resources/test_parse.py b/tests/api_resources/test_parse.py index 08898d9..1415ec0 100644 --- a/tests/api_resources/test_parse.py +++ b/tests/api_resources/test_parse.py @@ -35,7 +35,7 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None: parse = client.parse.create( raw_file=b"raw file contents", enable_document_hierarchy=True, - enable_split_tables=True, + enable_split_tables=False, figure_caption_mode="concise", max_split_table_cells=0, page_range="page_range", @@ -159,6 +159,8 @@ def test_method_jobs(self, client: ContextualAI) -> None: @parametrize def test_method_jobs_with_all_params(self, client: ContextualAI) -> None: parse = client.parse.jobs( + cursor="cursor", + limit=1, uploaded_after=parse_datetime("2019-12-27T18:11:19.117Z"), ) assert_matches_type(ParseJobsResponse, parse, path=["response"]) @@ -185,7 +187,9 @@ def test_streaming_response_jobs(self, client: ContextualAI) -> None: class TestAsyncParse: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: 
AsyncContextualAI) -> None: @@ -199,7 +203,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual parse = await async_client.parse.create( raw_file=b"raw file contents", enable_document_hierarchy=True, - enable_split_tables=True, + enable_split_tables=False, figure_caption_mode="concise", max_split_table_cells=0, page_range="page_range", @@ -323,6 +327,8 @@ async def test_method_jobs(self, async_client: AsyncContextualAI) -> None: @parametrize async def test_method_jobs_with_all_params(self, async_client: AsyncContextualAI) -> None: parse = await async_client.parse.jobs( + cursor="cursor", + limit=1, uploaded_after=parse_datetime("2019-12-27T18:11:19.117Z"), ) assert_matches_type(ParseJobsResponse, parse, path=["response"]) diff --git a/tests/api_resources/test_rerank.py b/tests/api_resources/test_rerank.py index 6f97de0..68fbefc 100644 --- a/tests/api_resources/test_rerank.py +++ b/tests/api_resources/test_rerank.py @@ -68,7 +68,9 @@ def test_streaming_response_create(self, client: ContextualAI) -> None: class TestAsyncRerank: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncContextualAI) -> None: diff --git a/tests/api_resources/test_users.py b/tests/api_resources/test_users.py index acd99a9..e798c03 100644 --- a/tests/api_resources/test_users.py +++ b/tests/api_resources/test_users.py @@ -32,7 +32,15 @@ def test_method_update(self, client: ContextualAI) -> None: def test_method_update_with_all_params(self, client: ContextualAI) -> None: user = client.users.update( email="email", + agent_level_roles=["AGENT_LEVEL_USER"], is_tenant_admin=True, + per_agent_roles=[ + { + "agent_id": "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + "grant": True, + "roles": 
["AGENT_LEVEL_USER"], + } + ], roles=["VISITOR"], ) assert_matches_type(object, user, path=["response"]) @@ -163,7 +171,9 @@ def test_streaming_response_invite(self, client: ContextualAI) -> None: class TestAsyncUsers: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_update(self, async_client: AsyncContextualAI) -> None: @@ -176,7 +186,15 @@ async def test_method_update(self, async_client: AsyncContextualAI) -> None: async def test_method_update_with_all_params(self, async_client: AsyncContextualAI) -> None: user = await async_client.users.update( email="email", + agent_level_roles=["AGENT_LEVEL_USER"], is_tenant_admin=True, + per_agent_roles=[ + { + "agent_id": "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + "grant": True, + "roles": ["AGENT_LEVEL_USER"], + } + ], roles=["VISITOR"], ) assert_matches_type(object, user, path=["response"]) diff --git a/tests/conftest.py b/tests/conftest.py index aa99261..125175d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,13 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ from __future__ import annotations import os import logging from typing import TYPE_CHECKING, Iterator, AsyncIterator +import httpx import pytest from pytest_asyncio import is_async_test -from contextual import ContextualAI, AsyncContextualAI +from contextual import ContextualAI, AsyncContextualAI, DefaultAioHttpClient +from contextual._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] @@ -25,6 +29,19 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: for async_test in pytest_asyncio_tests: async_test.add_marker(session_scope_marker, append=False) + # We skip tests that use both the aiohttp client and respx_mock as respx_mock + # doesn't support custom transports. + for item in items: + if "async_client" not in item.fixturenames or "respx_mock" not in item.fixturenames: + continue + + if not hasattr(item, "callspec"): + continue + + async_client_param = item.callspec.params.get("async_client") + if is_dict(async_client_param) and async_client_param.get("http_client") == "aiohttp": + item.add_marker(pytest.mark.skip(reason="aiohttp client is not compatible with respx_mock")) + base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -43,9 +60,25 @@ def client(request: FixtureRequest) -> Iterator[ContextualAI]: @pytest.fixture(scope="session") async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncContextualAI]: - strict = getattr(request, "param", True) - if not isinstance(strict, bool): - raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - - async with AsyncContextualAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: + param = getattr(request, "param", True) + + # defaults + strict = True + http_client: None | httpx.AsyncClient = None + + if isinstance(param, bool): + strict = param + elif is_dict(param): + strict = param.get("strict", True) + assert 
isinstance(strict, bool) + + http_client_type = param.get("http_client", "httpx") + if http_client_type == "aiohttp": + http_client = DefaultAioHttpClient() + else: + raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict") + + async with AsyncContextualAI( + base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client + ) as client: yield client diff --git a/tests/test_client.py b/tests/test_client.py index 4617f67..df3fc8d 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -23,17 +23,16 @@ from contextual import ContextualAI, AsyncContextualAI, APIResponseValidationError from contextual._types import Omit -from contextual._utils import maybe_transform from contextual._models import BaseModel, FinalRequestOptions -from contextual._constants import RAW_RESPONSE_HEADER from contextual._exceptions import APIStatusError, APITimeoutError, ContextualAIError, APIResponseValidationError from contextual._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, + DefaultHttpxClient, + DefaultAsyncHttpxClient, make_request_options, ) -from contextual.types.agent_create_params import AgentCreateParams from .utils import update_env @@ -192,6 +191,7 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -464,7 +464,7 @@ def test_request_extra_query(self) -> None: def test_multipart_repeating_array(self, client: ContextualAI) -> None: request = client._build_request( FinalRequestOptions.construct( - method="get", + method="post", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": 
["foo", "bar"]}, @@ -723,32 +723,21 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str @mock.patch("contextual._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: ContextualAI) -> None: respx_mock.post("/agents").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - self.client.post( - "/agents", - body=cast(object, maybe_transform(dict(name="Example"), AgentCreateParams)), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + client.agents.with_streaming_response.create(name="xxx").__enter__() assert _get_open_connections(self.client) == 0 @mock.patch("contextual._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: ContextualAI) -> None: respx_mock.post("/agents").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - self.client.post( - "/agents", - body=cast(object, maybe_transform(dict(name="Example"), AgentCreateParams)), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + client.agents.with_streaming_response.create(name="xxx").__enter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -828,6 +817,55 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" + def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set 
correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects(self, respx_mock: MockRouter) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + self.client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" + class TestAsyncContextualAI: client = AsyncContextualAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @@ -964,6 +1002,7 @@ def test_copy_signature(self) -> None: copy_param = 
copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -1236,7 +1275,7 @@ def test_request_extra_query(self) -> None: def test_multipart_repeating_array(self, async_client: AsyncContextualAI) -> None: request = async_client._build_request( FinalRequestOptions.construct( - method="get", + method="post", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, @@ -1499,32 +1538,25 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte @mock.patch("contextual._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_timeout_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncContextualAI + ) -> None: respx_mock.post("/agents").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await self.client.post( - "/agents", - body=cast(object, maybe_transform(dict(name="Example"), AgentCreateParams)), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + await async_client.agents.with_streaming_response.create(name="xxx").__aenter__() assert _get_open_connections(self.client) == 0 @mock.patch("contextual._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_status_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncContextualAI + ) -> None: 
respx_mock.post("/agents").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await self.client.post( - "/agents", - body=cast(object, maybe_transform(dict(name="Example"), AgentCreateParams)), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + await async_client.agents.with_streaming_response.create(name="xxx").__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1657,3 +1689,52 @@ async def test_main() -> None: raise AssertionError("calling get_platform using asyncify resulted in a hung process") time.sleep(0.1) + + async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultAsyncHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + async def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultAsyncHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects(self, respx_mock: MockRouter) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + 
+ @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + await self.client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" diff --git a/tests/test_models.py b/tests/test_models.py index 5adfed9..ae4b3f0 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,5 +1,5 @@ import json -from typing import Any, Dict, List, Union, Optional, cast +from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast from datetime import datetime, timezone from typing_extensions import Literal, Annotated, TypeAliasType @@ -889,3 +889,75 @@ class ModelB(BaseModel): ) assert isinstance(m, ModelB) + + +def test_nested_discriminated_union() -> None: + class InnerType1(BaseModel): + type: Literal["type_1"] + + class InnerModel(BaseModel): + inner_value: str + + class InnerType2(BaseModel): + type: Literal["type_2"] + some_inner_model: InnerModel + + class Type1(BaseModel): + base_type: Literal["base_type_1"] + value: Annotated[ + Union[ + InnerType1, + InnerType2, + ], + PropertyInfo(discriminator="type"), + ] + + class Type2(BaseModel): + base_type: Literal["base_type_2"] + + T = Annotated[ + Union[ + Type1, + Type2, + ], + PropertyInfo(discriminator="base_type"), + ] + + model = construct_type( + type_=T, + value={ + "base_type": "base_type_1", + "value": { + "type": "type_2", + }, + }, + ) + assert isinstance(model, Type1) + assert isinstance(model.value, InnerType2) + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now") +def 
test_extra_properties() -> None: + class Item(BaseModel): + prop: int + + class Model(BaseModel): + __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + + other: str + + if TYPE_CHECKING: + + def __getattr__(self, attr: str) -> Item: ... + + model = construct_type( + type_=Model, + value={ + "a": {"prop": 1}, + "other": "foo", + }, + ) + assert isinstance(model, Model) + assert model.a.prop == 1 + assert isinstance(model.a, Item) + assert model.other == "foo"