diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 714fa9ae3..051dfa030 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,6 +4,7 @@ updates: directory: "/" schedule: interval: "weekly" + versioning-strategy: increase-if-necessary groups: dependencies: dependency-type: "production" @@ -15,6 +16,9 @@ updates: update-types: - "minor" - "patch" + allow: + - dependency-type: production + - dependency-type: development - package-ecosystem: "github-actions" directory: "/" schedule: diff --git a/.github/workflows/docs-integration-tests.yml b/.github/workflows/docs-integration-tests.yml index 77dab4a5b..d8e2162ed 100644 --- a/.github/workflows/docs-integration-tests.yml +++ b/.github/workflows/docs-integration-tests.yml @@ -123,6 +123,8 @@ jobs: QDRANT_CLUSTER_API_KEY: ${{ secrets.INTEG_QDRANT_CLUSTER_API_KEY }} ASTRA_DB_API_ENDPOINT: ${{ secrets.INTEG_ASTRA_DB_API_ENDPOINT }} ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.INTEG_ASTRA_DB_APPLICATION_TOKEN }} + TAVILY_API_KEY: ${{ secrets.INTEG_TAVILY_API_KEY }} + EXA_API_KEY: ${{ secrets.INTEG_EXA_API_KEY }} services: postgres: image: ankane/pgvector:v0.5.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c394b0ef..f3ba8c51b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,85 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +## [0.33.0] - 2024-10-09 + +### Added +- `Workflow.input_tasks` and `Workflow.output_tasks` to access the input and output tasks of a Workflow. +- Ability to pass nested list of `Tasks` to `Structure.tasks` allowing for more complex declarative Structure definitions. +- `TavilyWebSearchDriver` to integrate Tavily's web search capabilities. +- `ExaWebSearchDriver` to integrate Exa's web search capabilities. +- `Workflow.outputs` to access the outputs of a Workflow. +- `BaseFileLoader` for Loaders that load from a path. +- `BaseLoader.fetch()` method for fetching data from a source. 
+- `BaseLoader.parse()` method for parsing fetched data. +- `BaseFileManager.encoding` to specify the encoding when loading and saving files. +- `BaseWebScraperDriver.extract_page()` method for extracting data from an already scraped web page. +- `TextLoaderRetrievalRagModule.chunker` for specifying the chunking strategy. +- `file_utils.get_mime_type` utility for getting the MIME type of a file. +- `BaseRulesetDriver` for loading a `Ruleset` from an external source. + - `LocalRulesetDriver` for loading a `Ruleset` from a local `.json` file. + - `GriptapeCloudRulesetDriver` for loading a `Ruleset` resource from Griptape Cloud. +- Parameter `alias` on `GriptapeCloudConversationMemoryDriver` for fetching a Thread by alias. +- Basic support for OpenAi Structured Output via `OpenAiChatPromptDriver.response_format` parameter. +- Ability to pass callable to `activity.schema` for dynamic schema generation. + +### Changed +- **BREAKING**: Renamed parameters on several classes to `client`: + - `bedrock_client` on `AmazonBedrockCohereEmbeddingDriver`. + - `bedrock_client` on `AmazonBedrockCohereEmbeddingDriver`. + - `bedrock_client` on `AmazonBedrockTitanEmbeddingDriver`. + - `bedrock_client` on `AmazonBedrockImageGenerationDriver`. + - `bedrock_client` on `AmazonBedrockImageQueryDriver`. + - `bedrock_client` on `AmazonBedrockPromptDriver`. + - `sagemaker_client` on `AmazonSageMakerJumpstartEmbeddingDriver`. + - `sagemaker_client` on `AmazonSageMakerJumpstartPromptDriver`. + - `sqs_client` on `AmazonSqsEventListenerDriver`. + - `iotdata_client` on `AwsIotCoreEventListenerDriver`. + - `s3_client` on `AmazonS3FileManagerDriver`. + - `s3_client` on `AwsS3Tool`. + - `iam_client` on `AwsIamTool`. + - `pusher_client` on `PusherEventListenerDriver`. + - `mq` on `MarqoVectorStoreDriver`. + - `model_client` on `GooglePromptDriver`. + - `model_client` on `GoogleTokenizer`. +- **BREAKING**: Renamed parameter `pipe` on `HuggingFacePipelinePromptDriver` to `pipeline`. 
+- **BREAKING**: Removed `BaseFileManager.default_loader` and `BaseFileManager.loaders`. +- **BREAKING**: Loaders no longer chunk data, use a Chunker to chunk the data. +- **BREAKING**: Removed `fileutils.load_file` and `fileutils.load_files`. +- **BREAKING**: Removed `loaders-dataframe` and `loaders-audio` extras as they are no longer needed. +- **BREAKING**: `TextLoader`, `PdfLoader`, `ImageLoader`, and `AudioLoader` now take a `str | PathLike` instead of `bytes`. Passing `bytes` is still supported but deprecated. +- **BREAKING**: Removed `DataframeLoader`. +- **BREAKING**: Update `pypdf` dependency to `^5.0.1`. +- **BREAKING**: Update `redis` dependency to `^5.1.0`. +- **BREAKING**: Remove `torch` extra from `transformers` dependency. This must be installed separately. +- **BREAKING**: Split `BaseExtractionEngine.extract` into `extract_text` and `extract_artifacts` for consistency with `BaseSummaryEngine`. +- **BREAKING**: `BaseExtractionEngine` no longer catches exceptions and returns `ErrorArtifact`s. +- **BREAKING**: `JsonExtractionEngine.template_schema` is now required. +- **BREAKING**: `CsvExtractionEngine.column_names` is now required. +- **BREAKING**: Renamed `RuleMixin.all_rulesets` to `RuleMixin.rulesets`. +- **BREAKING**: Renamed `GriptapeCloudKnowledgeBaseVectorStoreDriver` to `GriptapeCloudVectorStoreDriver`. +- **BREAKING**: `OpenAiChatPromptDriver.response_format` is now a `dict` instead of a `str`. +- `MarkdownifyWebScraperDriver.DEFAULT_EXCLUDE_TAGS` now includes media/blob-like HTML tags +- `StructureRunTask` now inherits from `PromptTask`. +- Several places where API clients are initialized are now lazy loaded. +- `BaseVectorStoreDriver.upsert_text_artifacts` now returns a list or dictionary of upserted vector ids. +- `LocalFileManagerDriver.workdir` is now optional. +- `filetype` is now a core dependency. +- `FileManagerTool` now uses `filetype` for more accurate file type detection. 
+- `BaseFileLoader.load_file()` will now either return a `TextArtifact` or a `BlobArtifact` depending on whether `BaseFileManager.encoding` is set. +- `Structure.output`'s type is now `BaseArtifact` and raises an exception if the output is `None`. +- `JsonExtractionEngine.extract_artifacts` now returns a `ListArtifact[JsonArtifact]`. +- `CsvExtractionEngine.extract_artifacts` now returns a `ListArtifact[CsvRowArtifact]`. +- Remove `manifest.yml` requirements for custom tool creation. + +### Fixed +- Anthropic native Tool calling. +- Empty `ActionsSubtask.thought` being logged. +- `RuleMixin` no longer prevents setting `rulesets` _and_ `rules` at the same time. +- `PromptTask` will merge in its Structure's Rulesets and Rules. +- `PromptTask` not checking whether Structure was set before building Prompt Stack. +- `BaseTask.full_context` context being empty when not connected to a Structure. + ## [0.32.0] - 2024-09-17 ### Added @@ -22,8 +101,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - **BREAKING**: Removed `CsvRowArtifact`. Use `TextArtifact` instead. +- **BREAKING**: Removed `DataframeLoader`. - **BREAKING**: Removed `MediaArtifact`, use `ImageArtifact` or `AudioArtifact` instead. -- **BREAKING**: `CsvLoader`, `DataframeLoader`, and `SqlLoader` now return `list[TextArtifact]`. +- **BREAKING**: `CsvLoader` and `SqlLoader` now return `ListArtifact[TextArtifact]`. - **BREAKING**: Removed `ImageArtifact.media_type`. - **BREAKING**: Removed `AudioArtifact.media_type`. - **BREAKING**: Removed `BlobArtifact.dir_name`. @@ -44,6 +124,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Parameter `meta: dict` on `BaseEvent`. +- `AzureOpenAiTextToSpeechDriver`. +- Ability to use Event Listeners as Context Managers for temporarily setting the Event Bus listeners. +- `JsonSchemaRule` for instructing the LLM to output a JSON object that conforms to a schema. 
+- Ability to use Drivers Configs as Context Managers for temporarily setting the default Drivers. ### Changed - **BREAKING**: Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorArtifacts`. @@ -52,6 +136,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **BREAKING**: `BaseConversationMemoryDriver.load` now returns `tuple[list[Run], dict]`. This represents the runs and metadata. - **BREAKING**: `BaseConversationMemoryDriver.store` now takes `runs: list[Run]` and `metadata: dict` as input. - **BREAKING**: Parameter `file_path` on `LocalConversationMemoryDriver` renamed to `persist_file` and is now type `Optional[str]`. +- **BREAKING**: Removed the `__all__` declaration from the `griptape.mixins` module. - `Defaults.drivers_config.conversation_memory_driver` now defaults to `LocalConversationMemoryDriver` instead of `None`. - `CsvRowArtifact.to_text()` now includes the header. @@ -62,6 +147,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Missing `maxTokens` inference parameter in `AmazonBedrockPromptDriver`. - Incorrect model in `OpenAiDriverConfig`'s `text_to_speech_driver`. - Crash when using `CohereRerankDriver` with `CsvRowArtifact`s. +- Crash when passing "empty" Artifacts or no Artifacts to `CohereRerankDriver`. ## [0.30.2] - 2024-08-26 diff --git a/MIGRATION.md b/MIGRATION.md index 016a93f03..9dd5a6fef 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,6 +1,165 @@ # Migration Guide This document provides instructions for migrating your codebase to accommodate breaking changes introduced in new versions of Griptape. +## 0.32.X to 0.33.X + +### Removed `DataframeLoader` + +`DataframeLoader` has been removed. Use `CsvLoader.parse` or build `TextArtifact`s from the dataframe instead. 
+ +#### Before + +```python +DataframeLoader().load(df) +``` + +#### After +```python +# Convert the dataframe to csv bytes and parse it +CsvLoader().parse(bytes(df.to_csv(line_terminator='\r\n', index=False), encoding='utf-8')) +# Or build TextArtifacts from the dataframe +[TextArtifact(row) for row in source.to_dict(orient="records")] +``` + +### `TextLoader`, `PdfLoader`, `ImageLoader`, and `AudioLoader` now take a `str | PathLike` instead of `bytes`. + +#### Before +```python +PdfLoader().load(Path("attention.pdf").read_bytes()) +PdfLoader().load_collection([Path("attention.pdf").read_bytes(), Path("CoT.pdf").read_bytes()]) +``` + +#### After +```python +PdfLoader().load("attention.pdf") +PdfLoader().load_collection([Path("attention.pdf"), "CoT.pdf"]) +``` + +### Removed `fileutils.load_file` and `fileutils.load_files` + +`griptape.utils.file_utils.load_file` and `griptape.utils.file_utils.load_files` have been removed. +You can now pass the file path directly to the Loader. + +#### Before + +```python +PdfLoader().load(load_file("attention.pdf").read_bytes()) +PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values())) +``` + +#### After + +```python +PdfLoader().load("attention.pdf") +PdfLoader().load_collection(["attention.pdf", "CoT.pdf"]) +``` + +### Loaders no longer chunk data + +Loaders no longer chunk the data after loading it. If you need to chunk the data, use a [Chunker](https://docs.griptape.ai/stable/griptape-framework/data/chunkers/) after loading the data. + +#### Before + +```python +chunks = PdfLoader().load("attention.pdf") +vector_store.upsert_text_artifacts( +    { +        "griptape": chunks, +    } +) +``` + +#### After +```python +artifact = PdfLoader().load("attention.pdf") +chunks = Chunker().chunk(artifact) +vector_store.upsert_text_artifacts( +    { +        "griptape": chunks, +    } +) +``` + + +### Removed `torch` extra from `transformers` dependency + +The `torch` extra has been removed from the `transformers` dependency. 
If you require `torch`, install it separately. + +#### Before +```bash +pip install griptape[drivers-prompt-huggingface-hub] +``` + +#### After +```bash +pip install griptape[drivers-prompt-huggingface-hub] +pip install torch +``` + +### `CsvLoader`, `DataframeLoader`, and `SqlLoader` return types + +`CsvLoader`, `DataframeLoader`, and `SqlLoader` now return a `list[TextArtifact]` instead of `list[CsvRowArtifact]`. + +If you require a dictionary, set a custom `formatter_fn` and then parse the text to a dictionary. + +#### Before + +```python +results = CsvLoader().load(Path("people.csv").read_text()) + +print(results[0].value) # {"name": "John", "age": 30} +print(type(results[0].value)) # <class 'dict'> +``` + +#### After +```python +results = CsvLoader().load(Path("people.csv").read_text()) + +print(results[0].value) # name: John\nage: 30 +print(type(results[0].value)) # <class 'str'> + +# Customize formatter_fn +results = CsvLoader(formatter_fn=lambda x: json.dumps(x)).load(Path("people.csv").read_text()) +print(results[0].value) # {"name": "John", "age": 30} +print(type(results[0].value)) # <class 'str'> + +dict_results = [json.loads(result.value) for result in results] +print(dict_results[0]) # {"name": "John", "age": 30} +print(type(dict_results[0])) # <class 'dict'> +``` + +### Renamed `GriptapeCloudKnowledgeBaseVectorStoreDriver` to `GriptapeCloudVectorStoreDriver`. + +#### Before +```python +from griptape.drivers.griptape_cloud_knowledge_base_vector_store_driver import GriptapeCloudKnowledgeBaseVectorStoreDriver + +driver = GriptapeCloudKnowledgeBaseVectorStoreDriver(...) +``` + +#### After +```python +from griptape.drivers.griptape_cloud_vector_store_driver import GriptapeCloudVectorStoreDriver + +driver = GriptapeCloudVectorStoreDriver(...) +``` + +### `OpenAiChatPromptDriver.response_format` is now a `dict` instead of a `str`. + +`OpenAiChatPromptDriver.response_format` is now structured as the `openai` SDK accepts it. 
+ +#### Before +```python +driver = OpenAiChatPromptDriver( + response_format="json_object" +) +``` + +#### After +```python +driver = OpenAiChatPromptDriver( + response_format={"type": "json_object"} +) +``` + ## 0.31.X to 0.32.X ### Removed `MediaArtifact` diff --git a/docs/examples/src/load_query_and_chat_marqo_1.py b/docs/examples/src/load_query_and_chat_marqo_1.py index cdcb376bb..f318abb20 100644 --- a/docs/examples/src/load_query_and_chat_marqo_1.py +++ b/docs/examples/src/load_query_and_chat_marqo_1.py @@ -1,6 +1,7 @@ import os from griptape import utils +from griptape.chunkers import TextChunker from griptape.drivers import MarqoVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader from griptape.structures import Agent @@ -25,11 +26,12 @@ # Load artifacts from the web artifacts = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker().chunk(artifacts) # Upsert the artifacts into the vector store vector_store.upsert_text_artifacts( { - namespace: artifacts, + namespace: chunks, } ) diff --git a/docs/examples/src/query_webpage_1.py b/docs/examples/src/query_webpage_1.py index b9e3286d6..b839b1302 100644 --- a/docs/examples/src/query_webpage_1.py +++ b/docs/examples/src/query_webpage_1.py @@ -1,14 +1,15 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader vector_store = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"])) -artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") +artifacts = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker().chunk(artifacts) -for a in artifacts: - vector_store.upsert_text_artifact(a, namespace="griptape") +vector_store.upsert_text_artifacts({"griptape": chunks}) results = vector_store.query("creativity", count=3, namespace="griptape") diff --git a/docs/examples/src/query_webpage_astra_db_1.py 
b/docs/examples/src/query_webpage_astra_db_1.py index 4590a6b59..8c71fbe5a 100644 --- a/docs/examples/src/query_webpage_astra_db_1.py +++ b/docs/examples/src/query_webpage_astra_db_1.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import ( AstraDbVectorStoreDriver, OpenAiChatPromptDriver, @@ -43,9 +44,9 @@ ), ) -artifacts = WebLoader(max_tokens=256).load(input_blogpost) - -vector_store_driver.upsert_text_artifacts({namespace: artifacts}) +artifacts = WebLoader().load(input_blogpost) +chunks = TextChunker(max_tokens=256).chunk(artifacts) +vector_store_driver.upsert_text_artifacts({namespace: chunks}) rag_tool = RagTool( description="A DataStax blog post", diff --git a/docs/examples/src/talk_to_a_pdf_1.py b/docs/examples/src/talk_to_a_pdf_1.py index 3c29f4c74..be0848b9e 100644 --- a/docs/examples/src/talk_to_a_pdf_1.py +++ b/docs/examples/src/talk_to_a_pdf_1.py @@ -1,5 +1,6 @@ import requests +from griptape.chunkers import TextChunker from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver from griptape.engines.rag import RagEngine from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule @@ -30,9 +31,10 @@ rag_engine=engine, ) -artifacts = PdfLoader().load(response.content) +artifacts = PdfLoader().parse(response.content) +chunks = TextChunker().chunk(artifacts) -vector_store.upsert_text_artifacts({namespace: artifacts}) +vector_store.upsert_text_artifacts({namespace: chunks}) agent = Agent(tools=[rag_tool]) diff --git a/docs/examples/src/talk_to_a_webpage_1.py b/docs/examples/src/talk_to_a_webpage_1.py index 3e973da2d..5414c8769 100644 --- a/docs/examples/src/talk_to_a_webpage_1.py +++ b/docs/examples/src/talk_to_a_webpage_1.py @@ -1,3 +1,4 @@ +from griptape.chunkers import TextChunker from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver from griptape.engines.rag import RagEngine from 
griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule @@ -26,8 +27,9 @@ ) artifacts = WebLoader().load("https://en.wikipedia.org/wiki/Physics") +chunks = TextChunker().chunk(artifacts) -vector_store_driver.upsert_text_artifacts({namespace: artifacts}) +vector_store_driver.upsert_text_artifacts({namespace: chunks}) rag_tool = RagTool( description="Contains information about physics. " "Use it to answer any physics-related questions.", diff --git a/docs/griptape-cloud/data-sources/create-data-source.md b/docs/griptape-cloud/data-sources/create-data-source.md index 347f52725..87a5286a0 100644 --- a/docs/griptape-cloud/data-sources/create-data-source.md +++ b/docs/griptape-cloud/data-sources/create-data-source.md @@ -10,6 +10,10 @@ You can [create a Data Source in the Griptape Cloud console](https://cloud.gript You can scrape and ingest a single, public web page by providing a URL. If you wish to scrape multiple pages, you must create multiple Data Sources. However, you can then add all of the pages to the same Knowledge Base if you wish to access all the pages together. +### Amazon S3 + +You can connect Amazon S3 buckets, objects, and prefixes by providing their S3 URI(s). Supported file extensions include .pdf, .csv, .md, and most text-based file types. + ### Google Drive You can ingest documents and spreadsheets stored in a Google Drive account. We support all standard file formats such as text, markdown, spreadsheets, and presentations. diff --git a/docs/griptape-cloud/index.md b/docs/griptape-cloud/index.md index 74a78eaf1..74556ab62 100644 --- a/docs/griptape-cloud/index.md +++ b/docs/griptape-cloud/index.md @@ -8,5 +8,8 @@ Connect to your data with our [Data Sources](data-sources/create-data-source.md) ## Host and Run Your Code Have Griptape code? Have existing code with another LLM framework? You can host your Python code using [Structures](structures/create-structure.md) whether it uses the Griptape Framework or not. 
+## Store Configuration for LLM Agents +[Rules and Rulesets](rules/rulesets.md) enable rapid and collaborative iteration for managing LLM behavior. [Threads and Messages](threads/threads.md) allow for persisted and editable conversation memory across any LLM invocation. + ## APIs All of our features can be called via API with a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys). See the [API Reference](api/api-reference.md) for detailed information. diff --git a/docs/griptape-cloud/knowledge-bases/accessing-data.md b/docs/griptape-cloud/knowledge-bases/accessing-data.md index 8cb2f7e7b..8343933dd 100644 --- a/docs/griptape-cloud/knowledge-bases/accessing-data.md +++ b/docs/griptape-cloud/knowledge-bases/accessing-data.md @@ -30,4 +30,4 @@ curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" --json '{"query": "test ques ## Using the Griptape Framework -You can use the [GriptapeCloudKnowledgeBaseVectorStoreDriver](../../griptape-framework/drivers/vector-store-drivers.md/#griptape-cloud-knowledge-base) to query your Knowledge Base with Griptape and the [GriptapeCloudKnowledgeBaseTool](../../griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md) to search. +You can use the [GriptapeCloudVectorStoreDriver](../../griptape-framework/drivers/vector-store-drivers.md/#griptape-cloud-knowledge-base) to query your Knowledge Base with Griptape and the [GriptapeCloudKnowledgeBaseTool](../../griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md) to search. diff --git a/docs/griptape-cloud/rules/rulesets.md b/docs/griptape-cloud/rules/rulesets.md new file mode 100644 index 000000000..d1ac5ed7a --- /dev/null +++ b/docs/griptape-cloud/rules/rulesets.md @@ -0,0 +1,9 @@ +# Rulesets + +A [Ruleset can be created](https://cloud.griptape.ai/rulesets/create) to store sets of rules and pull them in dynamically for faster iteration on LLM behavior in a deployed environment. 
A Ruleset takes a list of [Rules](https://cloud.griptape.ai/rules/create). A Ruleset can be given an `alias` so it can be referenced by a user-provided unique identifier: + +```bash +export GT_CLOUD_API_KEY= +export ALIAS= +curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" https://cloud.griptape.ai/api/rulesets?alias=${ALIAS} +``` diff --git a/docs/griptape-cloud/structures/run-structure.md b/docs/griptape-cloud/structures/run-structure.md index 995fcff01..24ddf668c 100644 --- a/docs/griptape-cloud/structures/run-structure.md +++ b/docs/griptape-cloud/structures/run-structure.md @@ -1,6 +1,8 @@ # Running a Structure -Once your Structure is created and deployed, you can run your Structure one of three ways outlined below. You view the output of any of your runs, no matter how you created them, in the `Runs` tab of your Structure. +Once your Structure is created and deployed, you can run your Structure one of three ways outlined below. You may view the details of any of your runs, no matter how you created them, in the `Runs` tab of your Structure. + +To learn more about Structure Run details and output, look at the [Structure Run Events](./structure-run-events.md) documentation. ## From the Cloud Console @@ -8,21 +10,21 @@ In the cloud console, click on the name of the Structure you wish to run and the When passing arguments through the cloud console, pass each new argument on a new line. For example if your local code is ran with the inputs `-i input_file.txt` then the arguments you would pass in the cloud would be: -``` +```bash -i input_file.txt ``` ## From the API -You can run your Structure via the API using CURL or any other code that can make HTTP requests. You will need a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys) and the `Structure Invocation URL` which is located on the `Config` tab of your Structure. +You can run your Structure via the API using cURL or any other code that can make HTTP requests. 
You will need a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys) and the `Structure Invocation URL` which is located on the `Config` tab of your Structure. The example below will kick off a run with the args you pass as a json object. ```shell export GT_CLOUD_API_KEY= export INVOCATION_URL= -curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" --json '{"args": ["arg1"], ""env_vars"": [{"name":"var1", "value": "value"}]}' ${INVOCATION_URL} +curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" --json '{"args": ["arg1"], "env_vars": [{"name":"var1", "value": "value"}]}' ${INVOCATION_URL} ``` For more information on other Structure run APIs, check out the [StructureRuns API docs](../api/api-reference.md/#/StructureRuns). diff --git a/docs/griptape-cloud/structures/structure-run-events.md b/docs/griptape-cloud/structures/structure-run-events.md new file mode 100644 index 000000000..f13dfb8ae --- /dev/null +++ b/docs/griptape-cloud/structures/structure-run-events.md @@ -0,0 +1,127 @@ +# Structure Run Events + +Running a Structure will result in Events being created for that Structure Run. Events are used to report data about the progress of your Structure Run. There are two types of Events: `SYSTEM` Events and `USER` Events. + +For information about the Events APIs, check out the [Events API docs](../api/api-reference.md/#/Events). + +## Event Types + +### System Events + +The Griptape Cloud Structure Runtime manages the lifecycle of your Structure Run. To communicate near real-time updates about the status of your Structure Run, the Cloud will emit `SYSTEM` Events. + +The following types of `SYSTEM` Events are emitted: + +1. `StructureRunStarting` +1. `StructureRunRunning` +1. `StructureRunCompleted` +1. `StructureRunError` + +#### StructureRunStarting + +This Event indicates that your Structure code has been successfully loaded in the Runtime. 
+ +Example Event body: + +```json +{ + "event_id": "12345678-1909-4900-8eca-6f7d82da9fdc", + "timestamp": 1726096542.71688, + "type": "StructureRunStarting", + "payload": { + "status": "STARTING" + }, + "created_at": "2024-09-11T23:15:42.834783Z", + "origin": "SYSTEM", + "structure_run": "12345678-e125-4c38-b67f-02b82aa443ce" +} +``` + +#### StructureRunRunning + +This Event indicates that your Structure code is now executing. + +Example Event body: + +```json +{ + "event_id": "22345678-1909-4900-8eca-6f7d82da9fdc", + "timestamp": 1726096548.683578, + "type": "StructureRunRunning", + "payload": { + "status": "RUNNING", + "started_at": "2024-09-11T23:15:47" + }, + "created_at": "2024-09-11T23:15:48.787162Z", + "origin": "SYSTEM", + "structure_run": "12345678-e125-4c38-b67f-02b82aa443ce" +} +``` + +#### StructureRunCompleted + +This Event indicates that your Structure code has exited. + +Example Event body: + +```json +{ + "event_id": "32345678-f277-4f12-939f-707804f2fdf0", + "timestamp": 1726096554.70717, + "type": "StructureRunCompleted", + "payload": { + "status": "SUCCEEDED", + "started_at": "2024-09-11T23:15:47", + "completed_at": "2024-09-11T23:15:50", + "status_detail": { + "reason": "Completed", + "message": null, + "exit_code": 0 + } + }, + "created_at": "2024-09-11T23:15:54.754921Z", + "origin": "SYSTEM", + "structure_run": "12345678-e125-4c38-b67f-02b82aa443ce" +} +``` + +#### StructureRunError + +This Event indicates that your Structure Run encountered an Error from the Griptape Cloud Runtime. 
+ +Example Event body: + +```json +{ + "event_id": "42345678-f277-4f12-939f-707804f2fdf0", + "timestamp": 1726096555.70717, + "type": "StructureRunError", + "payload": { + "status": "ERROR", + "status_detail": { + "error": "Error message" + } + }, + "created_at": "2024-09-11T23:15:54.754921Z", + "origin": "SYSTEM", + "structure_run": "12345678-e125-4c38-b67f-02b82aa443ce" +} +``` + +### User Events + +`USER` Events are any Events emitted by your Structure code to the Events API. The recommended approach for emitting those Events is to make use of the +[Griptape Cloud Event Listener Driver](../../griptape-framework/drivers/event-listener-drivers.md#griptape-cloud) in the Griptape framework. + +For a full example of Structure code that makes use of that driver, refer to the [Managed Structure Template](https://github.com/griptape-ai/managed-structure-template/blob/main/structure.py). + +#### Structure Run Output + +In order for Griptape Cloud to populate the `output` field for your Structure Run, you must send a `USER` Event of type `FinishStructureRunEvent`. + +By using the Griptape Cloud Event Listener Driver and an [Event Bus](../../griptape-framework/misc/events.md) that emits that Event type in your Structure code, the Cloud will automatically populate your Structure Run's output. + +## Example Client Event Listener + +For an example of client code listening to Structure Run Events, check out the example client in the +[Managed Structure Template](https://github.com/griptape-ai/managed-structure-template/blob/main/example-client/client.py). diff --git a/docs/griptape-cloud/threads/threads.md b/docs/griptape-cloud/threads/threads.md new file mode 100644 index 000000000..32456c32d --- /dev/null +++ b/docs/griptape-cloud/threads/threads.md @@ -0,0 +1,11 @@ +# Threads + +A [Thread can be created](https://cloud.griptape.ai/threads/create) to store conversation history across any LLM invocation. 
A Thread contains a list of [Messages](https://cloud.griptape.ai/messages/create). Messages can be updated and deleted, in order to control how the LLM recalls past conversations. + +A Thread can be given an `alias` so it can be referenced by a user-provided unique identifier: + +```bash +export GT_CLOUD_API_KEY= +export ALIAS= +curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" https://cloud.griptape.ai/api/threads?alias=${ALIAS} +``` diff --git a/docs/griptape-framework/data/chunkers.md b/docs/griptape-framework/data/chunkers.md index 507645923..bafbc1c80 100644 --- a/docs/griptape-framework/data/chunkers.md +++ b/docs/griptape-framework/data/chunkers.md @@ -18,3 +18,7 @@ Here is how to use a chunker: ```python --8<-- "docs/griptape-framework/data/src/chunkers_1.py" ``` + +The most common use of a Chunker is to split up a long text into smaller chunks for inserting into a Vector Database when doing Retrieval Augmented Generation (RAG). + +See [RagEngine](../../griptape-framework/engines/rag-engines.md) for more information on how to use Chunkers in RAG pipelines. diff --git a/docs/griptape-framework/data/loaders.md b/docs/griptape-framework/data/loaders.md index 0c0fc3ead..a8a8cb7c5 100644 --- a/docs/griptape-framework/data/loaders.md +++ b/docs/griptape-framework/data/loaders.md @@ -5,109 +5,96 @@ search: ## Overview -Loaders are used to load textual data from different sources and chunk it into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s. -Each loader can be used to load a single "document" with [load()](../../reference/griptape/loaders/base_loader.md#griptape.loaders.base_loader.BaseLoader.load) or -multiple documents with [load_collection()](../../reference/griptape/loaders/base_loader.md#griptape.loaders.base_loader.BaseLoader.load_collection). +Loaders are used to load data from sources and parse it into [Artifact](../../griptape-framework/data/artifacts.md)s. 
+Each loader can be used to load a single "source" with [load()](../../reference/griptape/loaders/base_loader.md#griptape.loaders.base_loader.BaseLoader.load) or +multiple sources with [load_collection()](../../reference/griptape/loaders/base_loader.md#griptape.loaders.base_loader.BaseLoader.load_collection). -## PDF -!!! info - This driver requires the `loaders-pdf` [extra](../index.md#extras). +## File + +The following Loaders load a file using a [FileManagerDriver](../../reference/griptape/drivers/file_manager/base_file_manager_driver.md) and loads the resulting data into an [Artifact](../../griptape-framework/data/artifacts.md) for the respective file type. + +### Text -Inherits from the [TextLoader](../../reference/griptape/loaders/text_loader.md) and can be used to load PDFs from a path or from an IO stream: +Loads text files into [TextArtifact](../../griptape-framework/data/artifacts.md#text)s: ```python ---8<-- "docs/griptape-framework/data/src/loaders_1.py" +--8<-- "docs/griptape-framework/data/src/loaders_5.py" ``` -## SQL +### PDF -Can be used to load data from a SQL database into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s: +!!! info + This driver requires the `loaders-pdf` [extra](../index.md#extras). 
+ +Loads PDF files into [ListArtifact](../../griptape-framework/data/artifacts.md#list)s, where each element is a [TextArtifact](../../griptape-framework/data/artifacts.md#text) containing a page of the PDF: ```python ---8<-- "docs/griptape-framework/data/src/loaders_2.py" +--8<-- "docs/griptape-framework/data/src/loaders_1.py" ``` -## CSV +### CSV -Can be used to load CSV files into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s: +Loads CSV files into [ListArtifact](../../griptape-framework/data/artifacts.md#list)s, where each element is a [TextArtifact](../../griptape-framework/data/artifacts.md#text) containing a row of the CSV: ```python --8<-- "docs/griptape-framework/data/src/loaders_3.py" ``` - -## DataFrame +### Image !!! info - This driver requires the `loaders-dataframe` [extra](../index.md#extras). + This driver requires the `loaders-image` [extra](../index.md#extras). + +Loads images into [ImageArtifact](../../griptape-framework/data/artifacts.md#image)s: -Can be used to load [pandas](https://pandas.pydata.org/) [DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)s into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s: ```python ---8<-- "docs/griptape-framework/data/src/loaders_4.py" +--8<-- "docs/griptape-framework/data/src/loaders_7.py" ``` - -## Text - -Used to load arbitrary text and text files: +By default, the Image Loader will load images in their native format, but not all models work on all formats. To normalize the format of Artifacts returned by the Loader, set the `format` field. 
```python ---8<-- "docs/griptape-framework/data/src/loaders_5.py" +--8<-- "docs/griptape-framework/data/src/loaders_8.py" ``` -You can set a custom [tokenizer](../../reference/griptape/loaders/text_loader.md#griptape.loaders.text_loader.TextLoader.tokenizer), [max_tokens](../../reference/griptape/loaders/text_loader.md#griptape.loaders.text_loader.TextLoader.max_tokens) parameter, and [chunker](../../reference/griptape/loaders/text_loader.md#griptape.loaders.text_loader.TextLoader.chunker). - -## Web +### Audio -!!! info - This driver requires the `loaders-web` [extra](../index.md#extras). +Loads audio files into [AudioArtifact](../../griptape-framework/data/artifacts.md#audio)s: -Inherits from the [TextLoader](../../reference/griptape/loaders/text_loader.md) and can be used to load web pages: +The Loader will load audio in its native format and populates the resulting Artifact's `format` field by making a best-effort guess of the underlying audio format using the `filetype` package. ```python ---8<-- "docs/griptape-framework/data/src/loaders_6.py" +--8<-- "docs/griptape-framework/data/src/loaders_10.py" ``` -## Image +## Web !!! info - This driver requires the `loaders-image` [extra](../index.md#extras). + This driver requires the `loaders-web` [extra](../index.md#extras). -The Image Loader is used to load an image as an [ImageArtifact](./artifacts.md#image). The Loader operates on image bytes that can be sourced from files on disk, downloaded images, or images in memory. +Scrapes web pages using a [WebScraperDriver](../drivers/web-scraper-drivers.md) and loads the resulting text into [TextArtifact](../../griptape-framework/data/artifacts.md#text)s. ```python ---8<-- "docs/griptape-framework/data/src/loaders_7.py" +--8<-- "docs/griptape-framework/data/src/loaders_6.py" ``` -By default, the Image Loader will load images in their native format, but not all models work on all formats. 
To normalize the format of Artifacts returned by the Loader, set the `format` field. +## SQL + +Loads data from a SQL database using a [SQLDriver](../drivers/sql-drivers.md) and loads the resulting data into [ListArtifact](../../griptape-framework/data/artifacts.md#list)s, where each element is a [CsvRowArtifact](../../griptape-framework/data/artifacts.md#csv) containing a row of the SQL query. ```python ---8<-- "docs/griptape-framework/data/src/loaders_8.py" +--8<-- "docs/griptape-framework/data/src/loaders_2.py" ``` - ## Email !!! info This driver requires the `loaders-email` [extra](../index.md#extras). -Can be used to load email from an imap server: +Loads data from an IMAP email server into [ListArtifact](../../reference/griptape/artifacts/list_artifact.md)s, where each element is a [TextArtifact](../../reference/griptape/artifacts/text_artifact.md) containing an email. ```python --8<-- "docs/griptape-framework/data/src/loaders_9.py" ``` - -## Audio - -!!! info - This driver requires the `loaders-audio` [extra](../index.md#extras). - -The [Audio Loader](../../reference/griptape/loaders/audio_loader.md) is used to load audio content as an [AudioArtifact](./artifacts.md#audio). The Loader operates on audio bytes that can be sourced from files on disk, downloaded audio, or audio in memory. - -The Loader will load audio in its native format and populates the resulting Artifact's `format` field by making a best-effort guess of the underlying audio format using the `filetype` package. 
- -```python ---8<-- "docs/griptape-framework/data/src/loaders_10.py" -``` diff --git a/docs/griptape-framework/data/src/loaders_1.py b/docs/griptape-framework/data/src/loaders_1.py index 2b7f31613..3732d8ac6 100644 --- a/docs/griptape-framework/data/src/loaders_1.py +++ b/docs/griptape-framework/data/src/loaders_1.py @@ -2,18 +2,15 @@ from pathlib import Path from griptape.loaders import PdfLoader -from griptape.utils import load_file, load_files urllib.request.urlretrieve("https://arxiv.org/pdf/1706.03762.pdf", "attention.pdf") # Load a single PDF file -PdfLoader().load(Path("attention.pdf").read_bytes()) -# You can also use the load_file utility function -PdfLoader().load(load_file("attention.pdf")) +PdfLoader().load("attention.pdf") +# You can also pass a Path object +PdfLoader().load(Path("attention.pdf")) urllib.request.urlretrieve("https://arxiv.org/pdf/1706.03762.pdf", "CoT.pdf") # Load multiple PDF files -PdfLoader().load_collection([Path("attention.pdf").read_bytes(), Path("CoT.pdf").read_bytes()]) -# You can also use the load_files utility function -PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values())) +PdfLoader().load_collection([Path("attention.pdf"), Path("CoT.pdf")]) diff --git a/docs/griptape-framework/data/src/loaders_10.py b/docs/griptape-framework/data/src/loaders_10.py index 42a64c40e..723fbcdf1 100644 --- a/docs/griptape-framework/data/src/loaders_10.py +++ b/docs/griptape-framework/data/src/loaders_10.py @@ -1,10 +1,9 @@ from pathlib import Path from griptape.loaders import AudioLoader -from griptape.utils import load_file # Load an image from disk -audio_artifact = AudioLoader().load(Path("tests/resources/sentences.wav").read_bytes()) +AudioLoader().load("tests/resources/sentences.wav") -# You can also use the load_file utility function -AudioLoader().load(load_file("tests/resources/sentences.wav")) +# You can also pass a Path object +AudioLoader().load(Path("tests/resources/sentences.wav")) diff --git 
a/docs/griptape-framework/data/src/loaders_3.py b/docs/griptape-framework/data/src/loaders_3.py index 35af0fdfc..3bc3ceb81 100644 --- a/docs/griptape-framework/data/src/loaders_3.py +++ b/docs/griptape-framework/data/src/loaders_3.py @@ -1,16 +1,11 @@ from pathlib import Path from griptape.loaders import CsvLoader -from griptape.utils import load_file, load_files # Load a single CSV file -CsvLoader().load(Path("tests/resources/cities.csv").read_text()) -# You can also use the load_file utility function -CsvLoader().load(load_file("tests/resources/cities.csv")) +CsvLoader().load("tests/resources/cities.csv") +# You can also pass a Path object +CsvLoader().load(Path("tests/resources/cities.csv")) # Load multiple CSV files -CsvLoader().load_collection( - [Path("tests/resources/cities.csv").read_text(), Path("tests/resources/addresses.csv").read_text()] -) -# You can also use the load_files utility function -CsvLoader().load_collection(list(load_files(["tests/resources/cities.csv", "tests/resources/addresses.csv"]).values())) +CsvLoader().load_collection([Path("tests/resources/cities.csv"), "tests/resources/addresses.csv"]) diff --git a/docs/griptape-framework/data/src/loaders_4.py b/docs/griptape-framework/data/src/loaders_4.py deleted file mode 100644 index 8d5883adf..000000000 --- a/docs/griptape-framework/data/src/loaders_4.py +++ /dev/null @@ -1,13 +0,0 @@ -import urllib.request - -import pandas as pd - -from griptape.loaders import DataFrameLoader - -urllib.request.urlretrieve("https://people.sc.fsu.edu/~jburkardt/data/csv/cities.csv", "cities.csv") - -DataFrameLoader().load(pd.read_csv("cities.csv")) - -urllib.request.urlretrieve("https://people.sc.fsu.edu/~jburkardt/data/csv/addresses.csv", "addresses.csv") - -DataFrameLoader().load_collection([pd.read_csv("cities.csv"), pd.read_csv("addresses.csv")]) diff --git a/docs/griptape-framework/data/src/loaders_5.py b/docs/griptape-framework/data/src/loaders_5.py index 0eefda776..4bdf79160 100644 --- 
a/docs/griptape-framework/data/src/loaders_5.py +++ b/docs/griptape-framework/data/src/loaders_5.py @@ -3,10 +3,10 @@ from griptape.loaders import TextLoader -TextLoader().load("my text") +TextLoader().load("tests/resources/test.txt") urllib.request.urlretrieve("https://example-files.online-convert.com/document/txt/example.txt", "example.txt") -TextLoader().load(Path("example.txt").read_text()) +TextLoader().load(Path("example.txt")) -TextLoader().load_collection(["my text", "my other text", Path("example.txt").read_text()]) +TextLoader().load_collection(["tests/resources/test.txt", Path("example.txt")]) diff --git a/docs/griptape-framework/data/src/loaders_7.py b/docs/griptape-framework/data/src/loaders_7.py index 6857886e8..177471fd2 100644 --- a/docs/griptape-framework/data/src/loaders_7.py +++ b/docs/griptape-framework/data/src/loaders_7.py @@ -1,9 +1,9 @@ from pathlib import Path from griptape.loaders import ImageLoader -from griptape.utils import load_file # Load an image from disk -disk_image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) -# You can also use the load_file utility function -ImageLoader().load(load_file("tests/resources/mountain.png")) +ImageLoader().load("tests/resources/mountain.png") + +# You can also pass a Path object +ImageLoader().load(Path("tests/resources/mountain.png")) diff --git a/docs/griptape-framework/data/src/loaders_8.py b/docs/griptape-framework/data/src/loaders_8.py index e85992d45..d54c31246 100644 --- a/docs/griptape-framework/data/src/loaders_8.py +++ b/docs/griptape-framework/data/src/loaders_8.py @@ -1,16 +1,11 @@ from pathlib import Path from griptape.loaders import ImageLoader -from griptape.utils import load_file, load_files # Load a single image in BMP format -image_artifact_jpeg = ImageLoader(format="bmp").load(Path("tests/resources/mountain.png").read_bytes()) -# You can also use the load_file utility function 
-ImageLoader(format="bmp").load(load_file("tests/resources/mountain.png")) +ImageLoader(format="bmp").load("tests/resources/mountain.png") +# You can also pass a Path object +ImageLoader(format="bmp").load(Path("tests/resources/mountain.png")) # Load multiple images in BMP format -ImageLoader().load_collection( - [Path("tests/resources/mountain.png").read_bytes(), Path("tests/resources/cow.png").read_bytes()] -) -# You can also use the load_files utility function -ImageLoader().load_collection(list(load_files(["tests/resources/mountain.png", "tests/resources/cow.png"]).values())) +ImageLoader().load_collection([Path("tests/resources/mountain.png"), "tests/resources/cow.png"]) diff --git a/docs/griptape-framework/drivers/ruleset-drivers.md b/docs/griptape-framework/drivers/ruleset-drivers.md new file mode 100644 index 000000000..117f5836b --- /dev/null +++ b/docs/griptape-framework/drivers/ruleset-drivers.md @@ -0,0 +1,26 @@ +--- +search: + boost: 2 +--- + +## Overview + +Ruleset Drivers can be used to load rules in from external sources. + +## Ruleset Drivers + +### Local + +The [LocalRulesetDriver](../../reference/griptape/drivers/ruleset/local_ruleset_driver.md) allows you to load a Ruleset from a local JSON file. The `persist_dir` parameter is used to specify a local directory where one or more Ruleset files are located. If no `persist_dir` parameter is given, the `.load` method is a no-op. + +```python +--8<-- "docs/griptape-framework/drivers/src/local_ruleset_driver.py" +``` + +### Griptape Cloud + +The [GriptapeCloudRulesetDriver](../../reference/griptape/drivers/ruleset/griptape_cloud_ruleset_driver.md) allows you to load a Griptape Cloud Ruleset resource. `Ruleset.name` is used to try and find a Griptape Cloud Ruleset with that alias. 
+ +```python +--8<-- "docs/griptape-framework/drivers/src/griptape_cloud_ruleset_driver.py" +``` diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py b/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py index 0723b5f75..0a8f3bb93 100644 --- a/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py +++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py @@ -1,13 +1,12 @@ import os -import uuid from griptape.drivers import GriptapeCloudConversationMemoryDriver from griptape.memory.structure import ConversationMemory from griptape.structures import Agent -conversation_id = uuid.uuid4().hex cloud_conversation_driver = GriptapeCloudConversationMemoryDriver( api_key=os.environ["GT_CLOUD_API_KEY"], + alias="my_thread_alias", ) agent = Agent(conversation_memory=ConversationMemory(conversation_memory_driver=cloud_conversation_driver)) diff --git a/docs/griptape-framework/drivers/src/griptape_cloud_ruleset_driver.py b/docs/griptape-framework/drivers/src/griptape_cloud_ruleset_driver.py new file mode 100644 index 000000000..377795d0e --- /dev/null +++ b/docs/griptape-framework/drivers/src/griptape_cloud_ruleset_driver.py @@ -0,0 +1,7 @@ +from griptape.drivers import GriptapeCloudRulesetDriver +from griptape.rules import Ruleset + +ruleset = Ruleset( + name="my_griptape_cloud_ruleset_alias", + ruleset_driver=GriptapeCloudRulesetDriver(), +) diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_8.py b/docs/griptape-framework/drivers/src/image_generation_drivers_8.py index 69437a3a5..470b47707 100644 --- a/docs/griptape-framework/drivers/src/image_generation_drivers_8.py +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_8.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.artifacts import TextArtifact from griptape.drivers import ( HuggingFacePipelineImageGenerationDriver, @@ -11,7 +9,7 @@ from 
griptape.tasks import VariationImageGenerationTask prompt_artifact = TextArtifact("landscape photograph, verdant, countryside, 8k") -input_image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +input_image_artifact = ImageLoader().load("tests/resources/mountain.png") image_variation_task = VariationImageGenerationTask( input=(prompt_artifact, input_image_artifact), diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_9.py b/docs/griptape-framework/drivers/src/image_generation_drivers_9.py index 2054588d9..ab3dc3113 100644 --- a/docs/griptape-framework/drivers/src/image_generation_drivers_9.py +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_9.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.artifacts import TextArtifact from griptape.drivers import ( HuggingFacePipelineImageGenerationDriver, @@ -11,7 +9,7 @@ from griptape.tasks import VariationImageGenerationTask prompt_artifact = TextArtifact("landscape photograph, verdant, countryside, 8k") -control_image_artifact = ImageLoader().load(Path("canny_control_image.png").read_bytes()) +control_image_artifact = ImageLoader().load("canny_control_image.png") controlnet_task = VariationImageGenerationTask( input=(prompt_artifact, control_image_artifact), diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_1.py b/docs/griptape-framework/drivers/src/image_query_drivers_1.py index 0c9db5be7..0e0165d97 100644 --- a/docs/griptape-framework/drivers/src/image_query_drivers_1.py +++ b/docs/griptape-framework/drivers/src/image_query_drivers_1.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import AnthropicImageQueryDriver from griptape.engines import ImageQueryEngine from griptape.loaders import ImageLoader @@ -13,6 +11,6 @@ image_query_driver=driver, ) -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") 
engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_2.py b/docs/griptape-framework/drivers/src/image_query_drivers_2.py index 8d605c0d9..4b5b3cc9f 100644 --- a/docs/griptape-framework/drivers/src/image_query_drivers_2.py +++ b/docs/griptape-framework/drivers/src/image_query_drivers_2.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import AnthropicImageQueryDriver from griptape.engines import ImageQueryEngine from griptape.loaders import ImageLoader @@ -13,9 +11,9 @@ image_query_driver=driver, ) -image_artifact1 = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact1 = ImageLoader().load("tests/resources/mountain.png") -image_artifact2 = ImageLoader().load(Path("tests/resources/cow.png").read_bytes()) +image_artifact2 = ImageLoader().load("tests/resources/cow.png") result = engine.run("Describe the weather in the image", [image_artifact1, image_artifact2]) diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_3.py b/docs/griptape-framework/drivers/src/image_query_drivers_3.py index 14070312b..0653d3f6e 100644 --- a/docs/griptape-framework/drivers/src/image_query_drivers_3.py +++ b/docs/griptape-framework/drivers/src/image_query_drivers_3.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import OpenAiImageQueryDriver from griptape.engines import ImageQueryEngine from griptape.loaders import ImageLoader @@ -13,6 +11,6 @@ image_query_driver=driver, ) -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_4.py b/docs/griptape-framework/drivers/src/image_query_drivers_4.py index 9ebf5ef59..cff4c2a10 100644 --- 
a/docs/griptape-framework/drivers/src/image_query_drivers_4.py +++ b/docs/griptape-framework/drivers/src/image_query_drivers_4.py @@ -1,5 +1,4 @@ import os -from pathlib import Path from griptape.drivers import AzureOpenAiImageQueryDriver from griptape.engines import ImageQueryEngine @@ -17,6 +16,6 @@ image_query_driver=driver, ) -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_5.py b/docs/griptape-framework/drivers/src/image_query_drivers_5.py index 2bab9a7fd..c364a24cc 100644 --- a/docs/griptape-framework/drivers/src/image_query_drivers_5.py +++ b/docs/griptape-framework/drivers/src/image_query_drivers_5.py @@ -1,5 +1,3 @@ -from pathlib import Path - import boto3 from griptape.drivers import AmazonBedrockImageQueryDriver, BedrockClaudeImageQueryModelDriver @@ -16,7 +14,7 @@ engine = ImageQueryEngine(image_query_driver=driver) -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") result = engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/drivers/src/local_ruleset_driver.py b/docs/griptape-framework/drivers/src/local_ruleset_driver.py new file mode 100644 index 000000000..56c44b260 --- /dev/null +++ b/docs/griptape-framework/drivers/src/local_ruleset_driver.py @@ -0,0 +1,19 @@ +import json +import os +from pathlib import Path + +from griptape.drivers import LocalRulesetDriver +from griptape.rules import Ruleset + +ruleset_dir = "path/to/ruleset/dir" +ruleset_name = "my_local_ruleset.json" +ruleset_path = Path(os.path.join(ruleset_dir, ruleset_name)) + +os.makedirs(ruleset_dir, exist_ok=True) + +ruleset_path.write_text(json.dumps({"rules": [{"value": "Always talk like a 
pirate."}]})) + +ruleset = Ruleset( + name=ruleset_name, + ruleset_driver=LocalRulesetDriver(persist_dir=ruleset_dir), +) diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_3.py b/docs/griptape-framework/drivers/src/prompt_drivers_3.py index 8e85ce887..bf37f2d72 100644 --- a/docs/griptape-framework/drivers/src/prompt_drivers_3.py +++ b/docs/griptape-framework/drivers/src/prompt_drivers_3.py @@ -1,19 +1,26 @@ import os +import schema + from griptape.drivers import OpenAiChatPromptDriver -from griptape.rules import Rule from griptape.structures import Agent agent = Agent( prompt_driver=OpenAiChatPromptDriver( api_key=os.environ["OPENAI_API_KEY"], + model="gpt-4o-2024-08-06", temperature=0.1, - model="gpt-4o", - response_format="json_object", seed=42, + response_format={ + "type": "json_schema", + "json_schema": { + "strict": True, + "name": "Output", + "schema": schema.Schema({"css_code": str, "relevant_emojies": [str]}).json_schema("Output Schema"), + }, + }, ), - input="You will be provided with a description of a mood, and your task is to generate the CSS code for a color that matches it. Description: {{ args[0] }}", - rules=[Rule(value='Write your output in json with a single key called "css_code".')], + input="You will be provided with a description of a mood, and your task is to generate the CSS color code for a color that matches it. 
Description: {{ args[0] }}", ) agent.run("Blue sky at dusk.") diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_1.py b/docs/griptape-framework/drivers/src/vector_store_drivers_1.py index 7f7e98e13..f531aa618 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_1.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_1.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -9,10 +10,11 @@ vector_store_driver = LocalVectorStoreDriver(embedding_driver=embedding_driver) # Load Artifacts from the web -artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=100).chunk(artifact) # Upsert Artifacts into the Vector Store Driver -[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] +vector_store_driver.upsert_text_artifacts({"griptape": chunks}) results = vector_store_driver.query(query="What is griptape?") diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_10.py b/docs/griptape-framework/drivers/src/vector_store_drivers_10.py index 39a21121d..4599cfa47 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_10.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_10.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import OpenAiEmbeddingDriver, QdrantVectorStoreDriver from griptape.loaders import WebLoader @@ -19,7 +20,8 @@ ) # Load Artifacts from the web -artifacts = WebLoader().load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=100).chunk(artifact) # Recreate Qdrant collection vector_store_driver.client.recreate_collection( @@ -28,7 +30,7 @@ ) # Upsert Artifacts into the Vector Store Driver 
-[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] +vector_store_driver.upsert_text_artifacts({"griptape": chunks}) results = vector_store_driver.query(query="What is griptape?") diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_11.py b/docs/griptape-framework/drivers/src/vector_store_drivers_11.py index a8d9ceed1..144f14c59 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_11.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_11.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import AstraDbVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -20,10 +21,11 @@ ) # Load Artifacts from the web -artifacts = WebLoader().load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker().chunk(artifact) # Upsert Artifacts into the Vector Store Driver -[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] +vector_store_driver.upsert_text_artifacts({"griptape": chunks}) results = vector_store_driver.query(query="What is griptape?") diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_2.py b/docs/griptape-framework/drivers/src/vector_store_drivers_2.py index f8b500924..8c8d67ac8 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_2.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_2.py @@ -1,12 +1,12 @@ import os -from griptape.drivers import GriptapeCloudKnowledgeBaseVectorStoreDriver +from griptape.drivers import GriptapeCloudVectorStoreDriver # Initialize environment variables gt_cloud_api_key = os.environ["GRIPTAPE_CLOUD_API_KEY"] gt_cloud_knowledge_base_id = os.environ["GRIPTAPE_CLOUD_KB_ID"] -vector_store_driver = GriptapeCloudKnowledgeBaseVectorStoreDriver( +vector_store_driver = GriptapeCloudVectorStoreDriver( api_key=gt_cloud_api_key, 
knowledge_base_id=gt_cloud_knowledge_base_id ) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_3.py b/docs/griptape-framework/drivers/src/vector_store_drivers_3.py index 559eaec5a..d84164cb4 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_3.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_3.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import OpenAiEmbeddingDriver, PineconeVectorStoreDriver from griptape.loaders import WebLoader @@ -14,10 +15,11 @@ ) # Load Artifacts from the web -artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=100).chunk(artifact) # Upsert Artifacts into the Vector Store Driver -[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] +vector_store_driver.upsert_text_artifacts({"griptape": chunks}) results = vector_store_driver.query(query="What is griptape?") diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_4.py b/docs/griptape-framework/drivers/src/vector_store_drivers_4.py index f2f0091a0..ad15f0744 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_4.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_4.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import MarqoVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -19,12 +20,13 @@ ) # Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=200).chunk(artifact) # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { - "griptape": artifacts, + "griptape": chunks, } ) diff --git 
a/docs/griptape-framework/drivers/src/vector_store_drivers_5.py b/docs/griptape-framework/drivers/src/vector_store_drivers_5.py index 7649579c7..33d541f2f 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_5.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_5.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import MongoDbAtlasVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -25,14 +26,11 @@ ) # Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=200).chunk(artifact) # Upsert Artifacts into the Vector Store Driver -vector_store_driver.upsert_text_artifacts( - { - "griptape": artifacts, - } -) +vector_store_driver.upsert_text_artifacts({"griptape": chunks}) results = vector_store_driver.query(query="What is griptape?") diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_6.py b/docs/griptape-framework/drivers/src/vector_store_drivers_6.py index 78a7cc3e6..6a8a8bb04 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_6.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_6.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import AzureMongoDbVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -25,12 +26,13 @@ ) # Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=200).chunk(artifact) # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { - "griptape": artifacts, + "griptape": chunks, } ) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_7.py b/docs/griptape-framework/drivers/src/vector_store_drivers_7.py index 
d34ff8649..7d14504bb 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_7.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_7.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from griptape.drivers import OpenAiEmbeddingDriver, RedisVectorStoreDriver from griptape.loaders import WebLoader @@ -15,12 +16,13 @@ ) # Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=200).chunk(artifact) # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { - "griptape": artifacts, + "griptape": chunks, } ) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_8.py b/docs/griptape-framework/drivers/src/vector_store_drivers_8.py index 18e50a397..0ecb49723 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_8.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_8.py @@ -2,6 +2,7 @@ import boto3 +from griptape.chunkers import TextChunker from griptape.drivers import AmazonOpenSearchVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -16,12 +17,13 @@ ) # Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=200).chunk(artifact) # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { - "griptape": artifacts, + "griptape": chunks, } ) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_9.py b/docs/griptape-framework/drivers/src/vector_store_drivers_9.py index ad5abf932..9feb47761 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_9.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_9.py @@ -1,5 +1,6 @@ import os +from griptape.chunkers import TextChunker from 
griptape.drivers import OpenAiEmbeddingDriver, PgVectorVectorStoreDriver from griptape.loaders import WebLoader @@ -22,12 +23,13 @@ vector_store_driver.setup() # Load Artifacts from the web -artifacts = WebLoader().load("https://www.griptape.ai") +artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker().chunk(artifact) # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { - "griptape": artifacts, + "griptape": chunks, } ) diff --git a/docs/griptape-framework/drivers/src/web_search_drivers_4.py b/docs/griptape-framework/drivers/src/web_search_drivers_4.py new file mode 100644 index 000000000..1119f8bed --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_search_drivers_4.py @@ -0,0 +1,7 @@ +import os + +from griptape.drivers import TavilyWebSearchDriver + +driver = TavilyWebSearchDriver(api_key=os.environ["TAVILY_API_KEY"]) + +driver.search("griptape ai") diff --git a/docs/griptape-framework/drivers/src/web_search_drivers_5.py b/docs/griptape-framework/drivers/src/web_search_drivers_5.py new file mode 100644 index 000000000..fd880736b --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_search_drivers_5.py @@ -0,0 +1,9 @@ +from griptape.drivers import DuckDuckGoWebSearchDriver +from griptape.structures import Agent +from griptape.tools import PromptSummaryTool, WebSearchTool + +agent = Agent( + tools=[WebSearchTool(web_search_driver=DuckDuckGoWebSearchDriver()), PromptSummaryTool(off_prompt=False)], +) + +agent.run("Give me some websites with information about AI frameworks.") diff --git a/docs/griptape-framework/drivers/src/web_search_drivers_6.py b/docs/griptape-framework/drivers/src/web_search_drivers_6.py new file mode 100644 index 000000000..133b22ed1 --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_search_drivers_6.py @@ -0,0 +1,7 @@ +import os + +from griptape.drivers import ExaWebSearchDriver + +driver = ExaWebSearchDriver(api_key=os.environ["EXA_API_KEY"]) + 
+driver.search("griptape ai") diff --git a/docs/griptape-framework/drivers/vector-store-drivers.md b/docs/griptape-framework/drivers/vector-store-drivers.md index 7cca64a46..6a76a8cf5 100644 --- a/docs/griptape-framework/drivers/vector-store-drivers.md +++ b/docs/griptape-framework/drivers/vector-store-drivers.md @@ -33,7 +33,7 @@ The [LocalVectorStoreDriver](../../reference/griptape/drivers/vector/local_vecto ### Griptape Cloud Knowledge Base -The [GriptapeCloudKnowledgeBaseVectorStoreDriver](../../reference/griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.md) can be used to query data from a Griptape Cloud Knowledge Base. Loading into Knowledge Bases is not supported at this time, only querying. Here is a complete example of how the Driver can be used to query an existing Knowledge Base: +The [GriptapeCloudVectorStoreDriver](../../reference/griptape/drivers/vector/griptape_cloud_vector_store_driver.md) can be used to query data from a Griptape Cloud Knowledge Base. Loading into Knowledge Bases is not supported at this time, only querying. Here is a complete example of how the Driver can be used to query an existing Knowledge Base: ```python --8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_2.py" diff --git a/docs/griptape-framework/drivers/web-search-drivers.md b/docs/griptape-framework/drivers/web-search-drivers.md index b2400fe28..2c64ceba8 100644 --- a/docs/griptape-framework/drivers/web-search-drivers.md +++ b/docs/griptape-framework/drivers/web-search-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -9,7 +9,47 @@ Web Search Drivers can be used to search for links from a search query. They are * `search()` searches the web and returns a [ListArtifact](../../reference/griptape/artifacts/list_artifact.md) that contains JSON-serializable [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s with the search results. 
-## Vector Store Drivers +You can use Web Search Drivers with [Structures](../structures/agents.md): + +```python +--8<-- "docs/griptape-framework/drivers/src/web_search_drivers_5.py" +``` +``` +ToolkitTask 45a53f1024494baab41a1f10a67017b1 + Output: Here are some websites with information about AI + frameworks: + + 1. [The Top 16 AI Frameworks and Libraries: A Beginner's Guide - + DataCamp](https://www.datacamp.com/blog/top-ai-frameworks-and-lib + raries) + 2. [AI Frameworks: Top Types To Adopt in 2024 - + Splunk](https://www.splunk.com/en_us/blog/learn/ai-frameworks.htm + l) + 3. [Top AI Frameworks in 2024: A Review - + BairesDev](https://www.bairesdev.com/blog/ai-frameworks/) + 4. [The Top 16 AI Frameworks and Libraries - AI + Slackers](https://aislackers.com/the-top-16-ai-frameworks-and-lib + raries/) + 5. [Top AI Frameworks in 2024: Artificial Intelligence Frameworks + Comparison - Clockwise + Software](https://clockwise.software/blog/artificial-intelligence + -framework/) +``` +Or use them independently: + +```python +--8<-- "docs/griptape-framework/drivers/src/web_search_drivers_3.py" +``` +``` +{"title": "The Top 16 AI Frameworks and Libraries: A Beginner's Guide", "url": "https://www.datacamp.com/blog/top-ai-frameworks-and-libraries", "description": "PyTorch. Torch is an open-source machine learning library known for its dynamic computational graph and is favored by researchers. The framework is excellent for prototyping and experimentation. Moreover, it's empowered by growing community support, with tools like PyTorch being built on the library."} + +{"title": "Top 11 AI Frameworks and Tools in 2024 | Fively | 5ly.co", "url": "https://5ly.co/blog/best-ai-frameworks/", "description": "Discover the top 11 modern artificial intelligence tools and frameworks to build robust architectures for your AI-powered apps. ... - Some advanced use cases may need further fine-tuning. Caffe 2. Now we move on to deep learning tools and frameworks. 
The first one is Caffe 2: an open-source deep learning framework with modularity and speed in ..."} + +{"title": "The Top 16 AI Frameworks and Libraries | AI Slackers", "url": "https://aislackers.com/the-top-16-ai-frameworks-and-libraries/", "description": "Experiment with different frameworks to find the one that aligns with your needs and goals as a data practitioner. Embrace the world of AI frameworks, and embark on a journey of building smarter software with confidence. Discover the top AI frameworks and libraries like PyTorch, Scikit-Learn, TensorFlow, Keras, LangChain, and more."} +``` + + +## Web Search Drivers ### Google @@ -21,12 +61,6 @@ Example using `GoogleWebSearchDriver` directly: --8<-- "docs/griptape-framework/drivers/src/web_search_drivers_1.py" ``` -Example of using `GoogleWebSearchDriver` with an agent: - -```python ---8<-- "docs/griptape-framework/drivers/src/web_search_drivers_2.py" -``` - ### DuckDuckGo !!! info @@ -39,3 +73,24 @@ Example of using `DuckDuckGoWebSearchDriver` directly: ```python --8<-- "docs/griptape-framework/drivers/src/web_search_drivers_3.py" ``` + +### Tavily +!!! info + This driver requires the `drivers-web-search-tavily` [extra](../index.md#extras), and a Tavily [api key](https://app.tavily.com). + +Example of using `TavilyWebSearchDriver` directly: + +```python +--8<-- "docs/griptape-framework/drivers/src/web_search_drivers_4.py" +``` + +### Exa +!!! 
info + This driver requires the `drivers-web-search-exa` [extra](../index.md#extras), + and an Exa [api key](https://dashboard.exa.ai/api-keys) + +Example of using `ExaWebSearchDriver` directly: + +```python +--8<-- "docs/griptape-framework/drivers/src/web_search_drivers_6.py" +``` \ No newline at end of file diff --git a/docs/griptape-framework/engines/extraction-engines.md b/docs/griptape-framework/engines/extraction-engines.md index b971e63cc..c00352691 100644 --- a/docs/griptape-framework/engines/extraction-engines.md +++ b/docs/griptape-framework/engines/extraction-engines.md @@ -10,10 +10,7 @@ As of now, Griptape supports two types of Extraction Engines: the CSV Extraction ## CSV -The CSV Extraction Engine is designed specifically for extracting data from CSV-formatted content. - -!!! info - The CSV Extraction Engine requires the `column_names` parameter for specifying the columns to be extracted. +The CSV Extraction Engine extracts tabular content from unstructured text. ```python --8<-- "docs/griptape-framework/engines/src/extraction_engines_1.py" @@ -27,15 +24,27 @@ Charlie,40,Texas ## JSON -The JSON Extraction Engine is tailored for extracting data from JSON-formatted content. +The JSON Extraction Engine extracts JSON-formatted content from unstructured text. -!!! info - The JSON Extraction Engine requires the `template_schema` parameter for specifying the structure to be extracted. ```python --8<-- "docs/griptape-framework/engines/src/extraction_engines_2.py" ``` ``` -{'name': 'Alice', 'age': 28, 'location': 'New York'} -{'name': 'Bob', 'age': 35, 'location': 'California'} +{ + "model": "GPT-3.5", + "notes": [ + "Part of OpenAI's GPT series.", + "Used in ChatGPT and Microsoft Copilot." + ] +} +{ + "model": "GPT-4", + "notes": [ + "Part of OpenAI's GPT series.", + "Praised for increased accuracy and multimodal capabilities.", + "Architecture and number of parameters not revealed." + ] +} +...Output truncated for brevity... 
``` diff --git a/docs/griptape-framework/engines/src/audio_engines_2.py b/docs/griptape-framework/engines/src/audio_engines_2.py index c04b466f8..92c87d638 100644 --- a/docs/griptape-framework/engines/src/audio_engines_2.py +++ b/docs/griptape-framework/engines/src/audio_engines_2.py @@ -1,7 +1,6 @@ from griptape.drivers import OpenAiAudioTranscriptionDriver from griptape.engines import AudioTranscriptionEngine from griptape.loaders import AudioLoader -from griptape.utils import load_file driver = OpenAiAudioTranscriptionDriver(model="whisper-1") @@ -9,5 +8,5 @@ audio_transcription_driver=driver, ) -audio_artifact = AudioLoader().load(load_file("tests/resources/sentences.wav")) +audio_artifact = AudioLoader().load("tests/resources/sentences.wav") engine.run(audio_artifact) diff --git a/docs/griptape-framework/engines/src/extraction_engines_1.py b/docs/griptape-framework/engines/src/extraction_engines_1.py index 17644ebf2..45ccfd3e0 100644 --- a/docs/griptape-framework/engines/src/extraction_engines_1.py +++ b/docs/griptape-framework/engines/src/extraction_engines_1.py @@ -1,4 +1,3 @@ -from griptape.artifacts import ListArtifact from griptape.drivers import OpenAiChatPromptDriver from griptape.engines import CsvExtractionEngine @@ -15,10 +14,7 @@ """ # Extract CSV rows using the engine -result = csv_engine.extract(sample_text) +result = csv_engine.extract_text(sample_text) -if isinstance(result, ListArtifact): - for row in result.value: - print(row.to_text()) -else: - print(result.to_text()) +for row in result: + print(row.to_text()) diff --git a/docs/griptape-framework/engines/src/extraction_engines_2.py b/docs/griptape-framework/engines/src/extraction_engines_2.py index a100754b3..ee34f779d 100644 --- a/docs/griptape-framework/engines/src/extraction_engines_2.py +++ b/docs/griptape-framework/engines/src/extraction_engines_2.py @@ -1,28 +1,31 @@ -from schema import Schema +import json -from griptape.artifacts.list_artifact import ListArtifact +from schema import 
Literal, Schema + +from griptape.artifacts import ListArtifact +from griptape.chunkers import TextChunker from griptape.drivers import OpenAiChatPromptDriver from griptape.engines import JsonExtractionEngine +from griptape.loaders import WebLoader # Define a schema for extraction -user_schema = Schema({"users": [{"name": str, "age": int, "location": str}]}).json_schema("UserSchema") - - json_engine = JsonExtractionEngine( - prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), template_schema=user_schema + prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"), + template_schema=Schema( + { + Literal("model", description="Name of an LLM model."): str, + Literal("notes", description="Any notes of substance about the model."): Schema([str]), + } + ).json_schema("ProductSchema"), ) -# Define some unstructured data -sample_json_text = """ -Alice (Age 28) lives in New York. -Bob (Age 35) lives in California. -""" +# Load data from the web +web_data = WebLoader().load("https://en.wikipedia.org/wiki/Large_language_model") +chunks = TextChunker().chunk(web_data) + # Extract data using the engine -result = json_engine.extract(sample_json_text) +result = json_engine.extract_artifacts(ListArtifact(chunks)) -if isinstance(result, ListArtifact): - for artifact in result.value: - print(artifact.value) -else: - print(result.to_text()) +for artifact in result: + print(json.dumps(artifact.value, indent=2)) diff --git a/docs/griptape-framework/engines/src/image_generation_engines_3.py b/docs/griptape-framework/engines/src/image_generation_engines_3.py index 83822b1bc..4bcd976d4 100644 --- a/docs/griptape-framework/engines/src/image_generation_engines_3.py +++ b/docs/griptape-framework/engines/src/image_generation_engines_3.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver from griptape.engines import VariationImageGenerationEngine from griptape.loaders import 
ImageLoader @@ -15,7 +13,7 @@ image_generation_driver=driver, ) -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") engine.run( prompts=["A photo of a mountain landscape in winter"], diff --git a/docs/griptape-framework/engines/src/image_generation_engines_4.py b/docs/griptape-framework/engines/src/image_generation_engines_4.py index c258e1cce..e7b46b341 100644 --- a/docs/griptape-framework/engines/src/image_generation_engines_4.py +++ b/docs/griptape-framework/engines/src/image_generation_engines_4.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver from griptape.engines import InpaintingImageGenerationEngine from griptape.loaders import ImageLoader @@ -15,9 +13,9 @@ image_generation_driver=driver, ) -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") -mask_artifact = ImageLoader().load(Path("tests/resources/mountain-mask.png").read_bytes()) +mask_artifact = ImageLoader().load("tests/resources/mountain-mask.png") engine.run( prompts=["A photo of a castle built into the side of a mountain"], diff --git a/docs/griptape-framework/engines/src/image_generation_engines_5.py b/docs/griptape-framework/engines/src/image_generation_engines_5.py index f91a48ec0..526ebff50 100644 --- a/docs/griptape-framework/engines/src/image_generation_engines_5.py +++ b/docs/griptape-framework/engines/src/image_generation_engines_5.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver from griptape.engines import OutpaintingImageGenerationEngine from griptape.loaders import ImageLoader @@ -15,9 +13,9 @@ image_generation_driver=driver, ) -image_artifact = 
ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") -mask_artifact = ImageLoader().load(Path("tests/resources/mountain-mask.png").read_bytes()) +mask_artifact = ImageLoader().load("tests/resources/mountain-mask.png") engine.run( prompts=["A photo of a mountain shrouded in clouds"], diff --git a/docs/griptape-framework/engines/src/image_query_engines_1.py b/docs/griptape-framework/engines/src/image_query_engines_1.py index b0920392a..c2d08e9a9 100644 --- a/docs/griptape-framework/engines/src/image_query_engines_1.py +++ b/docs/griptape-framework/engines/src/image_query_engines_1.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import OpenAiImageQueryDriver from griptape.engines import ImageQueryEngine from griptape.loaders import ImageLoader @@ -8,6 +6,6 @@ engine = ImageQueryEngine(image_query_driver=driver) -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/engines/src/rag_engines_1.py b/docs/griptape-framework/engines/src/rag_engines_1.py index a8a9cc06b..6ad28545f 100644 --- a/docs/griptape-framework/engines/src/rag_engines_1.py +++ b/docs/griptape-framework/engines/src/rag_engines_1.py @@ -1,3 +1,4 @@ +from griptape.chunkers import TextChunker from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver from griptape.engines.rag import RagContext, RagEngine from griptape.engines.rag.modules import PromptResponseRagModule, TranslateQueryRagModule, VectorStoreRetrievalRagModule @@ -8,12 +9,12 @@ prompt_driver = OpenAiChatPromptDriver(model="gpt-4o", temperature=0) vector_store = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) -artifacts = WebLoader(max_tokens=500).load("https://www.griptape.ai") - 
+artifact = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker(max_tokens=500).chunk(artifact) vector_store.upsert_text_artifacts( { - "griptape": artifacts, + "griptape": chunks, } ) diff --git a/docs/griptape-framework/engines/src/summary_engines_1.py b/docs/griptape-framework/engines/src/summary_engines_1.py index b5adf2a5a..5a16e4819 100644 --- a/docs/griptape-framework/engines/src/summary_engines_1.py +++ b/docs/griptape-framework/engines/src/summary_engines_1.py @@ -9,8 +9,6 @@ prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), ) -artifacts = PdfLoader().load(response.content) +artifact = PdfLoader().parse(response.content) -text = "\n\n".join([a.value for a in artifacts]) - -engine.summarize_text(text) +engine.summarize_artifacts(artifact) diff --git a/docs/griptape-framework/structures/src/tasks_12.py b/docs/griptape-framework/structures/src/tasks_12.py index 917b50607..1fdc99e1c 100644 --- a/docs/griptape-framework/structures/src/tasks_12.py +++ b/docs/griptape-framework/structures/src/tasks_12.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver from griptape.engines import VariationImageGenerationEngine from griptape.loaders import ImageLoader @@ -18,7 +16,7 @@ ) # Load input image artifact. -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") # Instantiate a pipeline. 
pipeline = Pipeline() diff --git a/docs/griptape-framework/structures/src/tasks_13.py b/docs/griptape-framework/structures/src/tasks_13.py index d2aa45983..4b7616d94 100644 --- a/docs/griptape-framework/structures/src/tasks_13.py +++ b/docs/griptape-framework/structures/src/tasks_13.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver from griptape.engines import InpaintingImageGenerationEngine from griptape.loaders import ImageLoader @@ -18,9 +16,9 @@ ) # Load input image artifacts. -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") -mask_artifact = ImageLoader().load(Path("tests/resources/mountain-mask.png").read_bytes()) +mask_artifact = ImageLoader().load("tests/resources/mountain-mask.png") # Instantiate a pipeline. pipeline = Pipeline() diff --git a/docs/griptape-framework/structures/src/tasks_14.py b/docs/griptape-framework/structures/src/tasks_14.py index ec489096d..d2e6ba2dd 100644 --- a/docs/griptape-framework/structures/src/tasks_14.py +++ b/docs/griptape-framework/structures/src/tasks_14.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver from griptape.engines import OutpaintingImageGenerationEngine from griptape.loaders import ImageLoader @@ -18,9 +16,9 @@ ) # Load input image artifacts. -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") -mask_artifact = ImageLoader().load(Path("tests/resources/mountain-mask.png").read_bytes()) +mask_artifact = ImageLoader().load("tests/resources/mountain-mask.png") # Instantiate a pipeline. 
pipeline = Pipeline() diff --git a/docs/griptape-framework/structures/src/tasks_15.py b/docs/griptape-framework/structures/src/tasks_15.py index 0c60864f7..802ac3397 100644 --- a/docs/griptape-framework/structures/src/tasks_15.py +++ b/docs/griptape-framework/structures/src/tasks_15.py @@ -1,5 +1,3 @@ -from pathlib import Path - from griptape.drivers import OpenAiImageQueryDriver from griptape.engines import ImageQueryEngine from griptape.loaders import ImageLoader @@ -18,7 +16,7 @@ ) # Load the input image artifact. -image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.png") # Instantiate a pipeline. pipeline = Pipeline() diff --git a/docs/griptape-framework/structures/src/tasks_18.py b/docs/griptape-framework/structures/src/tasks_18.py index 08ece5a92..0d3312d4c 100644 --- a/docs/griptape-framework/structures/src/tasks_18.py +++ b/docs/griptape-framework/structures/src/tasks_18.py @@ -3,12 +3,11 @@ from griptape.loaders import AudioLoader from griptape.structures import Pipeline from griptape.tasks import AudioTranscriptionTask -from griptape.utils import load_file driver = OpenAiAudioTranscriptionDriver(model="whisper-1") task = AudioTranscriptionTask( - input=lambda _: AudioLoader().load(load_file("tests/resources/sentences2.wav")), + input=lambda _: AudioLoader().load("tests/resources/sentences2.wav"), audio_transcription_engine=AudioTranscriptionEngine( audio_transcription_driver=driver, ), diff --git a/docs/griptape-framework/structures/src/tasks_3.py b/docs/griptape-framework/structures/src/tasks_3.py index 6584049d0..cdfe894bd 100644 --- a/docs/griptape-framework/structures/src/tasks_3.py +++ b/docs/griptape-framework/structures/src/tasks_3.py @@ -1,10 +1,8 @@ -from pathlib import Path - from griptape.loaders import ImageLoader from griptape.structures import Agent agent = Agent() -image_artifact = 
ImageLoader().load(Path("tests/resources/mountain.jpg").read_bytes()) +image_artifact = ImageLoader().load("tests/resources/mountain.jpg") agent.run([image_artifact, "What's in this image?"]) diff --git a/docs/griptape-tools/custom-tools/index.md b/docs/griptape-tools/custom-tools/index.md index 0cb248cf4..3715b7be6 100644 --- a/docs/griptape-tools/custom-tools/index.md +++ b/docs/griptape-tools/custom-tools/index.md @@ -2,46 +2,25 @@ Building your own tools is easy with Griptape! -To start, create a directory for your tool inside your project. All tool directories should have the following components: +Tools are nothing more than Python classes that inherit from [BaseTool](../../reference/griptape/tools/base_tool.md). +Each method in the class is decorated with an [activity](../../reference/griptape/utils/decorators.md#griptape.utils.decorators.activity) decorator which informs the LLM how and when it should use that Tool Activity. -* `manifest.yml` with a YAML manifest. -* `tool.py` file with a tool Python class. -* `requirements.txt` file with tool Python dependencies. - -Let's build a simple random number generator tool! First, create a new directory for your tool `rng_tool`. This is where all tool files will go. - -## Tool Manifest - -Tool YAML manifests are for humans and downstream systems, like ChatGPT Plugins, to generate manifests of their own. Create a `manifest.yml` file in the `rng_tool` directory: - -```yaml -version: "v1" -name: Random Number Generator -description: Tool for generating random numbers. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal -``` - -## Tool Dependencies +## Random Number Generator Tool -To add Python dependencies for your tool, add a `requirements.txt` file. The tool we are building is pretty simple, so you can leave that file empty. 
- -## Tool Implementation - -Next, create a `tool.py` file with the following code: +Here is a simple random number generator Tool: ```python --8<-- "docs/griptape-tools/custom-tools/src/index_1.py" ``` -## Testing Custom Tools +Check out other [Griptape Tools](https://github.com/griptape-ai/griptape/tree/main/griptape/tools) to learn more about tool implementation details. -Finally, let's test our tool: +## Tool Dependencies -```python ---8<-- "docs/griptape-tools/custom-tools/src/index_2.py" -``` +Each Tool can also have its own dependencies. You can specify them in a `requirements.txt` file in the tool directory and Griptape will install them during Tool execution. +To start, create a directory for your Tool inside your project. The directory must have the following structure: -That's it! You can start using this tool with any converter or directly via Griptape. +* `tool.py` file with a tool Python class. +* `requirements.txt` file with tool Python dependencies. -Check out other [Griptape Tools](https://github.com/griptape-ai/griptape/tree/main/griptape/tools) to learn more about tool implementation details. +That's it! Import and use your Tool in your project as you would with any other Griptape Tool. 
diff --git a/docs/griptape-tools/official-tools/src/vector_store_tool_1.py b/docs/griptape-tools/official-tools/src/vector_store_tool_1.py index 26c87e255..bdb60d98b 100644 --- a/docs/griptape-tools/official-tools/src/vector_store_tool_1.py +++ b/docs/griptape-tools/official-tools/src/vector_store_tool_1.py @@ -1,3 +1,4 @@ +from griptape.chunkers import TextChunker from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader from griptape.structures import Agent @@ -8,8 +9,9 @@ ) artifacts = WebLoader().load("https://www.griptape.ai") +chunks = TextChunker().chunk(artifacts) -vector_store_driver.upsert_text_artifacts({"griptape": artifacts}) +vector_store_driver.upsert_text_artifacts({"griptape": chunks}) vector_db = VectorStoreTool( description="This DB has information about the Griptape Python framework", vector_store_driver=vector_store_driver, diff --git a/griptape/artifacts/list_artifact.py b/griptape/artifacts/list_artifact.py index 0e6f81ca5..02dd295cd 100644 --- a/griptape/artifacts/list_artifact.py +++ b/griptape/artifacts/list_artifact.py @@ -18,18 +18,6 @@ class ListArtifact(BaseArtifact, Generic[T]): item_separator: str = field(default="\n\n", kw_only=True, metadata={"serializable": True}) validate_uniform_types: bool = field(default=False, kw_only=True, metadata={"serializable": True}) - def __getitem__(self, key: int) -> T: - return self.value[key] - - def __bool__(self) -> bool: - return len(self) > 0 - - def __add__(self, other: BaseArtifact) -> ListArtifact[T]: - return ListArtifact(self.value + other.value) - - def __iter__(self) -> Iterator[T]: - return iter(self.value) - @value.validator # pyright: ignore[reportAttributeAccessIssue] def validate_value(self, _: Attribute, value: list[T]) -> None: if self.validate_uniform_types and len(value) > 0: @@ -45,6 +33,18 @@ def child_type(self) -> Optional[type]: else: return None + def __getitem__(self, key: int) -> T: + return self.value[key] + + def 
__bool__(self) -> bool: + return len(self) > 0 + + def __add__(self, other: BaseArtifact) -> ListArtifact[T]: + return ListArtifact(self.value + other.value) + + def __iter__(self) -> Iterator[T]: + return iter(self.value) + def to_text(self) -> str: return self.item_separator.join([v.to_text() for v in self.value]) diff --git a/griptape/chunkers/base_chunker.py b/griptape/chunkers/base_chunker.py index 623185237..9b6ef64b9 100644 --- a/griptape/chunkers/base_chunker.py +++ b/griptape/chunkers/base_chunker.py @@ -6,6 +6,7 @@ from attrs import Attribute, Factory, define, field from griptape.artifacts import TextArtifact +from griptape.artifacts.list_artifact import ListArtifact from griptape.chunkers import ChunkSeparator from griptape.tokenizers import BaseTokenizer, OpenAiTokenizer @@ -32,8 +33,8 @@ def validate_max_tokens(self, _: Attribute, max_tokens: int) -> None: if max_tokens < 0: raise ValueError("max_tokens must be 0 or greater.") - def chunk(self, text: TextArtifact | str) -> list[TextArtifact]: - text = text.value if isinstance(text, TextArtifact) else text + def chunk(self, text: TextArtifact | ListArtifact | str) -> list[TextArtifact]: + text = text.to_text() if isinstance(text, (TextArtifact, ListArtifact)) else text return [TextArtifact(c) for c in self._chunk_recursively(text)] diff --git a/griptape/common/prompt_stack/contents/text_message_content.py b/griptape/common/prompt_stack/contents/text_message_content.py index c862564f3..39e678f28 100644 --- a/griptape/common/prompt_stack/contents/text_message_content.py +++ b/griptape/common/prompt_stack/contents/text_message_content.py @@ -4,7 +4,7 @@ from attrs import define, field -from griptape.artifacts import TextArtifact +from griptape.artifacts import BaseArtifact, TextArtifact from griptape.common import BaseDeltaMessageContent, BaseMessageContent, TextDeltaMessageContent if TYPE_CHECKING: @@ -13,7 +13,7 @@ @define class TextMessageContent(BaseMessageContent): - artifact: TextArtifact = 
field(metadata={"serializable": True}) + artifact: BaseArtifact = field(metadata={"serializable": True}) @classmethod def from_deltas(cls, deltas: Sequence[BaseDeltaMessageContent]) -> TextMessageContent: diff --git a/griptape/configs/drivers/base_drivers_config.py b/griptape/configs/drivers/base_drivers_config.py index ceead5c84..1c1ae149b 100644 --- a/griptape/configs/drivers/base_drivers_config.py +++ b/griptape/configs/drivers/base_drivers_config.py @@ -16,6 +16,7 @@ BaseImageGenerationDriver, BaseImageQueryDriver, BasePromptDriver, + BaseRulesetDriver, BaseTextToSpeechDriver, BaseVectorStoreDriver, ) @@ -47,6 +48,9 @@ class BaseDriversConfig(ABC, SerializableMixin): _audio_transcription_driver: BaseAudioTranscriptionDriver = field( default=None, kw_only=True, metadata={"serializable": True}, alias="audio_transcription_driver" ) + _ruleset_driver: BaseRulesetDriver = field( + default=None, kw_only=True, metadata={"serializable": True}, alias="ruleset_driver" + ) _last_drivers_config: Optional[BaseDriversConfig] = field(default=None) @@ -98,3 +102,7 @@ def text_to_speech_driver(self) -> BaseTextToSpeechDriver: ... @lazy_property() @abstractmethod def audio_transcription_driver(self) -> BaseAudioTranscriptionDriver: ... + + @lazy_property() + @abstractmethod + def ruleset_driver(self) -> BaseRulesetDriver: ... 
diff --git a/griptape/configs/drivers/drivers_config.py b/griptape/configs/drivers/drivers_config.py index 04edfd303..5261640c8 100644 --- a/griptape/configs/drivers/drivers_config.py +++ b/griptape/configs/drivers/drivers_config.py @@ -14,6 +14,7 @@ DummyTextToSpeechDriver, DummyVectorStoreDriver, LocalConversationMemoryDriver, + LocalRulesetDriver, ) from griptape.utils.decorators import lazy_property @@ -25,6 +26,7 @@ BaseImageGenerationDriver, BaseImageQueryDriver, BasePromptDriver, + BaseRulesetDriver, BaseTextToSpeechDriver, BaseVectorStoreDriver, ) @@ -63,3 +65,7 @@ def text_to_speech_driver(self) -> BaseTextToSpeechDriver: @lazy_property() def audio_transcription_driver(self) -> BaseAudioTranscriptionDriver: return DummyAudioTranscriptionDriver() + + @lazy_property() + def ruleset_driver(self) -> BaseRulesetDriver: + return LocalRulesetDriver() diff --git a/griptape/drivers/__init__.py b/griptape/drivers/__init__.py index 7d2de3552..a4806fc72 100644 --- a/griptape/drivers/__init__.py +++ b/griptape/drivers/__init__.py @@ -43,7 +43,7 @@ from .vector.dummy_vector_store_driver import DummyVectorStoreDriver from .vector.qdrant_vector_store_driver import QdrantVectorStoreDriver from .vector.astradb_vector_store_driver import AstraDbVectorStoreDriver -from .vector.griptape_cloud_knowledge_base_vector_store_driver import GriptapeCloudKnowledgeBaseVectorStoreDriver +from .vector.griptape_cloud_vector_store_driver import GriptapeCloudVectorStoreDriver from .sql.base_sql_driver import BaseSqlDriver from .sql.amazon_redshift_sql_driver import AmazonRedshiftSqlDriver @@ -99,6 +99,8 @@ from .web_search.base_web_search_driver import BaseWebSearchDriver from .web_search.google_web_search_driver import GoogleWebSearchDriver from .web_search.duck_duck_go_web_search_driver import DuckDuckGoWebSearchDriver +from .web_search.exa_web_search_driver import ExaWebSearchDriver +from .web_search.tavily_web_search_driver import TavilyWebSearchDriver from 
.event_listener.base_event_listener_driver import BaseEventListenerDriver from .event_listener.amazon_sqs_event_listener_driver import AmazonSqsEventListenerDriver @@ -114,6 +116,10 @@ from .rerank.base_rerank_driver import BaseRerankDriver from .rerank.cohere_rerank_driver import CohereRerankDriver +from .ruleset.base_ruleset_driver import BaseRulesetDriver +from .ruleset.local_ruleset_driver import LocalRulesetDriver +from .ruleset.griptape_cloud_ruleset_driver import GriptapeCloudRulesetDriver + from .text_to_speech.base_text_to_speech_driver import BaseTextToSpeechDriver from .text_to_speech.dummy_text_to_speech_driver import DummyTextToSpeechDriver from .text_to_speech.elevenlabs_text_to_speech_driver import ElevenLabsTextToSpeechDriver @@ -177,7 +183,7 @@ "QdrantVectorStoreDriver", "AstraDbVectorStoreDriver", "DummyVectorStoreDriver", - "GriptapeCloudKnowledgeBaseVectorStoreDriver", + "GriptapeCloudVectorStoreDriver", "BaseSqlDriver", "AmazonRedshiftSqlDriver", "SnowflakeSqlDriver", @@ -213,6 +219,8 @@ "BaseWebSearchDriver", "GoogleWebSearchDriver", "DuckDuckGoWebSearchDriver", + "ExaWebSearchDriver", + "TavilyWebSearchDriver", "BaseEventListenerDriver", "AmazonSqsEventListenerDriver", "WebhookEventListenerDriver", @@ -224,6 +232,9 @@ "AmazonS3FileManagerDriver", "BaseRerankDriver", "CohereRerankDriver", + "BaseRulesetDriver", + "LocalRulesetDriver", + "GriptapeCloudRulesetDriver", "BaseTextToSpeechDriver", "DummyTextToSpeechDriver", "ElevenLabsTextToSpeechDriver", diff --git a/griptape/drivers/audio_transcription/openai_audio_transcription_driver.py b/griptape/drivers/audio_transcription/openai_audio_transcription_driver.py index 312fa8318..f81031897 100644 --- a/griptape/drivers/audio_transcription/openai_audio_transcription_driver.py +++ b/griptape/drivers/audio_transcription/openai_audio_transcription_driver.py @@ -4,10 +4,11 @@ from typing import Optional import openai -from attrs import Factory, define, field +from attrs import define, field from 
griptape.artifacts import AudioArtifact, TextArtifact from griptape.drivers import BaseAudioTranscriptionDriver +from griptape.utils.decorators import lazy_property @define @@ -17,12 +18,11 @@ class OpenAiAudioTranscriptionDriver(BaseAudioTranscriptionDriver): base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) organization: Optional[str] = field(default=openai.organization, kw_only=True, metadata={"serializable": True}) - client: openai.OpenAI = field( - default=Factory( - lambda self: openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization), - takes_self=True, - ), - ) + _client: openai.OpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.OpenAI: + return openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization) def try_run(self, audio: AudioArtifact, prompts: Optional[list[str]] = None) -> TextArtifact: additional_params = {} diff --git a/griptape/drivers/embedding/amazon_bedrock_cohere_embedding_driver.py b/griptape/drivers/embedding/amazon_bedrock_cohere_embedding_driver.py index 4e4f4aa31..c1b2069c8 100644 --- a/griptape/drivers/embedding/amazon_bedrock_cohere_embedding_driver.py +++ b/griptape/drivers/embedding/amazon_bedrock_cohere_embedding_driver.py @@ -1,16 +1,18 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from attrs import Factory, define, field from griptape.drivers import BaseEmbeddingDriver from griptape.tokenizers.amazon_bedrock_tokenizer import AmazonBedrockTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_bedrock import BedrockClient from 
griptape.tokenizers.base_tokenizer import BaseTokenizer @@ -26,7 +28,7 @@ class AmazonBedrockCohereEmbeddingDriver(BaseEmbeddingDriver): `search_query` when querying your vector DB to find relevant documents. session: Optionally provide custom `boto3.Session`. tokenizer: Optionally provide custom `BedrockCohereTokenizer`. - bedrock_client: Optionally provide custom `bedrock-runtime` client. + client: Optionally provide custom `bedrock-runtime` client. """ DEFAULT_MODEL = "cohere.embed-english-v3" @@ -38,15 +40,16 @@ class AmazonBedrockCohereEmbeddingDriver(BaseEmbeddingDriver): default=Factory(lambda self: AmazonBedrockTokenizer(model=self.model), takes_self=True), kw_only=True, ) - bedrock_client: Any = field( - default=Factory(lambda self: self.session.client("bedrock-runtime"), takes_self=True), - kw_only=True, - ) + _client: BedrockClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> BedrockClient: + return self.session.client("bedrock-runtime") def try_embed_chunk(self, chunk: str) -> list[float]: payload = {"input_type": self.input_type, "texts": [chunk]} - response = self.bedrock_client.invoke_model( + response = self.client.invoke_model( body=json.dumps(payload), modelId=self.model, accept="*/*", diff --git a/griptape/drivers/embedding/amazon_bedrock_titan_embedding_driver.py b/griptape/drivers/embedding/amazon_bedrock_titan_embedding_driver.py index 5900d7d86..a17af9aee 100644 --- a/griptape/drivers/embedding/amazon_bedrock_titan_embedding_driver.py +++ b/griptape/drivers/embedding/amazon_bedrock_titan_embedding_driver.py @@ -1,16 +1,18 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from attrs import Factory, define, field from griptape.drivers import BaseEmbeddingDriver from griptape.tokenizers.amazon_bedrock_tokenizer import AmazonBedrockTokenizer from griptape.utils import 
import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_bedrock import BedrockClient from griptape.tokenizers.base_tokenizer import BaseTokenizer @@ -23,7 +25,7 @@ class AmazonBedrockTitanEmbeddingDriver(BaseEmbeddingDriver): model: Embedding model name. Defaults to DEFAULT_MODEL. tokenizer: Optionally provide custom `BedrockTitanTokenizer`. session: Optionally provide custom `boto3.Session`. - bedrock_client: Optionally provide custom `bedrock-runtime` client. + client: Optionally provide custom `bedrock-runtime` client. """ DEFAULT_MODEL = "amazon.titan-embed-text-v1" @@ -34,15 +36,16 @@ class AmazonBedrockTitanEmbeddingDriver(BaseEmbeddingDriver): default=Factory(lambda self: AmazonBedrockTokenizer(model=self.model), takes_self=True), kw_only=True, ) - bedrock_client: Any = field( - default=Factory(lambda self: self.session.client("bedrock-runtime"), takes_self=True), - kw_only=True, - ) + _client: BedrockClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> BedrockClient: + return self.session.client("bedrock-runtime") def try_embed_chunk(self, chunk: str) -> list[float]: payload = {"inputText": chunk} - response = self.bedrock_client.invoke_model( + response = self.client.invoke_model( body=json.dumps(payload), modelId=self.model, accept="application/json", diff --git a/griptape/drivers/embedding/amazon_sagemaker_jumpstart_embedding_driver.py b/griptape/drivers/embedding/amazon_sagemaker_jumpstart_embedding_driver.py index c4feb8a1d..c047236de 100644 --- a/griptape/drivers/embedding/amazon_sagemaker_jumpstart_embedding_driver.py +++ b/griptape/drivers/embedding/amazon_sagemaker_jumpstart_embedding_driver.py @@ -1,32 +1,35 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Optional from attrs import Factory, define, field from 
griptape.drivers import BaseEmbeddingDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_sagemaker import SageMakerClient @define class AmazonSageMakerJumpstartEmbeddingDriver(BaseEmbeddingDriver): session: boto3.Session = field(default=Factory(lambda: import_optional_dependency("boto3").Session()), kw_only=True) - sagemaker_client: Any = field( - default=Factory(lambda self: self.session.client("sagemaker-runtime"), takes_self=True), - kw_only=True, - ) endpoint: str = field(kw_only=True, metadata={"serializable": True}) custom_attributes: str = field(default="accept_eula=true", kw_only=True, metadata={"serializable": True}) inference_component_name: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) + _client: SageMakerClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> SageMakerClient: + return self.session.client("sagemaker-runtime") def try_embed_chunk(self, chunk: str) -> list[float]: payload = {"text_inputs": chunk, "mode": "embedding"} - endpoint_response = self.sagemaker_client.invoke_endpoint( + endpoint_response = self.client.invoke_endpoint( EndpointName=self.endpoint, ContentType="application/json", Body=json.dumps(payload).encode("utf-8"), diff --git a/griptape/drivers/embedding/azure_openai_embedding_driver.py b/griptape/drivers/embedding/azure_openai_embedding_driver.py index c1e601aef..366a91460 100644 --- a/griptape/drivers/embedding/azure_openai_embedding_driver.py +++ b/griptape/drivers/embedding/azure_openai_embedding_driver.py @@ -7,6 +7,7 @@ from griptape.drivers import OpenAiEmbeddingDriver from griptape.tokenizers import OpenAiTokenizer +from griptape.utils.decorators import lazy_property @define @@ -40,17 +41,16 @@ class AzureOpenAiEmbeddingDriver(OpenAiEmbeddingDriver): default=Factory(lambda self: 
OpenAiTokenizer(model=self.model), takes_self=True), kw_only=True, ) - client: openai.AzureOpenAI = field( - default=Factory( - lambda self: openai.AzureOpenAI( - organization=self.organization, - api_key=self.api_key, - api_version=self.api_version, - azure_endpoint=self.azure_endpoint, - azure_deployment=self.azure_deployment, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ), - ) + _client: openai.AzureOpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.AzureOpenAI: + return openai.AzureOpenAI( + organization=self.organization, + api_key=self.api_key, + api_version=self.api_version, + azure_endpoint=self.azure_endpoint, + azure_deployment=self.azure_deployment, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) diff --git a/griptape/drivers/embedding/cohere_embedding_driver.py b/griptape/drivers/embedding/cohere_embedding_driver.py index 365dc972e..42e89ff70 100644 --- a/griptape/drivers/embedding/cohere_embedding_driver.py +++ b/griptape/drivers/embedding/cohere_embedding_driver.py @@ -7,6 +7,7 @@ from griptape.drivers import BaseEmbeddingDriver from griptape.tokenizers import CohereTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from cohere import Client @@ -27,16 +28,16 @@ class CohereEmbeddingDriver(BaseEmbeddingDriver): DEFAULT_MODEL = "models/embedding-001" api_key: str = field(kw_only=True, metadata={"serializable": False}) - client: Client = field( - default=Factory(lambda self: import_optional_dependency("cohere").Client(self.api_key), takes_self=True), - kw_only=True, - ) + input_type: str = field(kw_only=True, metadata={"serializable": True}) + _client: Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) tokenizer: 
CohereTokenizer = field( default=Factory(lambda self: CohereTokenizer(model=self.model, client=self.client), takes_self=True), kw_only=True, ) - input_type: str = field(kw_only=True, metadata={"serializable": True}) + @lazy_property() + def client(self) -> Client: + return import_optional_dependency("cohere").Client(self.api_key) def try_embed_chunk(self, chunk: str) -> list[float]: result = self.client.embed(texts=[chunk], model=self.model, input_type=self.input_type) diff --git a/griptape/drivers/embedding/huggingface_hub_embedding_driver.py b/griptape/drivers/embedding/huggingface_hub_embedding_driver.py index c1be2ec96..573bfc379 100644 --- a/griptape/drivers/embedding/huggingface_hub_embedding_driver.py +++ b/griptape/drivers/embedding/huggingface_hub_embedding_driver.py @@ -2,10 +2,11 @@ from typing import TYPE_CHECKING -from attrs import Factory, define, field +from attrs import define, field from griptape.drivers import BaseEmbeddingDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from huggingface_hub import InferenceClient @@ -22,16 +23,14 @@ class HuggingFaceHubEmbeddingDriver(BaseEmbeddingDriver): """ api_token: str = field(kw_only=True, metadata={"serializable": True}) - client: InferenceClient = field( - default=Factory( - lambda self: import_optional_dependency("huggingface_hub").InferenceClient( - model=self.model, - token=self.api_token, - ), - takes_self=True, - ), - kw_only=True, - ) + _client: InferenceClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> InferenceClient: + return import_optional_dependency("huggingface_hub").InferenceClient( + model=self.model, + token=self.api_token, + ) def try_embed_chunk(self, chunk: str) -> list[float]: response = self.client.feature_extraction(chunk) diff --git a/griptape/drivers/embedding/ollama_embedding_driver.py 
b/griptape/drivers/embedding/ollama_embedding_driver.py index c5c30d5af..1b32a21f3 100644 --- a/griptape/drivers/embedding/ollama_embedding_driver.py +++ b/griptape/drivers/embedding/ollama_embedding_driver.py @@ -2,10 +2,11 @@ from typing import TYPE_CHECKING, Optional -from attrs import Factory, define, field +from attrs import define, field from griptape.drivers import BaseEmbeddingDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from ollama import Client @@ -23,10 +24,11 @@ class OllamaEmbeddingDriver(BaseEmbeddingDriver): model: str = field(kw_only=True, metadata={"serializable": True}) host: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - client: Client = field( - default=Factory(lambda self: import_optional_dependency("ollama").Client(host=self.host), takes_self=True), - kw_only=True, - ) + _client: Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Client: + return import_optional_dependency("ollama").Client(host=self.host) def try_embed_chunk(self, chunk: str) -> list[float]: return list(self.client.embeddings(model=self.model, prompt=chunk)["embedding"]) diff --git a/griptape/drivers/embedding/openai_embedding_driver.py b/griptape/drivers/embedding/openai_embedding_driver.py index 0995fba68..b0b799790 100644 --- a/griptape/drivers/embedding/openai_embedding_driver.py +++ b/griptape/drivers/embedding/openai_embedding_driver.py @@ -7,6 +7,7 @@ from griptape.drivers import BaseEmbeddingDriver from griptape.tokenizers import OpenAiTokenizer +from griptape.utils.decorators import lazy_property @define @@ -33,16 +34,15 @@ class OpenAiEmbeddingDriver(BaseEmbeddingDriver): base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) 
organization: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - client: openai.OpenAI = field( - default=Factory( - lambda self: openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization), - takes_self=True, - ), - ) tokenizer: OpenAiTokenizer = field( default=Factory(lambda self: OpenAiTokenizer(model=self.model), takes_self=True), kw_only=True, ) + _client: openai.OpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.OpenAI: + return openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization) def try_embed_chunk(self, chunk: str) -> list[float]: # Address a performance issue in older ada models diff --git a/griptape/drivers/embedding/voyageai_embedding_driver.py b/griptape/drivers/embedding/voyageai_embedding_driver.py index c5e418ed1..bc4e78bf1 100644 --- a/griptape/drivers/embedding/voyageai_embedding_driver.py +++ b/griptape/drivers/embedding/voyageai_embedding_driver.py @@ -1,12 +1,16 @@ from __future__ import annotations -from typing import Any, Optional +from typing import TYPE_CHECKING, Any, Optional from attrs import Factory, define, field from griptape.drivers import BaseEmbeddingDriver from griptape.tokenizers import VoyageAiTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + import voyageai @define @@ -25,17 +29,16 @@ class VoyageAiEmbeddingDriver(BaseEmbeddingDriver): model: str = field(default=DEFAULT_MODEL, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) - client: Any = field( - default=Factory( - lambda self: import_optional_dependency("voyageai").Client(api_key=self.api_key), - takes_self=True, - ), - ) tokenizer: VoyageAiTokenizer = field( default=Factory(lambda self: 
VoyageAiTokenizer(model=self.model, api_key=self.api_key), takes_self=True), kw_only=True, ) input_type: str = field(default="document", kw_only=True, metadata={"serializable": True}) + _client: voyageai.Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Any: + return import_optional_dependency("voyageai").Client(api_key=self.api_key) def try_embed_chunk(self, chunk: str) -> list[float]: return self.client.embed([chunk], model=self.model, input_type=self.input_type).embeddings[0] diff --git a/griptape/drivers/event_listener/amazon_sqs_event_listener_driver.py b/griptape/drivers/event_listener/amazon_sqs_event_listener_driver.py index 4c632cb01..9030f5d77 100644 --- a/griptape/drivers/event_listener/amazon_sqs_event_listener_driver.py +++ b/griptape/drivers/event_listener/amazon_sqs_event_listener_driver.py @@ -1,25 +1,31 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from attrs import Factory, define, field from griptape.drivers.event_listener.base_event_listener_driver import BaseEventListenerDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_sqs import SQSClient @define class AmazonSqsEventListenerDriver(BaseEventListenerDriver): queue_url: str = field(kw_only=True) session: boto3.Session = field(default=Factory(lambda: import_optional_dependency("boto3").Session()), kw_only=True) - sqs_client: Any = field(default=Factory(lambda self: self.session.client("sqs"), takes_self=True)) + _client: SQSClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> SQSClient: + return self.session.client("sqs") def try_publish_event_payload(self, event_payload: dict) -> None: - self.sqs_client.send_message(QueueUrl=self.queue_url, 
MessageBody=json.dumps(event_payload)) + self.client.send_message(QueueUrl=self.queue_url, MessageBody=json.dumps(event_payload)) def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> None: entries = [ @@ -27,4 +33,4 @@ def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> No for event_payload in event_payload_batch ] - self.sqs_client.send_message_batch(QueueUrl=self.queue_url, Entries=entries) + self.client.send_message_batch(QueueUrl=self.queue_url, Entries=entries) diff --git a/griptape/drivers/event_listener/aws_iot_core_event_listener_driver.py b/griptape/drivers/event_listener/aws_iot_core_event_listener_driver.py index 3b014aed4..c3a5a55e7 100644 --- a/griptape/drivers/event_listener/aws_iot_core_event_listener_driver.py +++ b/griptape/drivers/event_listener/aws_iot_core_event_listener_driver.py @@ -1,15 +1,17 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from attrs import Factory, define, field from griptape.drivers.event_listener.base_event_listener_driver import BaseEventListenerDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_iot_data import IoTDataPlaneClient @define @@ -17,10 +19,14 @@ class AwsIotCoreEventListenerDriver(BaseEventListenerDriver): iot_endpoint: str = field(kw_only=True) topic: str = field(kw_only=True) session: boto3.Session = field(default=Factory(lambda: import_optional_dependency("boto3").Session()), kw_only=True) - iotdata_client: Any = field(default=Factory(lambda self: self.session.client("iot-data"), takes_self=True)) + _client: IoTDataPlaneClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> IoTDataPlaneClient: + return self.session.client("iot-data") def try_publish_event_payload(self, event_payload: dict) -> 
None: - self.iotdata_client.publish(topic=self.topic, payload=json.dumps(event_payload)) + self.client.publish(topic=self.topic, payload=json.dumps(event_payload)) def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> None: - self.iotdata_client.publish(topic=self.topic, payload=json.dumps(event_payload_batch)) + self.client.publish(topic=self.topic, payload=json.dumps(event_payload_batch)) diff --git a/griptape/drivers/event_listener/pusher_event_listener_driver.py b/griptape/drivers/event_listener/pusher_event_listener_driver.py index ce9a4fb34..33d160b46 100644 --- a/griptape/drivers/event_listener/pusher_event_listener_driver.py +++ b/griptape/drivers/event_listener/pusher_event_listener_driver.py @@ -2,10 +2,11 @@ from typing import TYPE_CHECKING -from attrs import Factory, define, field +from attrs import define, field from griptape.drivers import BaseEventListenerDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from pusher import Pusher @@ -13,25 +14,24 @@ @define class PusherEventListenerDriver(BaseEventListenerDriver): - app_id: str = field(kw_only=True) - key: str = field(kw_only=True) - secret: str = field(kw_only=True) - cluster: str = field(kw_only=True) - channel: str = field(kw_only=True) - event_name: str = field(kw_only=True) - pusher_client: Pusher = field( - default=Factory( - lambda self: import_optional_dependency("pusher").Pusher( - app_id=self.app_id, - key=self.key, - secret=self.secret, - cluster=self.cluster, - ssl=True, - ), - takes_self=True, - ), - kw_only=True, - ) + app_id: str = field(kw_only=True, metadata={"serializable": True}) + key: str = field(kw_only=True, metadata={"serializable": True}) + secret: str = field(kw_only=True, metadata={"serializable": False}) + cluster: str = field(kw_only=True, metadata={"serializable": True}) + channel: str = field(kw_only=True, metadata={"serializable": True}) + event_name: str = 
field(kw_only=True, metadata={"serializable": True}) + ssl: bool = field(default=True, kw_only=True, metadata={"serializable": True}) + _client: Pusher = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Pusher: + return import_optional_dependency("pusher").Pusher( + app_id=self.app_id, + key=self.key, + secret=self.secret, + cluster=self.cluster, + ssl=self.ssl, + ) def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> None: data = [ @@ -39,7 +39,7 @@ def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> No for event_payload in event_payload_batch ] - self.pusher_client.trigger_batch(data) + self.client.trigger_batch(data) def try_publish_event_payload(self, event_payload: dict) -> None: - self.pusher_client.trigger(channels=self.channel, event_name=self.event_name, data=event_payload) + self.client.trigger(channels=self.channel, event_name=self.event_name, data=event_payload) diff --git a/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py b/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py index 20e432c0b..1e841866a 100644 --- a/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py +++ b/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py @@ -1,15 +1,17 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from attrs import Attribute, Factory, define, field +from griptape.utils.decorators import lazy_property from griptape.utils.import_utils import import_optional_dependency from .base_file_manager_driver import BaseFileManagerDriver if TYPE_CHECKING: import boto3 + from mypy_boto3_s3 import S3Client @define @@ -21,13 +23,17 @@ class AmazonS3FileManagerDriver(BaseFileManagerDriver): bucket: The name of the S3 bucket. workdir: The absolute working directory (must start with "/"). 
List, load, and save operations will be performed relative to this directory. - s3_client: The S3 client to use for S3 operations. + client: The S3 client to use for S3 operations. """ session: boto3.Session = field(default=Factory(lambda: import_optional_dependency("boto3").Session()), kw_only=True) bucket: str = field(kw_only=True) workdir: str = field(default="/", kw_only=True) - s3_client: Any = field(default=Factory(lambda self: self.session.client("s3"), takes_self=True), kw_only=True) + _client: S3Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> S3Client: + return self.session.client("s3") @workdir.validator # pyright: ignore[reportAttributeAccessIssue] def validate_workdir(self, _: Attribute, workdir: str) -> None: @@ -51,7 +57,7 @@ def try_load_file(self, path: str) -> bytes: raise IsADirectoryError try: - response = self.s3_client.get_object(Bucket=self.bucket, Key=full_key) + response = self.client.get_object(Bucket=self.bucket, Key=full_key) return response["Body"].read() except botocore.exceptions.ClientError as e: if e.response["Error"]["Code"] in {"NoSuchKey", "404"}: @@ -62,7 +68,7 @@ def try_save_file(self, path: str, value: bytes) -> None: full_key = self._to_full_key(path) if self._is_a_directory(full_key): raise IsADirectoryError - self.s3_client.put_object(Bucket=self.bucket, Key=full_key, Body=value) + self.client.put_object(Bucket=self.bucket, Key=full_key, Body=value) def _to_full_key(self, path: str) -> str: path = path.lstrip("/") @@ -90,7 +96,7 @@ def _list_files_and_dirs(self, full_key: str, **kwargs) -> list[str]: if max_items is not None: pagination_config["MaxItems"] = max_items - paginator = self.s3_client.get_paginator("list_objects_v2") + paginator = self.client.get_paginator("list_objects_v2") pages = paginator.paginate( Bucket=self.bucket, Prefix=full_key, @@ -116,7 +122,7 @@ def _is_a_directory(self, full_key: str) -> bool: return True try: - 
self.s3_client.head_object(Bucket=self.bucket, Key=full_key) + self.client.head_object(Bucket=self.bucket, Key=full_key) except botocore.exceptions.ClientError as e: if e.response["Error"]["Code"] in {"NoSuchKey", "404"}: return len(self._list_files_and_dirs(full_key, max_items=1)) > 0 diff --git a/griptape/drivers/file_manager/base_file_manager_driver.py b/griptape/drivers/file_manager/base_file_manager_driver.py index dce538812..c904f1532 100644 --- a/griptape/drivers/file_manager/base_file_manager_driver.py +++ b/griptape/drivers/file_manager/base_file_manager_driver.py @@ -1,11 +1,11 @@ from __future__ import annotations from abc import ABC, abstractmethod +from typing import Optional -from attrs import Factory, define, field +from attrs import define, field -import griptape.loaders as loaders -from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact +from griptape.artifacts import BlobArtifact, InfoArtifact, TextArtifact @define @@ -17,57 +17,28 @@ class BaseFileManagerDriver(ABC): loaders: Dictionary of file extension specific loaders to use for loading file contents into artifacts. 
""" - default_loader: loaders.BaseLoader = field(default=Factory(lambda: loaders.BlobLoader()), kw_only=True) - loaders: dict[str, loaders.BaseLoader] = field( - default=Factory( - lambda: { - "pdf": loaders.PdfLoader(), - "csv": loaders.CsvLoader(), - "txt": loaders.TextLoader(), - "html": loaders.TextLoader(), - "json": loaders.TextLoader(), - "yaml": loaders.TextLoader(), - "xml": loaders.TextLoader(), - "png": loaders.ImageLoader(), - "jpg": loaders.ImageLoader(), - "jpeg": loaders.ImageLoader(), - "webp": loaders.ImageLoader(), - "gif": loaders.ImageLoader(), - "bmp": loaders.ImageLoader(), - "tiff": loaders.ImageLoader(), - }, - ), - kw_only=True, - ) + workdir: str = field(kw_only=True) + encoding: Optional[str] = field(default=None, kw_only=True) - def list_files(self, path: str) -> TextArtifact | ErrorArtifact: + def list_files(self, path: str) -> TextArtifact: entries = self.try_list_files(path) return TextArtifact("\n".join(list(entries))) @abstractmethod def try_list_files(self, path: str) -> list[str]: ... - def load_file(self, path: str) -> BaseArtifact: - extension = path.split(".")[-1] - loader = self.loaders.get(extension) or self.default_loader - source = self.try_load_file(path) - result = loader.load(source) - - if isinstance(result, BaseArtifact): - return result + def load_file(self, path: str) -> BlobArtifact | TextArtifact: + if self.encoding is None: + return BlobArtifact(self.try_load_file(path)) else: - return ListArtifact(result) + return TextArtifact(self.try_load_file(path).decode(encoding=self.encoding), encoding=self.encoding) @abstractmethod def try_load_file(self, path: str) -> bytes: ... 
def save_file(self, path: str, value: bytes | str) -> InfoArtifact: - extension = path.split(".")[-1] - loader = self.loaders.get(extension) or self.default_loader - encoding = None if loader is None else loader.encoding - if isinstance(value, str): - value = value.encode() if encoding is None else value.encode(encoding=encoding) + value = value.encode() if self.encoding is None else value.encode(encoding=self.encoding) elif isinstance(value, (bytearray, memoryview)): raise ValueError(f"Unsupported type: {type(value)}") diff --git a/griptape/drivers/file_manager/local_file_manager_driver.py b/griptape/drivers/file_manager/local_file_manager_driver.py index a6f1f0726..b383ff7d7 100644 --- a/griptape/drivers/file_manager/local_file_manager_driver.py +++ b/griptape/drivers/file_manager/local_file_manager_driver.py @@ -2,6 +2,7 @@ import os from pathlib import Path +from typing import Optional from attrs import Attribute, Factory, define, field @@ -16,11 +17,11 @@ class LocalFileManagerDriver(BaseFileManagerDriver): workdir: The absolute working directory. List, load, and save operations will be performed relative to this directory. 
""" - workdir: str = field(default=Factory(lambda: os.getcwd()), kw_only=True) + workdir: Optional[str] = field(default=Factory(lambda: os.getcwd()), kw_only=True) @workdir.validator # pyright: ignore[reportAttributeAccessIssue] def validate_workdir(self, _: Attribute, workdir: str) -> None: - if not Path(workdir).is_absolute(): + if self.workdir is not None and not Path(workdir).is_absolute(): raise ValueError("Workdir must be an absolute path") def try_list_files(self, path: str) -> list[str]: @@ -41,8 +42,7 @@ def try_save_file(self, path: str, value: bytes) -> None: Path(full_path).write_bytes(value) def _full_path(self, path: str) -> str: - path = path.lstrip("/") - full_path = os.path.join(self.workdir, path) + full_path = path if self.workdir is None else os.path.join(self.workdir, path.lstrip("/")) # Need to keep the trailing slash if it was there, # because it means the path is a directory. ended_with_slash = path.endswith("/") diff --git a/griptape/drivers/image_generation/amazon_bedrock_image_generation_driver.py b/griptape/drivers/image_generation/amazon_bedrock_image_generation_driver.py index 4db302f6f..3e69036f6 100644 --- a/griptape/drivers/image_generation/amazon_bedrock_image_generation_driver.py +++ b/griptape/drivers/image_generation/amazon_bedrock_image_generation_driver.py @@ -1,16 +1,18 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Optional from attrs import Factory, define, field from griptape.artifacts import ImageArtifact from griptape.drivers import BaseMultiModelImageGenerationDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_bedrock import BedrockClient @define @@ -20,19 +22,21 @@ class AmazonBedrockImageGenerationDriver(BaseMultiModelImageGenerationDriver): Attributes: model: Bedrock model ID. session: boto3 session. 
- bedrock_client: Bedrock runtime client. + client: Bedrock runtime client. image_width: Width of output images. Defaults to 512 and must be a multiple of 64. image_height: Height of output images. Defaults to 512 and must be a multiple of 64. seed: Optionally provide a consistent seed to generation requests, increasing consistency in output. """ session: boto3.Session = field(default=Factory(lambda: import_optional_dependency("boto3").Session()), kw_only=True) - bedrock_client: Any = field( - default=Factory(lambda self: self.session.client(service_name="bedrock-runtime"), takes_self=True), - ) image_width: int = field(default=512, kw_only=True, metadata={"serializable": True}) image_height: int = field(default=512, kw_only=True, metadata={"serializable": True}) seed: Optional[int] = field(default=None, kw_only=True, metadata={"serializable": True}) + _client: BedrockClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> BedrockClient: + return self.session.client("bedrock-runtime") def try_text_to_image(self, prompts: list[str], negative_prompts: Optional[list[str]] = None) -> ImageArtifact: request = self.image_generation_model_driver.text_to_image_request_parameters( @@ -127,7 +131,7 @@ def try_image_outpainting( ) def _make_request(self, request: dict) -> bytes: - response = self.bedrock_client.invoke_model( + response = self.client.invoke_model( body=json.dumps(request), modelId=self.model, accept="application/json", diff --git a/griptape/drivers/image_generation/azure_openai_image_generation_driver.py b/griptape/drivers/image_generation/azure_openai_image_generation_driver.py index 85facda4c..2555fcfd0 100644 --- a/griptape/drivers/image_generation/azure_openai_image_generation_driver.py +++ b/griptape/drivers/image_generation/azure_openai_image_generation_driver.py @@ -6,6 +6,7 @@ from attrs import Factory, define, field from griptape.drivers import OpenAiImageGenerationDriver 
+from griptape.utils.decorators import lazy_property @define @@ -34,17 +35,16 @@ class AzureOpenAiImageGenerationDriver(OpenAiImageGenerationDriver): metadata={"serializable": False}, ) api_version: str = field(default="2024-02-01", kw_only=True, metadata={"serializable": True}) - client: openai.AzureOpenAI = field( - default=Factory( - lambda self: openai.AzureOpenAI( - organization=self.organization, - api_key=self.api_key, - api_version=self.api_version, - azure_endpoint=self.azure_endpoint, - azure_deployment=self.azure_deployment, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ), - ) + _client: openai.AzureOpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.AzureOpenAI: + return openai.AzureOpenAI( + organization=self.organization, + api_key=self.api_key, + api_version=self.api_version, + azure_endpoint=self.azure_endpoint, + azure_deployment=self.azure_deployment, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) diff --git a/griptape/drivers/image_generation/openai_image_generation_driver.py b/griptape/drivers/image_generation/openai_image_generation_driver.py index bf77ac300..ec8129e89 100644 --- a/griptape/drivers/image_generation/openai_image_generation_driver.py +++ b/griptape/drivers/image_generation/openai_image_generation_driver.py @@ -4,10 +4,11 @@ from typing import TYPE_CHECKING, Literal, Optional, Union, cast import openai -from attrs import Factory, define, field +from attrs import define, field from griptape.artifacts import ImageArtifact from griptape.drivers import BaseImageGenerationDriver +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from openai.types.images_response import ImagesResponse @@ -38,12 +39,6 @@ class OpenAiImageGenerationDriver(BaseImageGenerationDriver): base_url: Optional[str] = 
field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) organization: Optional[str] = field(default=openai.organization, kw_only=True, metadata={"serializable": True}) - client: openai.OpenAI = field( - default=Factory( - lambda self: openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization), - takes_self=True, - ), - ) style: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) quality: Union[Literal["standard"], Literal["hd"]] = field( default="standard", @@ -58,6 +53,11 @@ class OpenAiImageGenerationDriver(BaseImageGenerationDriver): Literal["1792x1024"], ] = field(default="1024x1024", kw_only=True, metadata={"serializable": True}) response_format: Literal["b64_json"] = field(default="b64_json", kw_only=True, metadata={"serializable": True}) + _client: openai.OpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.OpenAI: + return openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization) def try_text_to_image(self, prompts: list[str], negative_prompts: Optional[list[str]] = None) -> ImageArtifact: prompt = ", ".join(prompts) diff --git a/griptape/drivers/image_query/amazon_bedrock_image_query_driver.py b/griptape/drivers/image_query/amazon_bedrock_image_query_driver.py index 46406d972..9742cb9c7 100644 --- a/griptape/drivers/image_query/amazon_bedrock_image_query_driver.py +++ b/griptape/drivers/image_query/amazon_bedrock_image_query_driver.py @@ -1,15 +1,17 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from attrs import Factory, define, field from griptape.drivers import BaseMultiModelImageQueryDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import 
lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_bedrock import BedrockClient from griptape.artifacts import ImageArtifact, TextArtifact @@ -17,15 +19,16 @@ @define class AmazonBedrockImageQueryDriver(BaseMultiModelImageQueryDriver): session: boto3.Session = field(default=Factory(lambda: import_optional_dependency("boto3").Session()), kw_only=True) - bedrock_client: Any = field( - default=Factory(lambda self: self.session.client("bedrock-runtime"), takes_self=True), - kw_only=True, - ) + _client: BedrockClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> BedrockClient: + return self.session.client("bedrock-runtime") def try_query(self, query: str, images: list[ImageArtifact]) -> TextArtifact: payload = self.image_query_model_driver.image_query_request_parameters(query, images, self.max_tokens) - response = self.bedrock_client.invoke_model( + response = self.client.invoke_model( modelId=self.model, contentType="application/json", accept="application/json", diff --git a/griptape/drivers/image_query/anthropic_image_query_driver.py b/griptape/drivers/image_query/anthropic_image_query_driver.py index a50685724..191d95373 100644 --- a/griptape/drivers/image_query/anthropic_image_query_driver.py +++ b/griptape/drivers/image_query/anthropic_image_query_driver.py @@ -1,12 +1,16 @@ from __future__ import annotations -from typing import Any, Optional +from typing import TYPE_CHECKING, Optional -from attrs import Factory, define, field +from attrs import define, field from griptape.artifacts import ImageArtifact, TextArtifact from griptape.drivers import BaseImageQueryDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + from anthropic import Anthropic @define @@ -21,13 +25,11 @@ class AnthropicImageQueryDriver(BaseImageQueryDriver): api_key: Optional[str] = field(default=None, kw_only=True, 
metadata={"serializable": False}) model: str = field(kw_only=True, metadata={"serializable": True}) - client: Any = field( - default=Factory( - lambda self: import_optional_dependency("anthropic").Anthropic(api_key=self.api_key), - takes_self=True, - ), - kw_only=True, - ) + _client: Anthropic = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Anthropic: + return import_optional_dependency("anthropic").Anthropic(api_key=self.api_key) def try_query(self, query: str, images: list[ImageArtifact]) -> TextArtifact: if self.max_tokens is None: diff --git a/griptape/drivers/image_query/azure_openai_image_query_driver.py b/griptape/drivers/image_query/azure_openai_image_query_driver.py index 04492e471..637fa11cc 100644 --- a/griptape/drivers/image_query/azure_openai_image_query_driver.py +++ b/griptape/drivers/image_query/azure_openai_image_query_driver.py @@ -6,6 +6,7 @@ from attrs import Factory, define, field from griptape.drivers.image_query.openai_image_query_driver import OpenAiImageQueryDriver +from griptape.utils.decorators import lazy_property @define @@ -34,17 +35,16 @@ class AzureOpenAiImageQueryDriver(OpenAiImageQueryDriver): metadata={"serializable": False}, ) api_version: str = field(default="2024-02-01", kw_only=True, metadata={"serializable": True}) - client: openai.AzureOpenAI = field( - default=Factory( - lambda self: openai.AzureOpenAI( - organization=self.organization, - api_key=self.api_key, - api_version=self.api_version, - azure_endpoint=self.azure_endpoint, - azure_deployment=self.azure_deployment, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ), - ) + _client: openai.AzureOpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.AzureOpenAI: + return openai.AzureOpenAI( + organization=self.organization, + 
api_key=self.api_key, + api_version=self.api_version, + azure_endpoint=self.azure_endpoint, + azure_deployment=self.azure_deployment, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) diff --git a/griptape/drivers/image_query/openai_image_query_driver.py b/griptape/drivers/image_query/openai_image_query_driver.py index 6399efa95..f0ef9e148 100644 --- a/griptape/drivers/image_query/openai_image_query_driver.py +++ b/griptape/drivers/image_query/openai_image_query_driver.py @@ -3,7 +3,7 @@ from typing import Literal, Optional import openai -from attrs import Factory, define, field +from attrs import define, field from openai.types.chat import ( ChatCompletionContentPartImageParam, ChatCompletionContentPartParam, @@ -13,6 +13,7 @@ from griptape.artifacts import ImageArtifact, TextArtifact from griptape.drivers.image_query.base_image_query_driver import BaseImageQueryDriver +from griptape.utils.decorators import lazy_property @define @@ -24,12 +25,11 @@ class OpenAiImageQueryDriver(BaseImageQueryDriver): api_key: Optional[str] = field(default=None, kw_only=True) organization: Optional[str] = field(default=openai.organization, kw_only=True, metadata={"serializable": True}) image_quality: Literal["auto", "low", "high"] = field(default="auto", kw_only=True, metadata={"serializable": True}) - client: openai.OpenAI = field( - default=Factory( - lambda self: openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization), - takes_self=True, - ), - ) + _client: openai.OpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.OpenAI: + return openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization) def try_query(self, query: str, images: list[ImageArtifact]) -> TextArtifact: message_parts: list[ChatCompletionContentPartParam] = [ diff --git 
a/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py b/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py index 0842870eb..47ea13e0a 100644 --- a/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py @@ -7,9 +7,11 @@ from griptape.drivers import BaseConversationMemoryDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_dynamodb.service_resource import Table from griptape.memory.structure import Run @@ -23,11 +25,11 @@ class AmazonDynamoDbConversationMemoryDriver(BaseConversationMemoryDriver): partition_key_value: str = field(kw_only=True, metadata={"serializable": True}) sort_key: Optional[str] = field(default=None, metadata={"serializable": True}) sort_key_value: Optional[str | int] = field(default=None, metadata={"serializable": True}) + _table: Table = field(default=None, kw_only=True, alias="table", metadata={"serializable": False}) - table: Any = field(init=False) - - def __attrs_post_init__(self) -> None: - self.table = self.session.resource("dynamodb").Table(self.table_name) + @lazy_property() + def table(self) -> Table: + return self.session.resource("dynamodb").Table(self.table_name) def store(self, runs: list[Run], metadata: dict) -> None: self.table.update_item( diff --git a/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py b/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py index 3aac74090..6c1783519 100644 --- a/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py @@ -22,8 +22,8 @@ class GriptapeCloudConversationMemoryDriver(BaseConversationMemoryDriver): Attributes: thread_id: The ID of 
the Thread to store the conversation memory in. If not provided, the driver will attempt to - retrieve the ID from the environment variable `GT_CLOUD_THREAD_ID`. If that is not set, a new Thread will be - created. + retrieve the ID from the environment variable `GT_CLOUD_THREAD_ID`. + alias: The alias of the Thread to store the conversation memory in. base_url: The base URL of the Griptape Cloud API. Defaults to the value of the environment variable `GT_CLOUD_BASE_URL` or `https://cloud.griptape.ai`. api_key: The API key to use for authenticating with the Griptape Cloud API. If not provided, the driver will @@ -33,7 +33,11 @@ class GriptapeCloudConversationMemoryDriver(BaseConversationMemoryDriver): ValueError: If `api_key` is not provided. """ - thread_id: str = field( + thread_id: Optional[str] = field( + default=None, + metadata={"serializable": True}, + ) + alias: Optional[str] = field( default=None, metadata={"serializable": True}, ) @@ -45,10 +49,7 @@ class GriptapeCloudConversationMemoryDriver(BaseConversationMemoryDriver): default=Factory(lambda self: {"Authorization": f"Bearer {self.api_key}"}, takes_self=True), init=False, ) - - def __attrs_post_init__(self) -> None: - if self.thread_id is None: - self.thread_id = os.getenv("GT_CLOUD_THREAD_ID", self._get_thread_id()) + _thread: Optional[dict] = field(default=None, init=False) @api_key.validator # pyright: ignore[reportAttributeAccessIssue] def validate_api_key(self, _: Attribute, value: Optional[str]) -> str: @@ -56,6 +57,37 @@ def validate_api_key(self, _: Attribute, value: Optional[str]) -> str: raise ValueError(f"{self.__class__.__name__} requires an API key") return value + @property + def thread(self) -> dict: + """Try to get the Thread by ID, alias, or create a new one.""" + if self._thread is None: + thread = None + if self.thread_id is None: + self.thread_id = os.getenv("GT_CLOUD_THREAD_ID") + + if self.thread_id is not None: + res = self._call_api("get", f"/threads/{self.thread_id}", 
raise_for_status=False) + if res.status_code == 200: + thread = res.json() + + # use name as 'alias' to get thread + if thread is None and self.alias is not None: + res = self._call_api("get", f"/threads?alias={self.alias}").json() + if res.get("threads"): + thread = res["threads"][0] + self.thread_id = thread.get("thread_id") + + # no thread by name or thread_id + if thread is None: + data = {"name": uuid.uuid4().hex} if self.alias is None else {"name": self.alias, "alias": self.alias} + thread = self._call_api("post", "/threads", data).json() + self.thread_id = thread["thread_id"] + self.alias = thread.get("alias") + + self._thread = thread + + return self._thread # pyright: ignore[reportReturnType] + def store(self, runs: list[Run], metadata: dict[str, Any]) -> None: # serialize the run artifacts to json strings messages = [ @@ -79,25 +111,20 @@ def store(self, runs: list[Run], metadata: dict[str, Any]) -> None: # patch the Thread with the new messages and metadata # all old Messages are replaced with the new ones - response = requests.patch( - self._get_url(f"/threads/{self.thread_id}"), - json=body, - headers=self.headers, - ) - response.raise_for_status() + thread_id = self.thread["thread_id"] if self.thread_id is None else self.thread_id + self._call_api("patch", f"/threads/{thread_id}", body) + self._thread = None def load(self) -> tuple[list[Run], dict[str, Any]]: from griptape.memory.structure import Run + thread_id = self.thread["thread_id"] if self.thread_id is None else self.thread_id + # get the Messages from the Thread - messages_response = requests.get(self._get_url(f"/threads/{self.thread_id}/messages"), headers=self.headers) - messages_response.raise_for_status() - messages_response = messages_response.json() + messages_response = self._call_api("get", f"/threads/{thread_id}/messages").json() # retrieve the Thread to get the metadata - thread_response = requests.get(self._get_url(f"/threads/{self.thread_id}"), headers=self.headers) - 
thread_response.raise_for_status() - thread_response = thread_response.json() + thread_response = self._call_api("get", f"/threads/{thread_id}").json() runs = [ Run( @@ -110,11 +137,14 @@ def load(self) -> tuple[list[Run], dict[str, Any]]: ] return runs, thread_response.get("metadata", {}) - def _get_thread_id(self) -> str: - res = requests.post(self._get_url("/threads"), json={"name": uuid.uuid4().hex}, headers=self.headers) - res.raise_for_status() - return res.json().get("thread_id") - def _get_url(self, path: str) -> str: path = path.lstrip("/") return urljoin(self.base_url, f"/api/{path}") + + def _call_api( + self, method: str, path: str, json: Optional[dict] = None, *, raise_for_status: bool = True + ) -> requests.Response: + res = requests.request(method, self._get_url(path), json=json, headers=self.headers) + if raise_for_status: + res.raise_for_status() + return res diff --git a/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py b/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py index f30189e37..4eae403f2 100644 --- a/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py @@ -58,5 +58,5 @@ def store(self, runs: list[Run], metadata: dict[str, Any]) -> None: def load(self) -> tuple[list[Run], dict[str, Any]]: memory_json = self.client.hget(self.index, self.conversation_id) if memory_json is not None: - return self._from_params_dict(json.loads(memory_json)) + return self._from_params_dict(json.loads(memory_json)) # pyright: ignore[reportArgumentType] https://github.com/redis/redis-py/issues/2399 return [], {} diff --git a/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py b/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py index bc339f618..be34d2a8c 100644 --- a/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py +++ b/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py @@ -31,6 +31,7 @@ from 
griptape.drivers import BasePromptDriver from griptape.tokenizers import AmazonBedrockTokenizer, BaseTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from collections.abc import Iterator @@ -44,10 +45,6 @@ @define class AmazonBedrockPromptDriver(BasePromptDriver): session: boto3.Session = field(default=Factory(lambda: import_optional_dependency("boto3").Session()), kw_only=True) - bedrock_client: Any = field( - default=Factory(lambda self: self.session.client("bedrock-runtime"), takes_self=True), - kw_only=True, - ) additional_model_request_fields: dict = field(default=Factory(dict), kw_only=True) tokenizer: BaseTokenizer = field( default=Factory(lambda self: AmazonBedrockTokenizer(model=self.model), takes_self=True), @@ -55,10 +52,15 @@ class AmazonBedrockPromptDriver(BasePromptDriver): ) use_native_tools: bool = field(default=True, kw_only=True, metadata={"serializable": True}) tool_choice: dict = field(default=Factory(lambda: {"auto": {}}), kw_only=True, metadata={"serializable": True}) + _client: Any = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Any: + return self.session.client("bedrock-runtime") @observable def try_run(self, prompt_stack: PromptStack) -> Message: - response = self.bedrock_client.converse(**self._base_params(prompt_stack)) + response = self.client.converse(**self._base_params(prompt_stack)) usage = response["usage"] output_message = response["output"]["message"] @@ -71,7 +73,7 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: - response = self.bedrock_client.converse_stream(**self._base_params(prompt_stack)) + response = self.client.converse_stream(**self._base_params(prompt_stack)) stream = response.get("stream") if stream is not None: diff --git 
a/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.py b/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.py index d7a2f5b0b..2dcf55307 100644 --- a/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.py +++ b/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.py @@ -10,6 +10,7 @@ from griptape.drivers import BasePromptDriver from griptape.tokenizers import HuggingFaceTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from collections.abc import Iterator @@ -22,10 +23,6 @@ @define class AmazonSageMakerJumpstartPromptDriver(BasePromptDriver): session: boto3.Session = field(default=Factory(lambda: import_optional_dependency("boto3").Session()), kw_only=True) - sagemaker_client: Any = field( - default=Factory(lambda self: self.session.client("sagemaker-runtime"), takes_self=True), - kw_only=True, - ) endpoint: str = field(kw_only=True, metadata={"serializable": True}) custom_attributes: str = field(default="accept_eula=true", kw_only=True, metadata={"serializable": True}) inference_component_name: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) @@ -38,6 +35,11 @@ class AmazonSageMakerJumpstartPromptDriver(BasePromptDriver): ), kw_only=True, ) + _client: Any = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Any: + return self.session.client("sagemaker-runtime") @stream.validator # pyright: ignore[reportAttributeAccessIssue] def validate_stream(self, _: Attribute, stream: bool) -> None: # noqa: FBT001 @@ -51,7 +53,7 @@ def try_run(self, prompt_stack: PromptStack) -> Message: "parameters": {**self._base_params(prompt_stack)}, } - response = self.sagemaker_client.invoke_endpoint( + response = self.client.invoke_endpoint( EndpointName=self.endpoint, ContentType="application/json", Body=json.dumps(payload), diff --git 
a/griptape/drivers/prompt/anthropic_prompt_driver.py b/griptape/drivers/prompt/anthropic_prompt_driver.py index ae50bc59e..1c7b376f8 100644 --- a/griptape/drivers/prompt/anthropic_prompt_driver.py +++ b/griptape/drivers/prompt/anthropic_prompt_driver.py @@ -32,6 +32,7 @@ from griptape.drivers import BasePromptDriver from griptape.tokenizers import AnthropicTokenizer, BaseTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from collections.abc import Iterator @@ -54,13 +55,6 @@ class AnthropicPromptDriver(BasePromptDriver): api_key: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": False}) model: str = field(kw_only=True, metadata={"serializable": True}) - client: Client = field( - default=Factory( - lambda self: import_optional_dependency("anthropic").Anthropic(api_key=self.api_key), - takes_self=True, - ), - kw_only=True, - ) tokenizer: BaseTokenizer = field( default=Factory(lambda self: AnthropicTokenizer(model=self.model), takes_self=True), kw_only=True, @@ -70,6 +64,11 @@ class AnthropicPromptDriver(BasePromptDriver): tool_choice: dict = field(default=Factory(lambda: {"type": "auto"}), kw_only=True, metadata={"serializable": False}) use_native_tools: bool = field(default=True, kw_only=True, metadata={"serializable": True}) max_tokens: int = field(default=1000, kw_only=True, metadata={"serializable": True}) + _client: Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Client: + return import_optional_dependency("anthropic").Anthropic(api_key=self.api_key) @observable def try_run(self, prompt_stack: PromptStack) -> Message: @@ -128,7 +127,7 @@ def __to_anthropic_role(self, message: Message) -> str: return "user" def __to_anthropic_tools(self, tools: list[BaseTool]) -> list[dict]: - return [ + tool_schemas = [ { "name": tool.to_native_tool_name(activity), "description": 
tool.activity_description(activity), @@ -138,6 +137,13 @@ def __to_anthropic_tools(self, tools: list[BaseTool]) -> list[dict]: for activity in tool.activities() ] + # Anthropic doesn't support $schema and $id + for tool_schema in tool_schemas: + del tool_schema["input_schema"]["$schema"] + del tool_schema["input_schema"]["$id"] + + return tool_schemas + def __to_anthropic_content(self, message: Message) -> str | list[dict]: if message.has_all_content_type(TextMessageContent): return message.to_text() diff --git a/griptape/drivers/prompt/azure_openai_chat_prompt_driver.py b/griptape/drivers/prompt/azure_openai_chat_prompt_driver.py index b08b51b69..5bb7e0760 100644 --- a/griptape/drivers/prompt/azure_openai_chat_prompt_driver.py +++ b/griptape/drivers/prompt/azure_openai_chat_prompt_driver.py @@ -6,6 +6,7 @@ from attrs import Factory, define, field from griptape.drivers import OpenAiChatPromptDriver +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from griptape.common import PromptStack @@ -37,20 +38,19 @@ class AzureOpenAiChatPromptDriver(OpenAiChatPromptDriver): metadata={"serializable": False}, ) api_version: str = field(default="2023-05-15", kw_only=True, metadata={"serializable": True}) - client: openai.AzureOpenAI = field( - default=Factory( - lambda self: openai.AzureOpenAI( - organization=self.organization, - api_key=self.api_key, - api_version=self.api_version, - azure_endpoint=self.azure_endpoint, - azure_deployment=self.azure_deployment, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ), - ) + _client: openai.AzureOpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.AzureOpenAI: + return openai.AzureOpenAI( + organization=self.organization, + api_key=self.api_key, + api_version=self.api_version, + azure_endpoint=self.azure_endpoint, + azure_deployment=self.azure_deployment, 
+ azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) def _base_params(self, prompt_stack: PromptStack) -> dict: params = super()._base_params(prompt_stack) diff --git a/griptape/drivers/prompt/cohere_prompt_driver.py b/griptape/drivers/prompt/cohere_prompt_driver.py index ff1a8b482..b31c78ea3 100644 --- a/griptape/drivers/prompt/cohere_prompt_driver.py +++ b/griptape/drivers/prompt/cohere_prompt_driver.py @@ -23,6 +23,7 @@ from griptape.drivers import BasePromptDriver from griptape.tokenizers import BaseTokenizer, CohereTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from collections.abc import Iterator @@ -45,14 +46,16 @@ class CoherePromptDriver(BasePromptDriver): api_key: str = field(metadata={"serializable": False}) model: str = field(metadata={"serializable": True}) - client: Client = field( - default=Factory(lambda self: import_optional_dependency("cohere").Client(self.api_key), takes_self=True), - ) + force_single_step: bool = field(default=False, kw_only=True, metadata={"serializable": True}) + use_native_tools: bool = field(default=True, kw_only=True, metadata={"serializable": True}) + _client: Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) tokenizer: BaseTokenizer = field( default=Factory(lambda self: CohereTokenizer(model=self.model, client=self.client), takes_self=True), ) - force_single_step: bool = field(default=False, kw_only=True, metadata={"serializable": True}) - use_native_tools: bool = field(default=True, kw_only=True, metadata={"serializable": True}) + + @lazy_property() + def client(self) -> Client: + return import_optional_dependency("cohere").Client(self.api_key) @observable def try_run(self, prompt_stack: PromptStack) -> Message: diff --git a/griptape/drivers/prompt/google_prompt_driver.py b/griptape/drivers/prompt/google_prompt_driver.py index 6b18f6041..4afdad5c6 
100644 --- a/griptape/drivers/prompt/google_prompt_driver.py +++ b/griptape/drivers/prompt/google_prompt_driver.py @@ -26,6 +26,7 @@ from griptape.drivers import BasePromptDriver from griptape.tokenizers import BaseTokenizer, GoogleTokenizer from griptape.utils import import_optional_dependency, remove_key_in_dict_recursively +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from collections.abc import Iterator @@ -44,17 +45,13 @@ class GooglePromptDriver(BasePromptDriver): Attributes: api_key: Google API key. model: Google model name. - model_client: Custom `GenerativeModel` client. + client: Custom `GenerativeModel` client. top_p: Optional value for top_p. top_k: Optional value for top_k. """ api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) model: str = field(kw_only=True, metadata={"serializable": True}) - model_client: GenerativeModel = field( - default=Factory(lambda self: self._default_model_client(), takes_self=True), - kw_only=True, - ) tokenizer: BaseTokenizer = field( default=Factory(lambda self: GoogleTokenizer(api_key=self.api_key, model=self.model), takes_self=True), kw_only=True, @@ -63,11 +60,19 @@ class GooglePromptDriver(BasePromptDriver): top_k: Optional[int] = field(default=None, kw_only=True, metadata={"serializable": True}) use_native_tools: bool = field(default=True, kw_only=True, metadata={"serializable": True}) tool_choice: str = field(default="auto", kw_only=True, metadata={"serializable": True}) + _client: GenerativeModel = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> GenerativeModel: + genai = import_optional_dependency("google.generativeai") + genai.configure(api_key=self.api_key) + + return genai.GenerativeModel(self.model) @observable def try_run(self, prompt_stack: PromptStack) -> Message: messages = self.__to_google_messages(prompt_stack) - response: GenerateContentResponse = 
self.model_client.generate_content( + response: GenerateContentResponse = self.client.generate_content( messages, **self._base_params(prompt_stack), ) @@ -86,7 +91,7 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: messages = self.__to_google_messages(prompt_stack) - response: GenerateContentResponse = self.model_client.generate_content( + response: GenerateContentResponse = self.client.generate_content( messages, **self._base_params(prompt_stack), stream=True, @@ -119,7 +124,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: system_messages = prompt_stack.system_messages if system_messages: - self.model_client._system_instruction = types.ContentDict( + self.client._system_instruction = types.ContentDict( role="system", parts=[protos.Part(text=system_message.to_text()) for system_message in system_messages], ) @@ -146,12 +151,6 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: ), } - def _default_model_client(self) -> GenerativeModel: - genai = import_optional_dependency("google.generativeai") - genai.configure(api_key=self.api_key) - - return genai.GenerativeModel(self.model) - def __to_google_messages(self, prompt_stack: PromptStack) -> ContentsType: types = import_optional_dependency("google.generativeai.types") diff --git a/griptape/drivers/prompt/huggingface_hub_prompt_driver.py b/griptape/drivers/prompt/huggingface_hub_prompt_driver.py index 657b5747c..68267f755 100644 --- a/griptape/drivers/prompt/huggingface_hub_prompt_driver.py +++ b/griptape/drivers/prompt/huggingface_hub_prompt_driver.py @@ -8,6 +8,7 @@ from griptape.drivers import BasePromptDriver from griptape.tokenizers import HuggingFaceTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from collections.abc import Iterator @@ -32,16 +33,6 @@ class HuggingFaceHubPromptDriver(BasePromptDriver): 
max_tokens: int = field(default=250, kw_only=True, metadata={"serializable": True}) params: dict = field(factory=dict, kw_only=True, metadata={"serializable": True}) model: str = field(kw_only=True, metadata={"serializable": True}) - client: InferenceClient = field( - default=Factory( - lambda self: import_optional_dependency("huggingface_hub").InferenceClient( - model=self.model, - token=self.api_token, - ), - takes_self=True, - ), - kw_only=True, - ) tokenizer: HuggingFaceTokenizer = field( default=Factory( lambda self: HuggingFaceTokenizer(model=self.model, max_output_tokens=self.max_tokens), @@ -49,6 +40,14 @@ class HuggingFaceHubPromptDriver(BasePromptDriver): ), kw_only=True, ) + _client: InferenceClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> InferenceClient: + return import_optional_dependency("huggingface_hub").InferenceClient( + model=self.model, + token=self.api_token, + ) @observable def try_run(self, prompt_stack: PromptStack) -> Message: diff --git a/griptape/drivers/prompt/huggingface_pipeline_prompt_driver.py b/griptape/drivers/prompt/huggingface_pipeline_prompt_driver.py index 128167f52..1978b339a 100644 --- a/griptape/drivers/prompt/huggingface_pipeline_prompt_driver.py +++ b/griptape/drivers/prompt/huggingface_pipeline_prompt_driver.py @@ -9,6 +9,7 @@ from griptape.drivers import BasePromptDriver from griptape.tokenizers import HuggingFaceTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from collections.abc import Iterator @@ -35,23 +36,24 @@ class HuggingFacePipelinePromptDriver(BasePromptDriver): ), kw_only=True, ) - pipe: TextGenerationPipeline = field( - default=Factory( - lambda self: import_optional_dependency("transformers").pipeline( - "text-generation", - model=self.model, - max_new_tokens=self.max_tokens, - tokenizer=self.tokenizer.tokenizer, - ), - 
takes_self=True, - ), + _pipeline: TextGenerationPipeline = field( + default=None, kw_only=True, alias="pipeline", metadata={"serializable": False} ) + @lazy_property() + def pipeline(self) -> TextGenerationPipeline: + return import_optional_dependency("transformers").pipeline( + task="text-generation", + model=self.model, + max_new_tokens=self.max_tokens, + tokenizer=self.tokenizer.tokenizer, + ) + @observable def try_run(self, prompt_stack: PromptStack) -> Message: messages = self._prompt_stack_to_messages(prompt_stack) - result = self.pipe( + result = self.pipeline( messages, max_new_tokens=self.max_tokens, temperature=self.temperature, diff --git a/griptape/drivers/prompt/ollama_prompt_driver.py b/griptape/drivers/prompt/ollama_prompt_driver.py index 70d4ce89a..5f9e32e2f 100644 --- a/griptape/drivers/prompt/ollama_prompt_driver.py +++ b/griptape/drivers/prompt/ollama_prompt_driver.py @@ -22,6 +22,7 @@ from griptape.drivers import BasePromptDriver from griptape.tokenizers import SimpleTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from ollama import Client @@ -40,10 +41,6 @@ class OllamaPromptDriver(BasePromptDriver): model: str = field(kw_only=True, metadata={"serializable": True}) host: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - client: Client = field( - default=Factory(lambda self: import_optional_dependency("ollama").Client(host=self.host), takes_self=True), - kw_only=True, - ) tokenizer: BaseTokenizer = field( default=Factory( lambda self: SimpleTokenizer( @@ -67,6 +64,11 @@ class OllamaPromptDriver(BasePromptDriver): kw_only=True, ) use_native_tools: bool = field(default=True, kw_only=True, metadata={"serializable": True}) + _client: Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Client: + return 
import_optional_dependency("ollama").Client(host=self.host) @observable def try_run(self, prompt_stack: PromptStack) -> Message: diff --git a/griptape/drivers/prompt/openai_chat_prompt_driver.py b/griptape/drivers/prompt/openai_chat_prompt_driver.py index 987bdc2ad..ec10ab72e 100644 --- a/griptape/drivers/prompt/openai_chat_prompt_driver.py +++ b/griptape/drivers/prompt/openai_chat_prompt_driver.py @@ -1,7 +1,7 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Literal, Optional +from typing import TYPE_CHECKING, Optional import openai from attrs import Factory, define, field @@ -25,6 +25,7 @@ ) from griptape.drivers import BasePromptDriver from griptape.tokenizers import BaseTokenizer, OpenAiTokenizer +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from collections.abc import Iterator @@ -55,19 +56,13 @@ class OpenAiChatPromptDriver(BasePromptDriver): base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) organization: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - client: openai.OpenAI = field( - default=Factory( - lambda self: openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization), - takes_self=True, - ), - ) model: str = field(kw_only=True, metadata={"serializable": True}) tokenizer: BaseTokenizer = field( default=Factory(lambda self: OpenAiTokenizer(model=self.model), takes_self=True), kw_only=True, ) user: str = field(default="", kw_only=True, metadata={"serializable": True}) - response_format: Optional[Literal["json_object"]] = field( + response_format: Optional[dict] = field( default=None, kw_only=True, metadata={"serializable": True}, @@ -88,6 +83,15 @@ class OpenAiChatPromptDriver(BasePromptDriver): ), kw_only=True, ) + _client: openai.OpenAI = field(default=None, kw_only=True, alias="client", 
metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.OpenAI: + return openai.OpenAI( + base_url=self.base_url, + api_key=self.api_key, + organization=self.organization, + ) @observable def try_run(self, prompt_stack: PromptStack) -> Message: @@ -141,10 +145,13 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: **({"stream_options": {"include_usage": True}} if self.stream else {}), } - if self.response_format == "json_object": - params["response_format"] = {"type": "json_object"} - # JSON mode still requires a system message instructing the LLM to output JSON. - prompt_stack.add_system_message("Provide your response as a valid JSON object.") + if self.response_format is not None: + if self.response_format == {"type": "json_object"}: + params["response_format"] = self.response_format + # JSON mode still requires a system message instructing the LLM to output JSON. + prompt_stack.add_system_message("Provide your response as a valid JSON object.") + else: + params["response_format"] = self.response_format messages = self.__to_openai_messages(prompt_stack.messages) diff --git a/griptape/drivers/ruleset/__init__.py b/griptape/drivers/ruleset/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/drivers/ruleset/base_ruleset_driver.py b/griptape/drivers/ruleset/base_ruleset_driver.py new file mode 100644 index 000000000..3cc513b5f --- /dev/null +++ b/griptape/drivers/ruleset/base_ruleset_driver.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any + +from attrs import define, field + +from griptape.mixins.serializable_mixin import SerializableMixin + +if TYPE_CHECKING: + from griptape.rules import BaseRule + + +@define +class BaseRulesetDriver(SerializableMixin, ABC): + """Base class for ruleset drivers. + + Attributes: + raise_not_found: Whether to raise an error if the ruleset is not found. Defaults to True. 
+ """ + + raise_not_found: bool = field(default=True, kw_only=True, metadata={"serializable": True}) + + @abstractmethod + def load(self, ruleset_name: str) -> tuple[list[BaseRule], dict[str, Any]]: ... + + def _from_ruleset_dict(self, params_dict: dict[str, Any]) -> tuple[list[BaseRule], dict[str, Any]]: + return [ + self._get_rule(rule["value"], rule.get("meta", {})) for rule in params_dict.get("rules", []) + ], params_dict.get("meta", {}) + + def _get_rule(self, value: Any, meta: dict[str, Any]) -> BaseRule: + from griptape.rules import JsonSchemaRule, Rule + + return JsonSchemaRule(value=value, meta=meta) if isinstance(value, dict) else Rule(value=str(value), meta=meta) diff --git a/griptape/drivers/ruleset/griptape_cloud_ruleset_driver.py b/griptape/drivers/ruleset/griptape_cloud_ruleset_driver.py new file mode 100644 index 000000000..426f9b849 --- /dev/null +++ b/griptape/drivers/ruleset/griptape_cloud_ruleset_driver.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +import os +from typing import TYPE_CHECKING, Any, Optional +from urllib.parse import urljoin + +import requests +from attrs import Attribute, Factory, define, field + +from griptape.drivers import BaseRulesetDriver +from griptape.utils import dict_merge + +if TYPE_CHECKING: + from griptape.rules import BaseRule + + +@define(kw_only=True) +class GriptapeCloudRulesetDriver(BaseRulesetDriver): + """A driver for loading a ruleset from Griptape Cloud. + + Attributes: + ruleset_id: The ID of the Griptape Cloud Ruleset to load. If not provided, the driver will attempt to + look up the Ruleset by using the ruleset name as an alias. If no Ruleset is found by ID or alias, + a ValueError is raised when `raise_not_found` is True. + base_url: The base URL of the Griptape Cloud API. Defaults to the value of the environment variable + `GT_CLOUD_BASE_URL` or `https://cloud.griptape.ai`. + api_key: The API key to use for authenticating with the Griptape Cloud API.
If not provided, the driver will + attempt to retrieve the API key from the environment variable `GT_CLOUD_API_KEY`. + + Raises: + ValueError: If `api_key` is not provided. + """ + + ruleset_id: str = field( + default=None, + metadata={"serializable": True}, + ) + base_url: str = field( + default=Factory(lambda: os.getenv("GT_CLOUD_BASE_URL", "https://cloud.griptape.ai")), + ) + api_key: Optional[str] = field(default=Factory(lambda: os.getenv("GT_CLOUD_API_KEY"))) + headers: dict = field( + default=Factory(lambda self: {"Authorization": f"Bearer {self.api_key}"}, takes_self=True), + init=False, + ) + + @api_key.validator # pyright: ignore[reportAttributeAccessIssue] + def validate_api_key(self, _: Attribute, value: Optional[str]) -> str: + if value is None: + raise ValueError(f"{self.__class__.__name__} requires an API key") + return value + + def load(self, ruleset_name: str) -> tuple[list[BaseRule], dict[str, Any]]: + """Load the ruleset from Griptape Cloud, using the ruleset name as an alias if ruleset_id is not provided.""" + ruleset = None + + if self.ruleset_id is not None: + res = self._call_api("get", f"/rulesets/{self.ruleset_id}", raise_for_status=False) + if res.status_code == 200: + ruleset = res.json() + + # use name as 'alias' to get ruleset + if ruleset is None: + res = self._call_api("get", f"/rulesets?alias={ruleset_name}").json() + if res.get("rulesets"): + ruleset = res["rulesets"][0] + + # no ruleset by name or ruleset_id + if ruleset is None: + if self.raise_not_found: + raise ValueError(f"No ruleset found with alias: {ruleset_name} or ruleset_id: {self.ruleset_id}") + return [], {} + + rules = self._call_api("get", f"/rules?ruleset_id={ruleset['ruleset_id']}").json().get("rules", []) + + for rule in rules: + rule["metadata"] = dict_merge(rule.get("metadata", {}), {"griptape_cloud_rule_id": rule["rule_id"]}) + + return [self._get_rule(rule["rule"], rule["metadata"]) for rule in rules], ruleset.get("metadata", {}) + + def _get_url(self, path: 
str) -> str: + path = path.lstrip("/") + return urljoin(self.base_url, f"/api/{path}") + + def _call_api(self, method: str, path: str, *, raise_for_status: bool = True) -> requests.Response: + res = requests.request(method, self._get_url(path), headers=self.headers) + if raise_for_status: + res.raise_for_status() + return res diff --git a/griptape/drivers/ruleset/local_ruleset_driver.py b/griptape/drivers/ruleset/local_ruleset_driver.py new file mode 100644 index 000000000..3996bc3d9 --- /dev/null +++ b/griptape/drivers/ruleset/local_ruleset_driver.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +import json +import os +from pathlib import Path +from typing import TYPE_CHECKING, Any, Optional + +from attrs import define, field + +from griptape.drivers import BaseRulesetDriver + +if TYPE_CHECKING: + from griptape.rules import BaseRule + + +@define(kw_only=True) +class LocalRulesetDriver(BaseRulesetDriver): + persist_dir: Optional[str] = field(default=None, metadata={"serializable": True}) + + def load(self, ruleset_name: str) -> tuple[list[BaseRule], dict[str, Any]]: + if self.persist_dir is None: + return [], {} + + file_name = os.path.join(self.persist_dir, ruleset_name) + + if ( + file_name is not None + and os.path.exists(file_name) + and (loaded_str := Path(file_name).read_text()) is not None + ): + try: + return self._from_ruleset_dict(json.loads(loaded_str)) + except Exception as e: + raise ValueError(f"Unable to load data from {file_name}") from e + + if self.raise_not_found: + raise ValueError(f"Ruleset not found with name {file_name}") + return [], {} diff --git a/griptape/drivers/sql/amazon_redshift_sql_driver.py b/griptape/drivers/sql/amazon_redshift_sql_driver.py index 837405e83..8e5d912c8 100644 --- a/griptape/drivers/sql/amazon_redshift_sql_driver.py +++ b/griptape/drivers/sql/amazon_redshift_sql_driver.py @@ -3,12 +3,14 @@ import time from typing import TYPE_CHECKING, Any, Optional -from attrs import Attribute, Factory, define, field +from 
attrs import Attribute, define, field from griptape.drivers import BaseSqlDriver +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import boto3 + from mypy_boto3_redshift_data import RedshiftDataAPIServiceClient @define @@ -20,11 +22,14 @@ class AmazonRedshiftSqlDriver(BaseSqlDriver): db_user: Optional[str] = field(default=None, kw_only=True) database_credentials_secret_arn: Optional[str] = field(default=None, kw_only=True) wait_for_query_completion_sec: float = field(default=0.3, kw_only=True) - client: Any = field( - default=Factory(lambda self: self.session.client("redshift-data"), takes_self=True), - kw_only=True, + _client: RedshiftDataAPIServiceClient = field( + default=None, kw_only=True, alias="client", metadata={"serializable": False} ) + @lazy_property() + def client(self) -> RedshiftDataAPIServiceClient: + return self.session.client("redshift-data") + @workgroup_name.validator # pyright: ignore[reportAttributeAccessIssue] def validate_params(self, _: Attribute, workgroup_name: Optional[str]) -> None: if not self.cluster_identifier and not self.workgroup_name: diff --git a/griptape/drivers/sql/snowflake_sql_driver.py b/griptape/drivers/sql/snowflake_sql_driver.py index 656bc4b99..d1b4310b5 100644 --- a/griptape/drivers/sql/snowflake_sql_driver.py +++ b/griptape/drivers/sql/snowflake_sql_driver.py @@ -2,10 +2,11 @@ from typing import TYPE_CHECKING, Any, Callable, Optional -from attrs import Attribute, Factory, define, field +from attrs import Attribute, define, field from griptape.drivers import BaseSqlDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from snowflake.connector import SnowflakeConnection @@ -15,18 +16,7 @@ @define class SnowflakeSqlDriver(BaseSqlDriver): connection_func: Callable[[], SnowflakeConnection] = field(kw_only=True) - engine: Engine = field( - default=Factory( - # Creator bypasses the URL param - # 
https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.creator - lambda self: import_optional_dependency("sqlalchemy").create_engine( - "snowflake://not@used/db", - creator=self.connection_func, - ), - takes_self=True, - ), - kw_only=True, - ) + _engine: Engine = field(default=None, kw_only=True, alias="engine", metadata={"serializable": False}) @connection_func.validator # pyright: ignore[reportFunctionMemberAccess] def validate_connection_func(self, _: Attribute, connection_func: Callable[[], SnowflakeConnection]) -> None: @@ -38,10 +28,12 @@ def validate_connection_func(self, _: Attribute, connection_func: Callable[[], S if not snowflake_connection.schema or not snowflake_connection.database: raise ValueError("Provide a schema and database for the Snowflake connection") - @engine.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_engine_url(self, _: Attribute, engine: Engine) -> None: - if not engine.url.render_as_string().startswith("snowflake://"): - raise ValueError("Provide a Snowflake connection") + @lazy_property() + def engine(self) -> Engine: + return import_optional_dependency("sqlalchemy").create_engine( + "snowflake://not@used/db", + creator=self.connection_func, + ) def execute_query(self, query: str) -> Optional[list[BaseSqlDriver.RowResult]]: rows = self.execute_query_raw(query) diff --git a/griptape/drivers/sql/sql_driver.py b/griptape/drivers/sql/sql_driver.py index d2293f94d..cb7a67341 100644 --- a/griptape/drivers/sql/sql_driver.py +++ b/griptape/drivers/sql/sql_driver.py @@ -6,6 +6,7 @@ from griptape.drivers import BaseSqlDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from sqlalchemy.engine import Engine @@ -15,12 +16,11 @@ class SqlDriver(BaseSqlDriver): engine_url: str = field(kw_only=True) create_engine_params: dict = field(factory=dict, kw_only=True) - engine: Engine = field(init=False) + _engine: Engine = 
field(default=None, kw_only=True, alias="engine", metadata={"serializable": False}) - def __attrs_post_init__(self) -> None: - sqlalchemy = import_optional_dependency("sqlalchemy") - - self.engine = sqlalchemy.create_engine(self.engine_url, **self.create_engine_params) + @lazy_property() + def engine(self) -> Engine: + return import_optional_dependency("sqlalchemy").create_engine(self.engine_url, **self.create_engine_params) def execute_query(self, query: str) -> Optional[list[BaseSqlDriver.RowResult]]: rows = self.execute_query_raw(query) diff --git a/griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.py b/griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.py index 562a1d637..f64ab0e2d 100644 --- a/griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.py +++ b/griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.py @@ -6,6 +6,7 @@ from attrs import Factory, define, field from griptape.drivers import OpenAiTextToSpeechDriver +from griptape.utils.decorators import lazy_property @define @@ -35,17 +36,16 @@ class AzureOpenAiTextToSpeechDriver(OpenAiTextToSpeechDriver): metadata={"serializable": False}, ) api_version: str = field(default="2024-07-01-preview", kw_only=True, metadata={"serializable": True}) - client: openai.AzureOpenAI = field( - default=Factory( - lambda self: openai.AzureOpenAI( - organization=self.organization, - api_key=self.api_key, - api_version=self.api_version, - azure_endpoint=self.azure_endpoint, - azure_deployment=self.azure_deployment, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ), - ) + _client: openai.AzureOpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.AzureOpenAI: + return openai.AzureOpenAI( + organization=self.organization, + api_key=self.api_key, + api_version=self.api_version, + 
azure_endpoint=self.azure_endpoint, + azure_deployment=self.azure_deployment, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) diff --git a/griptape/drivers/text_to_speech/elevenlabs_text_to_speech_driver.py b/griptape/drivers/text_to_speech/elevenlabs_text_to_speech_driver.py index f4be58162..ef6352cea 100644 --- a/griptape/drivers/text_to_speech/elevenlabs_text_to_speech_driver.py +++ b/griptape/drivers/text_to_speech/elevenlabs_text_to_speech_driver.py @@ -1,27 +1,28 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING -from attrs import Factory, define, field +from attrs import define, field from griptape.artifacts.audio_artifact import AudioArtifact from griptape.drivers import BaseTextToSpeechDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + from elevenlabs.client import ElevenLabs @define class ElevenLabsTextToSpeechDriver(BaseTextToSpeechDriver): api_key: str = field(kw_only=True, metadata={"serializable": True}) - client: Any = field( - default=Factory( - lambda self: import_optional_dependency("elevenlabs.client").ElevenLabs(api_key=self.api_key), - takes_self=True, - ), - kw_only=True, - metadata={"serializable": True}, - ) voice: str = field(kw_only=True, metadata={"serializable": True}) output_format: str = field(default="mp3_44100_128", kw_only=True, metadata={"serializable": True}) + _client: ElevenLabs = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> ElevenLabs: + return import_optional_dependency("elevenlabs.client").ElevenLabs(api_key=self.api_key) def try_text_to_audio(self, prompts: list[str]) -> AudioArtifact: audio = self.client.generate( diff --git a/griptape/drivers/text_to_speech/openai_text_to_speech_driver.py b/griptape/drivers/text_to_speech/openai_text_to_speech_driver.py index 
543ef1ec7..558e2f875 100644 --- a/griptape/drivers/text_to_speech/openai_text_to_speech_driver.py +++ b/griptape/drivers/text_to_speech/openai_text_to_speech_driver.py @@ -3,10 +3,11 @@ from typing import Literal, Optional import openai -from attrs import Factory, define, field +from attrs import define, field from griptape.artifacts.audio_artifact import AudioArtifact from griptape.drivers import BaseTextToSpeechDriver +from griptape.utils.decorators import lazy_property @define @@ -23,12 +24,15 @@ class OpenAiTextToSpeechDriver(BaseTextToSpeechDriver): base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True) organization: Optional[str] = field(default=openai.organization, kw_only=True, metadata={"serializable": True}) - client: openai.OpenAI = field( - default=Factory( - lambda self: openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization), - takes_self=True, - ), - ) + _client: openai.OpenAI = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> openai.OpenAI: + return openai.OpenAI( + api_key=self.api_key, + base_url=self.base_url, + organization=self.organization, + ) def try_text_to_audio(self, prompts: list[str]) -> AudioArtifact: response = self.client.audio.speech.create( diff --git a/griptape/drivers/vector/amazon_opensearch_vector_store_driver.py b/griptape/drivers/vector/amazon_opensearch_vector_store_driver.py index b1d881958..465dfa476 100644 --- a/griptape/drivers/vector/amazon_opensearch_vector_store_driver.py +++ b/griptape/drivers/vector/amazon_opensearch_vector_store_driver.py @@ -9,7 +9,6 @@ if TYPE_CHECKING: from boto3 import Session - from opensearchpy import OpenSearch @define @@ -36,19 +35,6 @@ class AmazonOpenSearchVectorStoreDriver(OpenSearchVectorStoreDriver): ), ) - client: OpenSearch = field( - default=Factory( - lambda self: 
import_optional_dependency("opensearchpy").OpenSearch( - hosts=[{"host": self.host, "port": self.port}], - http_auth=self.http_auth, - use_ssl=self.use_ssl, - verify_certs=self.verify_certs, - connection_class=import_optional_dependency("opensearchpy").RequestsHttpConnection, - ), - takes_self=True, - ), - ) - def upsert_vector( self, vector: list[float], diff --git a/griptape/drivers/vector/astradb_vector_store_driver.py b/griptape/drivers/vector/astradb_vector_store_driver.py index 029fa382d..1e8398809 100644 --- a/griptape/drivers/vector/astradb_vector_store_driver.py +++ b/griptape/drivers/vector/astradb_vector_store_driver.py @@ -6,10 +6,11 @@ from griptape.drivers import BaseVectorStoreDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: - from astrapy import Collection - from astrapy.authentication import TokenProvider + import astrapy + import astrapy.authentication @define @@ -26,33 +27,37 @@ class AstraDbVectorStoreDriver(BaseVectorStoreDriver): It can be omitted for production Astra DB targets. See `astrapy.constants.Environment` for allowed values. astra_db_namespace: optional specification of the namespace (in the Astra database) for the data. *Note*: not to be confused with the "namespace" mentioned elsewhere, which is a grouping within this vector store. + caller_name: the name of the caller for the Astra DB client. Defaults to "griptape". + client: an instance of `astrapy.DataAPIClient` for the Astra DB. + collection: an instance of `astrapy.Collection` for the Astra DB. 
""" api_endpoint: str = field(kw_only=True, metadata={"serializable": True}) - token: Optional[str | TokenProvider] = field(kw_only=True, default=None, metadata={"serializable": False}) + token: Optional[str | astrapy.authentication.TokenProvider] = field( + kw_only=True, default=None, metadata={"serializable": False} + ) collection_name: str = field(kw_only=True, metadata={"serializable": True}) environment: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": True}) astra_db_namespace: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - - collection: Collection = field(init=False) - - def __attrs_post_init__(self) -> None: - astrapy = import_optional_dependency("astrapy") - self.collection = ( - astrapy.DataAPIClient( - caller_name="griptape", - environment=self.environment, - ) - .get_database( - self.api_endpoint, - token=self.token, - namespace=self.astra_db_namespace, - ) - .get_collection( - name=self.collection_name, - ) + caller_name: str = field(default="griptape", kw_only=True, metadata={"serializable": False}) + _client: astrapy.DataAPIClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + _collection: astrapy.Collection = field( + default=None, kw_only=True, alias="collection", metadata={"serializable": False} + ) + + @lazy_property() + def client(self) -> astrapy.DataAPIClient: + return import_optional_dependency("astrapy").DataAPIClient( + caller_name=self.caller_name, + environment=self.environment, ) + @lazy_property() + def collection(self) -> astrapy.Collection: + return self.client.get_database( + self.api_endpoint, token=self.token, namespace=self.astra_db_namespace + ).get_collection(self.collection_name) + def delete_vector(self, vector_id: str) -> None: """Delete a vector from Astra DB store. 
diff --git a/griptape/drivers/vector/base_vector_store_driver.py b/griptape/drivers/vector/base_vector_store_driver.py index 50810752e..e2a394bf4 100644 --- a/griptape/drivers/vector/base_vector_store_driver.py +++ b/griptape/drivers/vector/base_vector_store_driver.py @@ -43,9 +43,9 @@ def upsert_text_artifacts( *, meta: Optional[dict] = None, **kwargs, - ) -> None: + ) -> list[str] | dict[str, list[str]]: if isinstance(artifacts, list): - utils.execute_futures_list( + return utils.execute_futures_list( [ self.futures_executor.submit(self.upsert_text_artifact, a, namespace=None, meta=meta, **kwargs) for a in artifacts @@ -65,7 +65,7 @@ def upsert_text_artifacts( ) ) - utils.execute_futures_list_dict(futures_dict) + return utils.execute_futures_list_dict(futures_dict) def upsert_text_artifact( self, @@ -89,32 +89,20 @@ def upsert_text_artifact( vector = artifact.embedding or artifact.generate_embedding(self.embedding_driver) - if isinstance(vector, list): - return self.upsert_vector(vector, vector_id=vector_id, namespace=namespace, meta=meta, **kwargs) - else: - raise ValueError("Vector must be an instance of 'list'.") + return self.upsert_vector(vector, vector_id=vector_id, namespace=namespace, meta=meta, **kwargs) def upsert_text( self, string: str, *, - vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, + vector_id: Optional[str] = None, **kwargs, ) -> str: - vector_id = self._get_default_vector_id(string) if vector_id is None else vector_id - - if self.does_entry_exist(vector_id, namespace=namespace): - return vector_id - else: - return self.upsert_vector( - self.embedding_driver.embed_string(string), - vector_id=vector_id, - namespace=namespace, - meta=meta or {}, - **kwargs, - ) + return self.upsert_text_artifact( + TextArtifact(string), vector_id=vector_id, namespace=namespace, meta=meta, **kwargs + ) def does_entry_exist(self, vector_id: str, *, namespace: Optional[str] = None) -> bool: try: diff --git 
a/griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.py b/griptape/drivers/vector/griptape_cloud_vector_store_driver.py similarity index 96% rename from griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.py rename to griptape/drivers/vector/griptape_cloud_vector_store_driver.py index a3bd6a011..a82c791cd 100644 --- a/griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.py +++ b/griptape/drivers/vector/griptape_cloud_vector_store_driver.py @@ -13,7 +13,7 @@ @define -class GriptapeCloudKnowledgeBaseVectorStoreDriver(BaseVectorStoreDriver): +class GriptapeCloudVectorStoreDriver(BaseVectorStoreDriver): """A vector store driver for Griptape Cloud Knowledge Bases. Attributes: @@ -84,7 +84,7 @@ def query( namespace: Optional[str] = None, include_vectors: Optional[bool] = None, distance_metric: Optional[str] = None, - # GriptapeCloudKnowledgeBaseVectorStoreDriver-specific params: + # GriptapeCloudVectorStoreDriver-specific params: filter: Optional[dict] = None, # noqa: A002 **kwargs, ) -> list[BaseVectorStoreDriver.Entry]: diff --git a/griptape/drivers/vector/marqo_vector_store_driver.py b/griptape/drivers/vector/marqo_vector_store_driver.py index caab118b8..55c3692a1 100644 --- a/griptape/drivers/vector/marqo_vector_store_driver.py +++ b/griptape/drivers/vector/marqo_vector_store_driver.py @@ -2,11 +2,12 @@ from typing import TYPE_CHECKING, Any, NoReturn, Optional -from attrs import Factory, define, field +from attrs import define, field from griptape import utils from griptape.drivers import BaseVectorStoreDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import marqo @@ -21,20 +22,18 @@ class MarqoVectorStoreDriver(BaseVectorStoreDriver): Attributes: api_key: The API key for the Marqo API. url: The URL to the Marqo API. - mq: An optional Marqo client. Defaults to a new client with the given URL and API key. 
+ client: An optional Marqo client. Defaults to a new client with the given URL and API key. index: The name of the index to use. """ api_key: str = field(kw_only=True, metadata={"serializable": True}) url: str = field(kw_only=True, metadata={"serializable": True}) - mq: Optional[marqo.Client] = field( - default=Factory( - lambda self: import_optional_dependency("marqo").Client(self.url, api_key=self.api_key), - takes_self=True, - ), - kw_only=True, - ) index: str = field(kw_only=True, metadata={"serializable": True}) + _client: marqo.Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> marqo.Client: + return import_optional_dependency("marqo").Client(self.url, api_key=self.api_key) def upsert_text( self, @@ -65,7 +64,7 @@ def upsert_text( if namespace: doc["namespace"] = namespace - response = self.mq.index(self.index).add_documents([doc], tensor_fields=["Description"]) + response = self.client.index(self.index).add_documents([doc], tensor_fields=["Description"]) if isinstance(response, dict) and "items" in response and response["items"]: return response["items"][0]["_id"] else: @@ -102,7 +101,7 @@ def upsert_text_artifact( "namespace": namespace, } - response = self.mq.index(self.index).add_documents([doc], tensor_fields=["Description", "artifact"]) + response = self.client.index(self.index).add_documents([doc], tensor_fields=["Description", "artifact"]) if isinstance(response, dict) and "items" in response and response["items"]: return response["items"][0]["_id"] else: @@ -118,7 +117,7 @@ def load_entry(self, vector_id: str, *, namespace: Optional[str] = None) -> Opti Returns: The loaded Entry if found, otherwise None. 
""" - result = self.mq.index(self.index).get_document(document_id=vector_id, expose_facets=True) + result = self.client.index(self.index).get_document(document_id=vector_id, expose_facets=True) if result and "_tensor_facets" in result and len(result["_tensor_facets"]) > 0: return BaseVectorStoreDriver.Entry( @@ -141,15 +140,15 @@ def load_entries(self, *, namespace: Optional[str] = None) -> list[BaseVectorSto filter_string = f"namespace:{namespace}" if namespace else None if filter_string is not None: - results = self.mq.index(self.index).search("", limit=10000, filter_string=filter_string) + results = self.client.index(self.index).search("", limit=10000, filter_string=filter_string) else: - results = self.mq.index(self.index).search("", limit=10000) + results = self.client.index(self.index).search("", limit=10000) # get all _id's from search results ids = [r["_id"] for r in results["hits"]] # get documents corresponding to the ids - documents = self.mq.index(self.index).get_documents(document_ids=ids, expose_facets=True) + documents = self.client.index(self.index).get_documents(document_ids=ids, expose_facets=True) # for each document, if it's found, create an Entry object entries = [] @@ -195,11 +194,12 @@ def query( "filter_string": f"namespace:{namespace}" if namespace else None, } | kwargs - results = self.mq.index(self.index).search(query, **params) + results = self.client.index(self.index).search(query, **params) if include_vectors: results["hits"] = [ - {**r, **self.mq.index(self.index).get_document(r["_id"], expose_facets=True)} for r in results["hits"] + {**r, **self.client.index(self.index).get_document(r["_id"], expose_facets=True)} + for r in results["hits"] ] return [ @@ -218,7 +218,7 @@ def delete_index(self, name: str) -> dict[str, Any]: Args: name: The name of the index to delete. """ - return self.mq.delete_index(name) + return self.client.delete_index(name) def get_indexes(self) -> list[str]: """Get a list of all indexes in the Marqo client. 
@@ -226,7 +226,7 @@ def get_indexes(self) -> list[str]: Returns: The list of all indexes. """ - return [index["index"] for index in self.mq.get_indexes()["results"]] + return [index["index"] for index in self.client.get_indexes()["results"]] def upsert_vector( self, diff --git a/griptape/drivers/vector/mongodb_atlas_vector_store_driver.py b/griptape/drivers/vector/mongodb_atlas_vector_store_driver.py index bc3f1e22f..a6f32620a 100644 --- a/griptape/drivers/vector/mongodb_atlas_vector_store_driver.py +++ b/griptape/drivers/vector/mongodb_atlas_vector_store_driver.py @@ -2,10 +2,11 @@ from typing import TYPE_CHECKING, Optional -from attrs import Factory, define, field +from attrs import define, field from griptape.drivers import BaseVectorStoreDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from pymongo import MongoClient @@ -37,12 +38,11 @@ class MongoDbAtlasVectorStoreDriver(BaseVectorStoreDriver): kw_only=True, metadata={"serializable": True}, ) # https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/#fields - client: MongoClient = field( - default=Factory( - lambda self: import_optional_dependency("pymongo").MongoClient(self.connection_string), - takes_self=True, - ), - ) + _client: MongoClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> MongoClient: + return import_optional_dependency("pymongo").MongoClient(self.connection_string) def get_collection(self) -> Collection: """Returns the MongoDB Collection instance for the specified database and collection name.""" diff --git a/griptape/drivers/vector/opensearch_vector_store_driver.py b/griptape/drivers/vector/opensearch_vector_store_driver.py index cf944116a..5f247f6db 100644 --- a/griptape/drivers/vector/opensearch_vector_store_driver.py +++ b/griptape/drivers/vector/opensearch_vector_store_driver.py @@ -3,11 +3,12 @@ import 
logging from typing import TYPE_CHECKING, NoReturn, Optional -from attrs import Factory, define, field +from attrs import define, field from griptape import utils from griptape.drivers import BaseVectorStoreDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from opensearchpy import OpenSearch @@ -32,19 +33,19 @@ class OpenSearchVectorStoreDriver(BaseVectorStoreDriver): use_ssl: bool = field(default=True, kw_only=True, metadata={"serializable": True}) verify_certs: bool = field(default=True, kw_only=True, metadata={"serializable": True}) index_name: str = field(kw_only=True, metadata={"serializable": True}) - - client: OpenSearch = field( - default=Factory( - lambda self: import_optional_dependency("opensearchpy").OpenSearch( - hosts=[{"host": self.host, "port": self.port}], - http_auth=self.http_auth, - use_ssl=self.use_ssl, - verify_certs=self.verify_certs, - connection_class=import_optional_dependency("opensearchpy").RequestsHttpConnection, - ), - takes_self=True, - ), - ) + _client: OpenSearch = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> OpenSearch: + opensearchpy = import_optional_dependency("opensearchpy") + + return opensearchpy.OpenSearch( + hosts=[{"host": self.host, "port": self.port}], + http_auth=self.http_auth, + use_ssl=self.use_ssl, + verify_certs=self.verify_certs, + connection_class=opensearchpy.RequestsHttpConnection, + ) def upsert_vector( self, diff --git a/griptape/drivers/vector/pgvector_vector_store_driver.py b/griptape/drivers/vector/pgvector_vector_store_driver.py index 30f437c7e..c1a6bef06 100644 --- a/griptape/drivers/vector/pgvector_vector_store_driver.py +++ b/griptape/drivers/vector/pgvector_vector_store_driver.py @@ -9,9 +9,10 @@ from griptape.drivers import BaseVectorStoreDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import 
lazy_property if TYPE_CHECKING: - from sqlalchemy.engine import Engine + import sqlalchemy @define @@ -27,14 +28,14 @@ class PgVectorVectorStoreDriver(BaseVectorStoreDriver): connection_string: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) create_engine_params: dict = field(factory=dict, kw_only=True, metadata={"serializable": True}) - engine: Optional[Engine] = field(default=None, kw_only=True) table_name: str = field(kw_only=True, metadata={"serializable": True}) _model: Any = field(default=Factory(lambda self: self.default_vector_model(), takes_self=True)) + _engine: sqlalchemy.Engine = field(default=None, kw_only=True, alias="engine", metadata={"serializable": False}) @connection_string.validator # pyright: ignore[reportAttributeAccessIssue] def validate_connection_string(self, _: Attribute, connection_string: Optional[str]) -> None: # If an engine is provided, the connection string is not used. - if self.engine is not None: + if self._engine is not None: return # If an engine is not provided, a connection string is required. @@ -44,22 +45,11 @@ def validate_connection_string(self, _: Attribute, connection_string: Optional[s if not connection_string.startswith("postgresql://"): raise ValueError("The connection string must describe a Postgres database connection") - @engine.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_engine(self, _: Attribute, engine: Optional[Engine]) -> None: - # If a connection string is provided, an engine does not need to be provided. - if self.connection_string is not None: - return - - # If a connection string is not provided, an engine is required. 
- if engine is None: - raise ValueError("An engine or connection string is required") - - def __attrs_post_init__(self) -> None: - if self.engine is None: - if self.connection_string is None: - raise ValueError("An engine or connection string is required") - sqlalchemy = import_optional_dependency("sqlalchemy") - self.engine = sqlalchemy.create_engine(self.connection_string, **self.create_engine_params) + @lazy_property() + def engine(self) -> sqlalchemy.Engine: + return import_optional_dependency("sqlalchemy").create_engine( + self.connection_string, **self.create_engine_params + ) def setup( self, diff --git a/griptape/drivers/vector/pinecone_vector_store_driver.py b/griptape/drivers/vector/pinecone_vector_store_driver.py index a3a132ab3..81e593e72 100644 --- a/griptape/drivers/vector/pinecone_vector_store_driver.py +++ b/griptape/drivers/vector/pinecone_vector_store_driver.py @@ -6,6 +6,7 @@ from griptape.drivers import BaseVectorStoreDriver from griptape.utils import import_optional_dependency, str_to_hash +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: import pinecone @@ -17,16 +18,20 @@ class PineconeVectorStoreDriver(BaseVectorStoreDriver): index_name: str = field(kw_only=True, metadata={"serializable": True}) environment: str = field(kw_only=True, metadata={"serializable": True}) project_name: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - index: pinecone.Index = field(init=False) + _client: pinecone.Pinecone = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + _index: pinecone.Index = field(default=None, kw_only=True, alias="index", metadata={"serializable": False}) - def __attrs_post_init__(self) -> None: - pinecone = import_optional_dependency("pinecone").Pinecone( + @lazy_property() + def client(self) -> pinecone.Pinecone: + return import_optional_dependency("pinecone").Pinecone( api_key=self.api_key, environment=self.environment, 
project_name=self.project_name, ) - self.index = pinecone.Index(self.index_name) + @lazy_property() + def index(self) -> pinecone.Index: + return self.client.Index(self.index_name) def upsert_vector( self, diff --git a/griptape/drivers/vector/qdrant_vector_store_driver.py b/griptape/drivers/vector/qdrant_vector_store_driver.py index 154e54af7..79cf64f37 100644 --- a/griptape/drivers/vector/qdrant_vector_store_driver.py +++ b/griptape/drivers/vector/qdrant_vector_store_driver.py @@ -2,12 +2,17 @@ import logging import uuid -from typing import Optional +from typing import TYPE_CHECKING, Optional from attrs import define, field from griptape.drivers import BaseVectorStoreDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + from qdrant_client import QdrantClient + DEFAULT_DISTANCE = "Cosine" CONTENT_PAYLOAD_KEY = "data" @@ -56,9 +61,11 @@ class QdrantVectorStoreDriver(BaseVectorStoreDriver): collection_name: str = field(kw_only=True, metadata={"serializable": True}) vector_name: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) content_payload_key: str = field(default=CONTENT_PAYLOAD_KEY, kw_only=True, metadata={"serializable": True}) + _client: QdrantClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) - def __attrs_post_init__(self) -> None: - self.client = import_optional_dependency("qdrant_client").QdrantClient( + @lazy_property() + def client(self) -> QdrantClient: + return import_optional_dependency("qdrant_client").QdrantClient( location=self.location, url=self.url, host=self.host, diff --git a/griptape/drivers/vector/redis_vector_store_driver.py b/griptape/drivers/vector/redis_vector_store_driver.py index 0abf2c985..d220878f3 100644 --- a/griptape/drivers/vector/redis_vector_store_driver.py +++ b/griptape/drivers/vector/redis_vector_store_driver.py @@ -4,10 +4,11 @@ from typing import TYPE_CHECKING, 
NoReturn, Optional import numpy as np -from attrs import Factory, define, field +from attrs import define, field from griptape.drivers import BaseVectorStoreDriver from griptape.utils import import_optional_dependency, str_to_hash +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from redis import Redis @@ -33,19 +34,17 @@ class RedisVectorStoreDriver(BaseVectorStoreDriver): db: int = field(kw_only=True, default=0, metadata={"serializable": True}) password: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) index: str = field(kw_only=True, metadata={"serializable": True}) - - client: Redis = field( - default=Factory( - lambda self: import_optional_dependency("redis").Redis( - host=self.host, - port=self.port, - db=self.db, - password=self.password, - decode_responses=False, - ), - takes_self=True, - ), - ) + _client: Redis = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> Redis: + return import_optional_dependency("redis").Redis( + host=self.host, + port=self.port, + db=self.db, + password=self.password, + decode_responses=False, + ) def upsert_vector( self, diff --git a/griptape/drivers/web_scraper/base_web_scraper_driver.py b/griptape/drivers/web_scraper/base_web_scraper_driver.py index ae39f8eac..0c33f3713 100644 --- a/griptape/drivers/web_scraper/base_web_scraper_driver.py +++ b/griptape/drivers/web_scraper/base_web_scraper_driver.py @@ -4,5 +4,13 @@ class BaseWebScraperDriver(ABC): + def scrape_url(self, url: str) -> TextArtifact: + source = self.fetch_url(url) + + return self.extract_page(source) + + @abstractmethod + def fetch_url(self, url: str) -> str: ... + @abstractmethod - def scrape_url(self, url: str) -> TextArtifact: ... + def extract_page(self, page: str) -> TextArtifact: ... 
diff --git a/griptape/drivers/web_scraper/markdownify_web_scraper_driver.py b/griptape/drivers/web_scraper/markdownify_web_scraper_driver.py index b54ff072f..8a41fe39e 100644 --- a/griptape/drivers/web_scraper/markdownify_web_scraper_driver.py +++ b/griptape/drivers/web_scraper/markdownify_web_scraper_driver.py @@ -27,7 +27,7 @@ class MarkdownifyWebScraperDriver(BaseWebScraperDriver): the browser has emitted the "load" event. """ - DEFAULT_EXCLUDE_TAGS = ["script", "style", "head"] + DEFAULT_EXCLUDE_TAGS = ["script", "style", "head", "audio", "img", "picture", "source", "video"] include_links: bool = field(default=True, kw_only=True) exclude_tags: list[str] = field( @@ -38,20 +38,8 @@ class MarkdownifyWebScraperDriver(BaseWebScraperDriver): exclude_ids: list[str] = field(default=Factory(list), kw_only=True) timeout: Optional[int] = field(default=None, kw_only=True) - def scrape_url(self, url: str) -> TextArtifact: + def fetch_url(self, url: str) -> str: sync_playwright = import_optional_dependency("playwright.sync_api").sync_playwright - bs4 = import_optional_dependency("bs4") - markdownify = import_optional_dependency("markdownify") - - include_links = self.include_links - - # Custom MarkdownConverter to optionally linked urls. If include_links is False only - # the text of the link is returned. 
- class OptionalLinksMarkdownConverter(markdownify.MarkdownConverter): - def convert_a(self, el: Any, text: str, convert_as_inline: Any) -> str: - if include_links: - return super().convert_a(el, text, convert_as_inline) - return text with sync_playwright() as p, p.chromium.launch(headless=True) as browser: page = browser.new_page() @@ -76,28 +64,43 @@ def skip_loading_images(route: Any) -> Any: if not content: raise Exception("can't access URL") - soup = bs4.BeautifulSoup(content, "html.parser") + return content + + def extract_page(self, page: str) -> TextArtifact: + bs4 = import_optional_dependency("bs4") + markdownify = import_optional_dependency("markdownify") + include_links = self.include_links + + # Custom MarkdownConverter to optionally linked urls. If include_links is False only + # the text of the link is returned. + class OptionalLinksMarkdownConverter(markdownify.MarkdownConverter): + def convert_a(self, el: Any, text: str, convert_as_inline: Any) -> str: + if include_links: + return super().convert_a(el, text, convert_as_inline) + return text + + soup = bs4.BeautifulSoup(page, "html.parser") - # Remove unwanted elements - exclude_selector = ",".join( - self.exclude_tags + [f".{c}" for c in self.exclude_classes] + [f"#{i}" for i in self.exclude_ids], - ) - if exclude_selector: - for s in soup.select(exclude_selector): - s.extract() + # Remove unwanted elements + exclude_selector = ",".join( + self.exclude_tags + [f".{c}" for c in self.exclude_classes] + [f"#{i}" for i in self.exclude_ids], + ) + if exclude_selector: + for s in soup.select(exclude_selector): + s.extract() - text = OptionalLinksMarkdownConverter().convert_soup(soup) + text = OptionalLinksMarkdownConverter().convert_soup(soup) - # Remove leading and trailing whitespace from the entire text - text = text.strip() + # Remove leading and trailing whitespace from the entire text + text = text.strip() - # Remove trailing whitespace from each line - text = re.sub(r"[ \t]+$", "", text, 
flags=re.MULTILINE) + # Remove trailing whitespace from each line + text = re.sub(r"[ \t]+$", "", text, flags=re.MULTILINE) - # Indent using 2 spaces instead of tabs - text = re.sub(r"(\n?\s*?)\t", r"\1 ", text) + # Indent using 2 spaces instead of tabs + text = re.sub(r"(\n?\s*?)\t", r"\1 ", text) - # Remove triple+ newlines (keep double newlines for paragraphs) - text = re.sub(r"\n\n+", "\n\n", text) + # Remove triple+ newlines (keep double newlines for paragraphs) + text = re.sub(r"\n\n+", "\n\n", text) - return TextArtifact(text) + return TextArtifact(text) diff --git a/griptape/drivers/web_scraper/proxy_web_scraper_driver.py b/griptape/drivers/web_scraper/proxy_web_scraper_driver.py index 2d785fde2..94b3914ea 100644 --- a/griptape/drivers/web_scraper/proxy_web_scraper_driver.py +++ b/griptape/drivers/web_scraper/proxy_web_scraper_driver.py @@ -12,6 +12,10 @@ class ProxyWebScraperDriver(BaseWebScraperDriver): proxies: dict = field(kw_only=True, metadata={"serializable": False}) params: dict = field(default=Factory(dict), kw_only=True, metadata={"serializable": True}) - def scrape_url(self, url: str) -> TextArtifact: + def fetch_url(self, url: str) -> str: response = requests.get(url, proxies=self.proxies, **self.params) - return TextArtifact(response.text) + + return response.text + + def extract_page(self, page: str) -> TextArtifact: + return TextArtifact(page) diff --git a/griptape/drivers/web_scraper/trafilatura_web_scraper_driver.py b/griptape/drivers/web_scraper/trafilatura_web_scraper_driver.py index 06f5573a4..e87af8af6 100644 --- a/griptape/drivers/web_scraper/trafilatura_web_scraper_driver.py +++ b/griptape/drivers/web_scraper/trafilatura_web_scraper_driver.py @@ -12,7 +12,7 @@ class TrafilaturaWebScraperDriver(BaseWebScraperDriver): include_links: bool = field(default=True, kw_only=True) - def scrape_url(self, url: str) -> TextArtifact: + def fetch_url(self, url: str) -> str: trafilatura = import_optional_dependency("trafilatura") use_config = 
trafilatura.settings.use_config @@ -29,6 +29,15 @@ def scrape_url(self, url: str) -> TextArtifact: if page is None: raise Exception("can't access URL") + + return page + + def extract_page(self, page: str) -> TextArtifact: + trafilatura = import_optional_dependency("trafilatura") + use_config = trafilatura.settings.use_config + + config = use_config() + extracted_page = trafilatura.extract( page, include_links=self.include_links, diff --git a/griptape/drivers/web_search/base_web_search_driver.py b/griptape/drivers/web_search/base_web_search_driver.py index b561085ef..bb88735c5 100644 --- a/griptape/drivers/web_search/base_web_search_driver.py +++ b/griptape/drivers/web_search/base_web_search_driver.py @@ -8,8 +8,6 @@ @define class BaseWebSearchDriver(ABC): results_count: int = field(default=5, kw_only=True) - language: str = field(default="en", kw_only=True) - country: str = field(default="us", kw_only=True) @abstractmethod def search(self, query: str, **kwargs) -> ListArtifact: ... diff --git a/griptape/drivers/web_search/duck_duck_go_web_search_driver.py b/griptape/drivers/web_search/duck_duck_go_web_search_driver.py index b67e81f35..113ca5a8a 100644 --- a/griptape/drivers/web_search/duck_duck_go_web_search_driver.py +++ b/griptape/drivers/web_search/duck_duck_go_web_search_driver.py @@ -3,11 +3,12 @@ import json from typing import TYPE_CHECKING -from attrs import Factory, define, field +from attrs import define, field from griptape.artifacts import ListArtifact, TextArtifact from griptape.drivers import BaseWebSearchDriver from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from duckduckgo_search import DDGS @@ -15,7 +16,13 @@ @define class DuckDuckGoWebSearchDriver(BaseWebSearchDriver): - client: DDGS = field(default=Factory(lambda: import_optional_dependency("duckduckgo_search").DDGS()), kw_only=True) + language: str = field(default="en", kw_only=True) + country: str = field(default="us", 
kw_only=True) + _client: DDGS = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> DDGS: + return import_optional_dependency("duckduckgo_search").DDGS() def search(self, query: str, **kwargs) -> ListArtifact: try: diff --git a/griptape/drivers/web_search/exa_web_search_driver.py b/griptape/drivers/web_search/exa_web_search_driver.py new file mode 100644 index 000000000..c5ef3abe7 --- /dev/null +++ b/griptape/drivers/web_search/exa_web_search_driver.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from attrs import define, field + +from griptape.artifacts import JsonArtifact, ListArtifact +from griptape.drivers import BaseWebSearchDriver +from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + from exa_py.api import Exa + + +@define +class ExaWebSearchDriver(BaseWebSearchDriver): + api_key: str = field(kw_only=True, default=None) + highlights: bool = field(default=False, kw_only=True) + use_autoprompt: bool = field(default=False, kw_only=True) + params: dict[str, Any] = field(factory=dict, kw_only=True, metadata={"serializable": True}) + _client: Exa = field(default=None, kw_only=True, alias="client") + + @lazy_property() + def client(self) -> Exa: + return import_optional_dependency("exa_py").Exa(api_key=self.api_key) + + def search(self, query: str, **kwargs) -> ListArtifact[JsonArtifact]: + response = self.client.search_and_contents( + highlights=self.highlights, + use_autoprompt=self.use_autoprompt, + query=query, + num_results=self.results_count, + text=True, + **self.params, + **kwargs, + ) + results = [ + {"title": result.title, "url": result.url, "highlights": result.highlights, "text": result.text} + for result in response.results + ] + return ListArtifact([JsonArtifact(result) for result in results]) diff --git 
a/griptape/drivers/web_search/google_web_search_driver.py b/griptape/drivers/web_search/google_web_search_driver.py index 012c52307..e16e87f33 100644 --- a/griptape/drivers/web_search/google_web_search_driver.py +++ b/griptape/drivers/web_search/google_web_search_driver.py @@ -13,6 +13,8 @@ class GoogleWebSearchDriver(BaseWebSearchDriver): api_key: str = field(kw_only=True) search_id: str = field(kw_only=True) + language: str = field(default="en", kw_only=True) + country: str = field(default="us", kw_only=True) def search(self, query: str, **kwargs) -> ListArtifact: return ListArtifact([TextArtifact(json.dumps(result)) for result in self._search_google(query, **kwargs)]) diff --git a/griptape/drivers/web_search/tavily_web_search_driver.py b/griptape/drivers/web_search/tavily_web_search_driver.py new file mode 100644 index 000000000..d3ecb0b4c --- /dev/null +++ b/griptape/drivers/web_search/tavily_web_search_driver.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from attrs import define, field + +from griptape.artifacts import JsonArtifact, ListArtifact +from griptape.drivers import BaseWebSearchDriver +from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + from tavily import TavilyClient + + +@define +class TavilyWebSearchDriver(BaseWebSearchDriver): + api_key: str = field(kw_only=True) + params: dict[str, Any] = field(factory=dict, kw_only=True, metadata={"serializable": True}) + _client: TavilyClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> TavilyClient: + return import_optional_dependency("tavily").TavilyClient(self.api_key) + + def search(self, query: str, **kwargs) -> ListArtifact: + response = self.client.search(query, max_results=self.results_count, **self.params, **kwargs) + results = response.get("results", []) + return 
ListArtifact([(JsonArtifact(result)) for result in results]) diff --git a/griptape/engines/extraction/base_extraction_engine.py b/griptape/engines/extraction/base_extraction_engine.py index d3a50585d..10ba5d142 100644 --- a/griptape/engines/extraction/base_extraction_engine.py +++ b/griptape/engines/extraction/base_extraction_engine.py @@ -5,11 +5,11 @@ from attrs import Attribute, Factory, define, field +from griptape.artifacts import ListArtifact, TextArtifact from griptape.chunkers import BaseChunker, TextChunker from griptape.configs import Defaults if TYPE_CHECKING: - from griptape.artifacts import ListArtifact from griptape.drivers import BasePromptDriver from griptape.rules import Ruleset @@ -47,10 +47,19 @@ def min_response_tokens(self) -> int: - self.prompt_driver.tokenizer.max_input_tokens * self.max_token_multiplier, ) + def extract_text( + self, + text: str, + *, + rulesets: Optional[list[Ruleset]] = None, + **kwargs, + ) -> ListArtifact: + return self.extract_artifacts(ListArtifact([TextArtifact(text)]), rulesets=rulesets, **kwargs) + @abstractmethod - def extract( + def extract_artifacts( self, - text: str | ListArtifact, + artifacts: ListArtifact[TextArtifact], *, rulesets: Optional[list[Ruleset]] = None, **kwargs, diff --git a/griptape/engines/extraction/csv_extraction_engine.py b/griptape/engines/extraction/csv_extraction_engine.py index e7be73c73..7fb2a164b 100644 --- a/griptape/engines/extraction/csv_extraction_engine.py +++ b/griptape/engines/extraction/csv_extraction_engine.py @@ -17,23 +17,23 @@ @define class CsvExtractionEngine(BaseExtractionEngine): - column_names: list[str] = field(default=Factory(list), kw_only=True) + column_names: list[str] = field(kw_only=True) system_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/system.j2")), kw_only=True) user_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/user.j2")), kw_only=True) formatter_fn: Callable[[dict], str] = field( 
default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True ) - def extract( + def extract_artifacts( self, - text: str | ListArtifact, + artifacts: ListArtifact[TextArtifact], *, rulesets: Optional[list[Ruleset]] = None, **kwargs, - ) -> ListArtifact: + ) -> ListArtifact[TextArtifact]: return ListArtifact( self._extract_rec( - cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], + cast(list[TextArtifact], artifacts.value), [], rulesets=rulesets, ), diff --git a/griptape/engines/extraction/json_extraction_engine.py b/griptape/engines/extraction/json_extraction_engine.py index a4cd3a438..c817efd5f 100644 --- a/griptape/engines/extraction/json_extraction_engine.py +++ b/griptape/engines/extraction/json_extraction_engine.py @@ -6,7 +6,7 @@ from attrs import Factory, define, field -from griptape.artifacts import ListArtifact, TextArtifact +from griptape.artifacts import JsonArtifact, ListArtifact, TextArtifact from griptape.common import PromptStack from griptape.common.prompt_stack.messages.message import Message from griptape.engines import BaseExtractionEngine @@ -20,43 +20,39 @@ class JsonExtractionEngine(BaseExtractionEngine): JSON_PATTERN = r"(?s)[^\[]*(\[.*\])" - template_schema: dict = field(default=Factory(dict), kw_only=True) + template_schema: dict = field(kw_only=True) system_template_generator: J2 = field( default=Factory(lambda: J2("engines/extraction/json/system.j2")), kw_only=True ) user_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/json/user.j2")), kw_only=True) - def extract( + def extract_artifacts( self, - text: str | ListArtifact, + artifacts: ListArtifact[TextArtifact], *, rulesets: Optional[list[Ruleset]] = None, **kwargs, - ) -> ListArtifact: + ) -> ListArtifact[JsonArtifact]: return ListArtifact( - self._extract_rec( - cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], - [], - 
rulesets=rulesets, - ), + self._extract_rec(cast(list[TextArtifact], artifacts.value), [], rulesets=rulesets), item_separator="\n", ) - def json_to_text_artifacts(self, json_input: str) -> list[TextArtifact]: + def json_to_text_artifacts(self, json_input: str) -> list[JsonArtifact]: json_matches = re.findall(self.JSON_PATTERN, json_input, re.DOTALL) if json_matches: - return [TextArtifact(json.dumps(e)) for e in json.loads(json_matches[-1])] + return [JsonArtifact(e) for e in json.loads(json_matches[-1])] else: return [] def _extract_rec( self, artifacts: list[TextArtifact], - extractions: list[TextArtifact], + extractions: list[JsonArtifact], *, rulesets: Optional[list[Ruleset]] = None, - ) -> list[TextArtifact]: + ) -> list[JsonArtifact]: artifacts_text = self.chunk_joiner.join([a.value for a in artifacts]) system_prompt = self.system_template_generator.render( json_template_schema=json.dumps(self.template_schema), diff --git a/griptape/engines/rag/modules/response/prompt_response_rag_module.py b/griptape/engines/rag/modules/response/prompt_response_rag_module.py index fbb8ed7e6..b62a0eba3 100644 --- a/griptape/engines/rag/modules/response/prompt_response_rag_module.py +++ b/griptape/engines/rag/modules/response/prompt_response_rag_module.py @@ -56,8 +56,8 @@ def run(self, context: RagContext) -> BaseArtifact: def default_system_template_generator(self, context: RagContext, artifacts: list[TextArtifact]) -> str: params: dict[str, Any] = {"text_chunks": [c.to_text() for c in artifacts]} - if len(self.all_rulesets) > 0: - params["rulesets"] = J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets) + if len(self.rulesets) > 0: + params["rulesets"] = J2("rulesets/rulesets.j2").render(rulesets=self.rulesets) if self.metadata is not None: params["metadata"] = J2("engines/rag/modules/response/metadata/system.j2").render(metadata=self.metadata) diff --git a/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py 
b/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py index 7e4854d00..0348a2094 100644 --- a/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py +++ b/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py @@ -6,6 +6,7 @@ from attrs import Factory, define, field from griptape import utils +from griptape.chunkers import TextChunker from griptape.engines.rag.modules import BaseRetrievalRagModule if TYPE_CHECKING: @@ -14,12 +15,13 @@ from griptape.artifacts import TextArtifact from griptape.drivers import BaseVectorStoreDriver from griptape.engines.rag import RagContext - from griptape.loaders import BaseTextLoader + from griptape.loaders import TextLoader @define(kw_only=True) class TextLoaderRetrievalRagModule(BaseRetrievalRagModule): - loader: BaseTextLoader = field() + loader: TextLoader = field() + chunker: TextChunker = field(default=Factory(lambda: TextChunker())) vector_store_driver: BaseVectorStoreDriver = field() source: Any = field() query_params: dict[str, Any] = field(factory=dict) @@ -37,7 +39,8 @@ def run(self, context: RagContext) -> Sequence[TextArtifact]: query_params["namespace"] = namespace loader_output = self.loader.load(source) + chunks = self.chunker.chunk(loader_output) - self.vector_store_driver.upsert_text_artifacts({namespace: loader_output}) + self.vector_store_driver.upsert_text_artifacts({namespace: chunks}) return self.process_query_output_fn(self.vector_store_driver.query(context.query, **query_params)) diff --git a/griptape/loaders/__init__.py b/griptape/loaders/__init__.py index b79b0ff44..b86370607 100644 --- a/griptape/loaders/__init__.py +++ b/griptape/loaders/__init__.py @@ -1,26 +1,28 @@ from .base_loader import BaseLoader -from .base_text_loader import BaseTextLoader +from .base_file_loader import BaseFileLoader + from .text_loader import TextLoader from .pdf_loader import PdfLoader from .web_loader import WebLoader from .sql_loader import SqlLoader from 
.csv_loader import CsvLoader -from .dataframe_loader import DataFrameLoader from .email_loader import EmailLoader + +from .blob_loader import BlobLoader + from .image_loader import ImageLoader + from .audio_loader import AudioLoader -from .blob_loader import BlobLoader __all__ = [ "BaseLoader", - "BaseTextLoader", + "BaseFileLoader", "TextLoader", "PdfLoader", "WebLoader", "SqlLoader", "CsvLoader", - "DataFrameLoader", "EmailLoader", "ImageLoader", "AudioLoader", diff --git a/griptape/loaders/audio_loader.py b/griptape/loaders/audio_loader.py index 84d6b767a..0bff5c642 100644 --- a/griptape/loaders/audio_loader.py +++ b/griptape/loaders/audio_loader.py @@ -1,20 +1,15 @@ from __future__ import annotations -from typing import cast - +import filetype from attrs import define from griptape.artifacts import AudioArtifact -from griptape.loaders import BaseLoader -from griptape.utils import import_optional_dependency +from griptape.loaders.base_file_loader import BaseFileLoader @define -class AudioLoader(BaseLoader): +class AudioLoader(BaseFileLoader[AudioArtifact]): """Loads audio content into audio artifacts.""" - def load(self, source: bytes, *args, **kwargs) -> AudioArtifact: - return AudioArtifact(source, format=import_optional_dependency("filetype").guess(source).extension) - - def load_collection(self, sources: list[bytes], *args, **kwargs) -> dict[str, AudioArtifact]: - return cast(dict[str, AudioArtifact], super().load_collection(sources, *args, **kwargs)) + def parse(self, data: bytes) -> AudioArtifact: + return AudioArtifact(data, format=filetype.guess(data).extension) diff --git a/griptape/loaders/base_file_loader.py b/griptape/loaders/base_file_loader.py new file mode 100644 index 000000000..9fcffa7ae --- /dev/null +++ b/griptape/loaders/base_file_loader.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from abc import ABC +from os import PathLike +from typing import TypeVar, Union + +from attrs import Factory, define, field + +from 
griptape.artifacts import BaseArtifact +from griptape.drivers import BaseFileManagerDriver, LocalFileManagerDriver +from griptape.loaders import BaseLoader +from griptape.utils import deprecation_warn + +A = TypeVar("A", bound=BaseArtifact) + + +@define +class BaseFileLoader(BaseLoader[Union[str, PathLike], bytes, A], ABC): + file_manager_driver: BaseFileManagerDriver = field( + default=Factory(lambda: LocalFileManagerDriver(workdir=None)), + kw_only=True, + ) + encoding: str = field(default="utf-8", kw_only=True) + + def fetch(self, source: str | PathLike | bytes) -> bytes: + if isinstance(source, bytes): + deprecation_warn( + "Using bytes as the source is deprecated and will be removed in a future release. " + "Please use a string or PathLike object instead." + ) + return source + + data = self.file_manager_driver.load_file(str(source)).value + if isinstance(data, str): + return data.encode(self.encoding) + else: + return data diff --git a/griptape/loaders/base_loader.py b/griptape/loaders/base_loader.py index 14f9aa10f..f7340283b 100644 --- a/griptape/loaders/base_loader.py +++ b/griptape/loaders/base_loader.py @@ -1,48 +1,72 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any, Generic, Optional, TypeVar from attrs import define, field +from griptape.artifacts import BaseArtifact from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin from griptape.utils.futures import execute_futures_dict from griptape.utils.hash import bytes_to_hash, str_to_hash if TYPE_CHECKING: - from collections.abc import Mapping, Sequence + from collections.abc import Mapping - from griptape.artifacts import BaseArtifact + from griptape.common import Reference + +S = TypeVar("S") # Type for the input source +F = TypeVar("F") # Type for the fetched data +A = TypeVar("A", bound=BaseArtifact) # Type for the returned Artifact @define -class 
BaseLoader(FuturesExecutorMixin, ABC): - encoding: Optional[str] = field(default=None, kw_only=True) +class BaseLoader(FuturesExecutorMixin, ABC, Generic[S, F, A]): + """Fetches data from a source, parses it, and returns an Artifact. + + Attributes: + reference: The optional `Reference` to set on the Artifact. + """ + + reference: Optional[Reference] = field(default=None, kw_only=True) + + def load(self, source: S) -> A: + data = self.fetch(source) + + artifact = self.parse(data) + + artifact.reference = self.reference + + return artifact @abstractmethod - def load(self, source: Any, *args, **kwargs) -> BaseArtifact | Sequence[BaseArtifact]: ... + def fetch(self, source: S) -> F: + """Fetches data from the source.""" + + ... + + @abstractmethod + def parse(self, data: F) -> A: + """Parses the fetched data and returns an Artifact.""" + + ... def load_collection( self, sources: list[Any], - *args, - **kwargs, - ) -> Mapping[str, BaseArtifact | Sequence[BaseArtifact | Sequence[BaseArtifact]]]: + ) -> Mapping[str, A]: + """Loads a collection of sources and returns a dictionary of Artifacts.""" # Create a dictionary before actually submitting the jobs to the executor # to avoid duplicate work. 
sources_by_key = {self.to_key(source): source for source in sources} return execute_futures_dict( - { - key: self.futures_executor.submit(self.load, source, *args, **kwargs) - for key, source in sources_by_key.items() - }, + {key: self.futures_executor.submit(self.load, source) for key, source in sources_by_key.items()}, ) - def to_key(self, source: Any, *args, **kwargs) -> str: + def to_key(self, source: S) -> str: + """Converts the source to a key for the collection.""" if isinstance(source, bytes): return bytes_to_hash(source) - elif isinstance(source, str): - return str_to_hash(source) else: return str_to_hash(str(source)) diff --git a/griptape/loaders/base_text_loader.py b/griptape/loaders/base_text_loader.py deleted file mode 100644 index 196cb0087..000000000 --- a/griptape/loaders/base_text_loader.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any, Optional, cast - -from attrs import Factory, define, field - -from griptape.artifacts import TextArtifact -from griptape.chunkers import BaseChunker, TextChunker -from griptape.loaders import BaseLoader -from griptape.tokenizers import OpenAiTokenizer - -if TYPE_CHECKING: - from griptape.common import Reference - from griptape.drivers import BaseEmbeddingDriver - - -@define -class BaseTextLoader(BaseLoader, ABC): - MAX_TOKEN_RATIO = 0.5 - - tokenizer: OpenAiTokenizer = field( - default=Factory(lambda: OpenAiTokenizer(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL)), - kw_only=True, - ) - max_tokens: int = field( - default=Factory(lambda self: round(self.tokenizer.max_input_tokens * self.MAX_TOKEN_RATIO), takes_self=True), - kw_only=True, - ) - chunker: BaseChunker = field( - default=Factory( - lambda self: TextChunker(tokenizer=self.tokenizer, max_tokens=self.max_tokens), - takes_self=True, - ), - kw_only=True, - ) - embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) - encoding: 
str = field(default="utf-8", kw_only=True) - reference: Optional[Reference] = field(default=None, kw_only=True) - - @abstractmethod - def load(self, source: Any, *args, **kwargs) -> list[TextArtifact]: ... - - def load_collection(self, sources: list[Any], *args, **kwargs) -> dict[str, list[TextArtifact]]: - return cast( - dict[str, list[TextArtifact]], - super().load_collection(sources, *args, **kwargs), - ) - - def _text_to_artifacts(self, text: str) -> list[TextArtifact]: - artifacts = [] - - chunks = self.chunker.chunk(text) if self.chunker else [TextArtifact(text)] - - for chunk in chunks: - if self.embedding_driver: - chunk.generate_embedding(self.embedding_driver) - - chunk.reference = self.reference - - chunk.encoding = self.encoding - - artifacts.append(chunk) - - return artifacts diff --git a/griptape/loaders/blob_loader.py b/griptape/loaders/blob_loader.py index d0099b47b..df148e66a 100644 --- a/griptape/loaders/blob_loader.py +++ b/griptape/loaders/blob_loader.py @@ -1,20 +1,15 @@ from __future__ import annotations -from typing import Any, cast - from attrs import define from griptape.artifacts import BlobArtifact -from griptape.loaders import BaseLoader +from griptape.loaders import BaseFileLoader @define -class BlobLoader(BaseLoader): - def load(self, source: Any, *args, **kwargs) -> BlobArtifact: +class BlobLoader(BaseFileLoader[BlobArtifact]): + def parse(self, data: bytes) -> BlobArtifact: if self.encoding is None: - return BlobArtifact(source) + return BlobArtifact(data) else: - return BlobArtifact(source, encoding=self.encoding) - - def load_collection(self, sources: list[bytes | str], *args, **kwargs) -> dict[str, BlobArtifact]: - return cast(dict[str, BlobArtifact], super().load_collection(sources, *args, **kwargs)) + return BlobArtifact(data, encoding=self.encoding) diff --git a/griptape/loaders/csv_loader.py b/griptape/loaders/csv_loader.py index bcf7029d4..4487d7aec 100644 --- a/griptape/loaders/csv_loader.py +++ 
b/griptape/loaders/csv_loader.py @@ -2,53 +2,25 @@ import csv from io import StringIO -from typing import TYPE_CHECKING, Callable, Optional, cast +from typing import Callable from attrs import define, field -from griptape.artifacts import TextArtifact -from griptape.loaders import BaseLoader - -if TYPE_CHECKING: - from griptape.drivers import BaseEmbeddingDriver +from griptape.artifacts import ListArtifact, TextArtifact +from griptape.loaders import BaseFileLoader @define -class CsvLoader(BaseLoader): - embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) +class CsvLoader(BaseFileLoader[ListArtifact[TextArtifact]]): delimiter: str = field(default=",", kw_only=True) encoding: str = field(default="utf-8", kw_only=True) formatter_fn: Callable[[dict], str] = field( default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True ) - def load(self, source: bytes | str, *args, **kwargs) -> list[TextArtifact]: - artifacts = [] - - if isinstance(source, bytes): - source = source.decode(encoding=self.encoding) - elif isinstance(source, (bytearray, memoryview)): - raise ValueError(f"Unsupported source type: {type(source)}") - - reader = csv.DictReader(StringIO(source), delimiter=self.delimiter) - chunks = [TextArtifact(self.formatter_fn(row)) for row in reader] - - if self.embedding_driver: - for chunk in chunks: - chunk.generate_embedding(self.embedding_driver) - - for chunk in chunks: - artifacts.append(chunk) - - return artifacts + def parse(self, data: bytes) -> ListArtifact[TextArtifact]: + reader = csv.DictReader(StringIO(data.decode(self.encoding)), delimiter=self.delimiter) - def load_collection( - self, - sources: list[bytes | str], - *args, - **kwargs, - ) -> dict[str, list[TextArtifact]]: - return cast( - dict[str, list[TextArtifact]], - super().load_collection(sources, *args, **kwargs), + return ListArtifact( + [TextArtifact(self.formatter_fn(row), meta={"row_num": row_num}) for row_num, row in 
enumerate(reader)] ) diff --git a/griptape/loaders/dataframe_loader.py b/griptape/loaders/dataframe_loader.py deleted file mode 100644 index 30d705676..000000000 --- a/griptape/loaders/dataframe_loader.py +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Callable, Optional, cast - -from attrs import define, field - -from griptape.artifacts import TextArtifact -from griptape.loaders import BaseLoader -from griptape.utils import import_optional_dependency -from griptape.utils.hash import str_to_hash - -if TYPE_CHECKING: - from pandas import DataFrame - - from griptape.drivers import BaseEmbeddingDriver - - -@define -class DataFrameLoader(BaseLoader): - embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) - formatter_fn: Callable[[dict], str] = field( - default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True - ) - - def load(self, source: DataFrame, *args, **kwargs) -> list[TextArtifact]: - artifacts = [] - - chunks = [TextArtifact(self.formatter_fn(row)) for row in source.to_dict(orient="records")] - - if self.embedding_driver: - for chunk in chunks: - chunk.generate_embedding(self.embedding_driver) - - for chunk in chunks: - artifacts.append(chunk) - - return artifacts - - def load_collection(self, sources: list[DataFrame], *args, **kwargs) -> dict[str, list[TextArtifact]]: - return cast(dict[str, list[TextArtifact]], super().load_collection(sources, *args, **kwargs)) - - def to_key(self, source: DataFrame, *args, **kwargs) -> str: - hash_pandas_object = import_optional_dependency("pandas.core.util.hashing").hash_pandas_object - - return str_to_hash(str(hash_pandas_object(source, index=True).values)) diff --git a/griptape/loaders/email_loader.py b/griptape/loaders/email_loader.py index f6c9ca406..8e935cfa4 100644 --- a/griptape/loaders/email_loader.py +++ b/griptape/loaders/email_loader.py @@ -1,7 +1,7 @@ from __future__ import annotations import 
imaplib -from typing import Optional, cast +from typing import Optional from attrs import astuple, define, field @@ -11,7 +11,7 @@ @define -class EmailLoader(BaseLoader): +class EmailLoader(BaseLoader["EmailLoader.EmailQuery", list[bytes], ListArtifact]): # pyright: ignore[reportGeneralTypeIssues] @define(frozen=True) class EmailQuery: """An email retrieval query. @@ -32,11 +32,10 @@ class EmailQuery: username: str = field(kw_only=True) password: str = field(kw_only=True) - def load(self, source: EmailQuery, *args, **kwargs) -> ListArtifact: - mailparser = import_optional_dependency("mailparser") + def fetch(self, source: EmailLoader.EmailQuery) -> list[bytes]: label, key, search_criteria, max_count = astuple(source) - artifacts = [] + mail_bytes = [] with imaplib.IMAP4_SSL(self.imap_url) as client: client.login(self.username, self.password) @@ -59,19 +58,24 @@ def load(self, source: EmailQuery, *args, **kwargs) -> ListArtifact: if data is None or not data or data[0] is None: continue - message = mailparser.parse_from_bytes(data[0][1]) - - # Note: mailparser only populates the text_plain field - # if the message content type is explicitly set to 'text/plain'. - if message.text_plain: - artifacts.append(TextArtifact("\n".join(message.text_plain))) + mail_bytes.append(data[0][1]) client.close() - return ListArtifact(artifacts) + return mail_bytes + + def parse(self, data: list[bytes]) -> ListArtifact[TextArtifact]: + mailparser = import_optional_dependency("mailparser") + artifacts = [] + for byte in data: + message = mailparser.parse_from_bytes(byte) + + # Note: mailparser only populates the text_plain field + # if the message content type is explicitly set to 'text/plain'. 
+ if message.text_plain: + artifacts.append(TextArtifact("\n".join(message.text_plain))) + + return ListArtifact(artifacts) def _count_messages(self, message_numbers: bytes) -> int: return len(list(filter(None, message_numbers.decode().split(" ")))) - - def load_collection(self, sources: list[EmailQuery], *args, **kwargs) -> dict[str, ListArtifact]: - return cast(dict[str, ListArtifact], super().load_collection(sources, *args, **kwargs)) diff --git a/griptape/loaders/image_loader.py b/griptape/loaders/image_loader.py index 83060dfa8..3af3922dc 100644 --- a/griptape/loaders/image_loader.py +++ b/griptape/loaders/image_loader.py @@ -1,17 +1,17 @@ from __future__ import annotations from io import BytesIO -from typing import Optional, cast +from typing import Optional from attrs import define, field from griptape.artifacts import ImageArtifact -from griptape.loaders import BaseLoader +from griptape.loaders import BaseFileLoader from griptape.utils import import_optional_dependency @define -class ImageLoader(BaseLoader): +class ImageLoader(BaseFileLoader[ImageArtifact]): """Loads images into image artifacts. Attributes: @@ -22,36 +22,15 @@ class ImageLoader(BaseLoader): format: Optional[str] = field(default=None, kw_only=True) - FORMAT_TO_MIME_TYPE = { - "bmp": "image/bmp", - "gif": "image/gif", - "jpeg": "image/jpeg", - "png": "image/png", - "tiff": "image/tiff", - "webp": "image/webp", - } - - def load(self, source: bytes, *args, **kwargs) -> ImageArtifact: + def parse(self, data: bytes) -> ImageArtifact: pil_image = import_optional_dependency("PIL.Image") - image = pil_image.open(BytesIO(source)) + image = pil_image.open(BytesIO(data)) # Normalize format only if requested. 
if self.format is not None: byte_stream = BytesIO() image.save(byte_stream, format=self.format) image = pil_image.open(byte_stream) - source = byte_stream.getvalue() - - return ImageArtifact(source, format=image.format.lower(), width=image.width, height=image.height) - - def _get_mime_type(self, image_format: str | None) -> str: - if image_format is None: - raise ValueError("image_format is None") - - if image_format.lower() not in self.FORMAT_TO_MIME_TYPE: - raise ValueError(f"Unsupported image format {image_format}") - - return self.FORMAT_TO_MIME_TYPE[image_format.lower()] + data = byte_stream.getvalue() - def load_collection(self, sources: list[bytes], *args, **kwargs) -> dict[str, ImageArtifact]: - return cast(dict[str, ImageArtifact], super().load_collection(sources, *args, **kwargs)) + return ImageArtifact(data, format=image.format.lower(), width=image.width, height=image.height) diff --git a/griptape/loaders/pdf_loader.py b/griptape/loaders/pdf_loader.py index 419bfabf4..5bf5337ae 100644 --- a/griptape/loaders/pdf_loader.py +++ b/griptape/loaders/pdf_loader.py @@ -1,37 +1,25 @@ from __future__ import annotations from io import BytesIO -from typing import Optional, cast +from typing import Optional -from attrs import Factory, define, field +from attrs import define -from griptape.artifacts import TextArtifact -from griptape.chunkers import PdfChunker -from griptape.loaders import BaseTextLoader +from griptape.artifacts import ListArtifact, TextArtifact +from griptape.loaders.base_file_loader import BaseFileLoader from griptape.utils import import_optional_dependency @define -class PdfLoader(BaseTextLoader): - chunker: PdfChunker = field( - default=Factory(lambda self: PdfChunker(tokenizer=self.tokenizer, max_tokens=self.max_tokens), takes_self=True), - kw_only=True, - ) - encoding: None = field(default=None, kw_only=True) - - def load( +class PdfLoader(BaseFileLoader): + def parse( self, - source: bytes, + data: bytes, + *, password: Optional[str] = None, - 
*args, - **kwargs, - ) -> list[TextArtifact]: + ) -> ListArtifact: pypdf = import_optional_dependency("pypdf") - reader = pypdf.PdfReader(BytesIO(source), strict=True, password=password) - return self._text_to_artifacts("\n".join([p.extract_text() for p in reader.pages])) + reader = pypdf.PdfReader(BytesIO(data), strict=True, password=password) + pages = [TextArtifact(p.extract_text()) for p in reader.pages] - def load_collection(self, sources: list[bytes], *args, **kwargs) -> dict[str, list[TextArtifact]]: - return cast( - dict[str, list[TextArtifact]], - super().load_collection(sources, *args, **kwargs), - ) + return ListArtifact(pages) diff --git a/griptape/loaders/sql_loader.py b/griptape/loaders/sql_loader.py index 105f585cb..0c6e8bdf9 100644 --- a/griptape/loaders/sql_loader.py +++ b/griptape/loaders/sql_loader.py @@ -1,38 +1,23 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable, Optional, cast +from typing import Callable from attrs import define, field -from griptape.artifacts import TextArtifact +from griptape.artifacts import ListArtifact, TextArtifact +from griptape.drivers import BaseSqlDriver from griptape.loaders import BaseLoader -if TYPE_CHECKING: - from griptape.drivers import BaseEmbeddingDriver, BaseSqlDriver - @define -class SqlLoader(BaseLoader): +class SqlLoader(BaseLoader[str, list[BaseSqlDriver.RowResult], ListArtifact[TextArtifact]]): sql_driver: BaseSqlDriver = field(kw_only=True) - embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) formatter_fn: Callable[[dict], str] = field( default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True ) - def load(self, source: str, *args, **kwargs) -> list[TextArtifact]: - rows = self.sql_driver.execute_query(source) - artifacts = [] - - chunks = [TextArtifact(self.formatter_fn(row.cells)) for row in rows] if rows else [] - - if self.embedding_driver: - for chunk in chunks: - 
chunk.generate_embedding(self.embedding_driver) - - for chunk in chunks: - artifacts.append(chunk) - - return artifacts + def fetch(self, source: str) -> list[BaseSqlDriver.RowResult]: + return self.sql_driver.execute_query(source) or [] - def load_collection(self, sources: list[str], *args, **kwargs) -> dict[str, list[TextArtifact]]: - return cast(dict[str, list[TextArtifact]], super().load_collection(sources, *args, **kwargs)) + def parse(self, data: list[BaseSqlDriver.RowResult]) -> ListArtifact[TextArtifact]: + return ListArtifact([TextArtifact(self.formatter_fn(row.cells)) for row in data]) diff --git a/griptape/loaders/text_loader.py b/griptape/loaders/text_loader.py index 79e551a8e..c33eb9018 100644 --- a/griptape/loaders/text_loader.py +++ b/griptape/loaders/text_loader.py @@ -1,55 +1,17 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional, cast - -from attrs import Factory, define, field +from attrs import define, field from griptape.artifacts import TextArtifact -from griptape.chunkers import TextChunker -from griptape.loaders import BaseTextLoader -from griptape.tokenizers import OpenAiTokenizer - -if TYPE_CHECKING: - from griptape.drivers import BaseEmbeddingDriver +from griptape.loaders import BaseFileLoader @define -class TextLoader(BaseTextLoader): - MAX_TOKEN_RATIO = 0.5 - - tokenizer: OpenAiTokenizer = field( - default=Factory(lambda: OpenAiTokenizer(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL)), - kw_only=True, - ) - max_tokens: int = field( - default=Factory(lambda self: round(self.tokenizer.max_input_tokens * self.MAX_TOKEN_RATIO), takes_self=True), - kw_only=True, - ) - chunker: TextChunker = field( - default=Factory( - lambda self: TextChunker(tokenizer=self.tokenizer, max_tokens=self.max_tokens), - takes_self=True, - ), - kw_only=True, - ) - embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) +class TextLoader(BaseFileLoader[TextArtifact]): encoding: str = 
field(default="utf-8", kw_only=True) - def load(self, source: bytes | str, *args, **kwargs) -> list[TextArtifact]: - if isinstance(source, bytes): - source = source.decode(encoding=self.encoding) - elif isinstance(source, (bytearray, memoryview)): - raise ValueError(f"Unsupported source type: {type(source)}") - - return self._text_to_artifacts(source) - - def load_collection( - self, - sources: list[bytes | str], - *args, - **kwargs, - ) -> dict[str, list[TextArtifact]]: - return cast( - dict[str, list[TextArtifact]], - super().load_collection(sources, *args, **kwargs), - ) + def parse(self, data: str | bytes) -> TextArtifact: + if isinstance(data, str): + return TextArtifact(data, encoding=self.encoding) + else: + return TextArtifact(data.decode(self.encoding), encoding=self.encoding) diff --git a/griptape/loaders/web_loader.py b/griptape/loaders/web_loader.py index 720ab34a1..697c18bba 100644 --- a/griptape/loaders/web_loader.py +++ b/griptape/loaders/web_loader.py @@ -1,23 +1,21 @@ from __future__ import annotations -from typing import TYPE_CHECKING - from attrs import Factory, define, field +from griptape.artifacts import TextArtifact from griptape.drivers import BaseWebScraperDriver, TrafilaturaWebScraperDriver -from griptape.loaders import BaseTextLoader - -if TYPE_CHECKING: - from griptape.artifacts import TextArtifact +from griptape.loaders import BaseLoader @define -class WebLoader(BaseTextLoader): +class WebLoader(BaseLoader[str, str, TextArtifact]): web_scraper_driver: BaseWebScraperDriver = field( default=Factory(lambda: TrafilaturaWebScraperDriver()), kw_only=True, ) - def load(self, source: str, *args, **kwargs) -> list[TextArtifact]: - single_chunk_text_artifact = self.web_scraper_driver.scrape_url(source) - return self._text_to_artifacts(single_chunk_text_artifact.value) + def fetch(self, source: str) -> str: + return self.web_scraper_driver.fetch_url(source) + + def parse(self, data: str) -> TextArtifact: + return 
self.web_scraper_driver.extract_page(data) diff --git a/griptape/mixins/activity_mixin.py b/griptape/mixins/activity_mixin.py index 61e8076b1..497dffe62 100644 --- a/griptape/mixins/activity_mixin.py +++ b/griptape/mixins/activity_mixin.py @@ -88,8 +88,12 @@ def activity_schema(self, activity: Callable) -> Optional[Schema]: if activity is None or not getattr(activity, "is_activity", False): raise Exception("This method is not an activity.") if getattr(activity, "config")["schema"] is not None: - # Need to deepcopy to avoid modifying the original schema - config_schema = deepcopy(getattr(activity, "config")["schema"]) + config_schema = getattr(activity, "config")["schema"] + if isinstance(config_schema, Callable): + config_schema = config_schema(self) + else: + # Need to deepcopy to avoid modifying the original schema + config_schema = deepcopy(getattr(activity, "config")["schema"]) activity_name = self.activity_name(activity) if self.extra_schema_properties is not None and activity_name in self.extra_schema_properties: diff --git a/griptape/mixins/rule_mixin.py b/griptape/mixins/rule_mixin.py index 7fe6a6346..3e111a8f5 100644 --- a/griptape/mixins/rule_mixin.py +++ b/griptape/mixins/rule_mixin.py @@ -1,56 +1,22 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional - -from attrs import Attribute, define, field +from attrs import define, field from griptape.rules import BaseRule, Ruleset -if TYPE_CHECKING: - from griptape.structures import Structure - @define(slots=False) class RuleMixin: DEFAULT_RULESET_NAME = "Default Ruleset" - ADDITIONAL_RULESET_NAME = "Additional Ruleset" - rulesets: list[Ruleset] = field(factory=list, kw_only=True) + _rulesets: list[Ruleset] = field(factory=list, kw_only=True, alias="rulesets") rules: list[BaseRule] = field(factory=list, kw_only=True) - structure: Optional[Structure] = field(default=None, kw_only=True) - - @rulesets.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_rulesets(self, 
_: Attribute, rulesets: list[Ruleset]) -> None: - if not rulesets: - return - - if self.rules: - raise ValueError("Can't have both rulesets and rules specified.") - - @rules.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_rules(self, _: Attribute, rules: list[BaseRule]) -> None: - if not rules: - return - - if self.rulesets: - raise ValueError("Can't have both rules and rulesets specified.") @property - def all_rulesets(self) -> list[Ruleset]: - structure_rulesets = [] - - if self.structure: - if self.structure.rulesets: - structure_rulesets = self.structure.rulesets - elif self.structure.rules: - structure_rulesets = [Ruleset(name=self.DEFAULT_RULESET_NAME, rules=self.structure.rules)] + def rulesets(self) -> list[Ruleset]: + rulesets = self._rulesets - task_rulesets = [] - if self.rulesets: - task_rulesets = self.rulesets - elif self.rules: - task_ruleset_name = self.ADDITIONAL_RULESET_NAME if structure_rulesets else self.DEFAULT_RULESET_NAME - - task_rulesets = [Ruleset(name=task_ruleset_name, rules=self.rules)] + if self.rules: + rulesets.append(Ruleset(name=self.DEFAULT_RULESET_NAME, rules=self.rules)) - return structure_rulesets + task_rulesets + return rulesets diff --git a/griptape/rules/base_rule.py b/griptape/rules/base_rule.py index 190fc71e4..a3880d49f 100644 --- a/griptape/rules/base_rule.py +++ b/griptape/rules/base_rule.py @@ -9,6 +9,7 @@ @define(frozen=True) class BaseRule(ABC): value: Any = field() + meta: dict[str, Any] = field(factory=dict, kw_only=True) def __str__(self) -> str: return self.to_text() diff --git a/griptape/rules/ruleset.py b/griptape/rules/ruleset.py index eec1203f9..a4194cf3f 100644 --- a/griptape/rules/ruleset.py +++ b/griptape/rules/ruleset.py @@ -1,14 +1,33 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Sequence +import uuid +from typing import TYPE_CHECKING, Any -from attrs import define, field +from attrs import Factory, define, field + +from griptape.configs import Defaults 
if TYPE_CHECKING: + from collections.abc import Sequence + + from griptape.drivers import BaseRulesetDriver from griptape.rules import BaseRule @define class Ruleset: - name: str = field() - rules: Sequence[BaseRule] = field() + id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True) + name: str = field( + default=Factory(lambda self: self.id, takes_self=True), + metadata={"serializable": True}, + ) + ruleset_driver: BaseRulesetDriver = field( + default=Factory(lambda: Defaults.drivers_config.ruleset_driver), kw_only=True + ) + meta: dict[str, Any] = field(factory=dict, kw_only=True) + rules: Sequence[BaseRule] = field(factory=list) + + def __attrs_post_init__(self) -> None: + rules, meta = self.ruleset_driver.load(self.name) + self.rules = [*rules, *self.rules] + self.meta = {**meta, **self.meta} diff --git a/griptape/schemas/base_schema.py b/griptape/schemas/base_schema.py index dde3ae49a..a078af76f 100644 --- a/griptape/schemas/base_schema.py +++ b/griptape/schemas/base_schema.py @@ -124,6 +124,7 @@ def _resolve_types(cls, attrs_cls: type) -> None: BaseImageGenerationDriver, BaseImageQueryDriver, BasePromptDriver, + BaseRulesetDriver, BaseTextToSpeechDriver, BaseVectorStoreDriver, ) @@ -145,6 +146,7 @@ def _resolve_types(cls, attrs_cls: type) -> None: "BaseTextToSpeechDriver": BaseTextToSpeechDriver, "BaseAudioTranscriptionDriver": BaseAudioTranscriptionDriver, "BaseConversationMemoryDriver": BaseConversationMemoryDriver, + "BaseRulesetDriver": BaseRulesetDriver, "BaseImageGenerationDriver": BaseImageGenerationDriver, "BaseArtifact": BaseArtifact, "PromptStack": PromptStack, @@ -165,6 +167,13 @@ def _resolve_types(cls, attrs_cls: type) -> None: if is_dependency_installed("google.generativeai") else Any, "boto3": import_optional_dependency("boto3") if is_dependency_installed("boto3") else Any, + "Anthropic": import_optional_dependency("anthropic").Anthropic + if is_dependency_installed("anthropic") + else Any, + "BedrockClient": 
import_optional_dependency("mypy_boto3_bedrock").BedrockClient + if is_dependency_installed("mypy_boto3_bedrock") + else Any, + "voyageai": import_optional_dependency("voyageai") if is_dependency_installed("voyageai") else Any, }, ) diff --git a/griptape/structures/agent.py b/griptape/structures/agent.py index 24f57395c..77f3e0618 100644 --- a/griptape/structures/agent.py +++ b/griptape/structures/agent.py @@ -7,7 +7,6 @@ from griptape.artifacts.text_artifact import TextArtifact from griptape.common import observable from griptape.configs import Defaults -from griptape.memory.structure import Run from griptape.structures import Structure from griptape.tasks import PromptTask, ToolkitTask @@ -60,15 +59,15 @@ def task(self) -> BaseTask: return self.tasks[0] def add_task(self, task: BaseTask) -> BaseTask: - self.tasks.clear() + self._tasks.clear() task.preprocess(self) - self.tasks.append(task) + self._tasks.append(task) return task - def add_tasks(self, *tasks: BaseTask) -> list[BaseTask]: + def add_tasks(self, *tasks: BaseTask | list[BaseTask]) -> list[BaseTask]: if len(tasks) > 1: raise ValueError("Agents can only have one task.") return super().add_tasks(*tasks) @@ -77,9 +76,4 @@ def add_tasks(self, *tasks: BaseTask) -> list[BaseTask]: def try_run(self, *args) -> Agent: self.task.execute() - if self.conversation_memory and self.output is not None: - run = Run(input=self.input_task.input, output=self.output) - - self.conversation_memory.add_run(run) - return self diff --git a/griptape/structures/pipeline.py b/griptape/structures/pipeline.py index e89d83818..a5134a964 100644 --- a/griptape/structures/pipeline.py +++ b/griptape/structures/pipeline.py @@ -6,7 +6,6 @@ from griptape.artifacts import ErrorArtifact from griptape.common import observable -from griptape.memory.structure import Run from griptape.structures import Structure if TYPE_CHECKING: @@ -25,7 +24,7 @@ def add_task(self, task: BaseTask) -> BaseTask: self.output_task.child_ids.append(task.id) 
task.parent_ids.append(self.output_task.id) - self.tasks.append(task) + self._tasks.append(task) return task @@ -45,7 +44,7 @@ def insert_task(self, parent_task: BaseTask, task: BaseTask) -> BaseTask: parent_task.child_ids.append(task.id) parent_index = self.tasks.index(parent_task) - self.tasks.insert(parent_index + 1, task) + self._tasks.insert(parent_index + 1, task) return task @@ -53,11 +52,6 @@ def insert_task(self, parent_task: BaseTask, task: BaseTask) -> BaseTask: def try_run(self, *args) -> Pipeline: self.__run_from_task(self.input_task) - if self.conversation_memory and self.output is not None: - run = Run(input=self.input_task.input, output=self.output) - - self.conversation_memory.add_run(run) - return self def context(self, task: BaseTask) -> dict[str, Any]: diff --git a/griptape/structures/structure.py b/griptape/structures/structure.py index b066c336e..d2e6d2f3f 100644 --- a/griptape/structures/structure.py +++ b/griptape/structures/structure.py @@ -4,27 +4,25 @@ from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any, Optional -from attrs import Attribute, Factory, define, field +from attrs import Factory, define, field from griptape.common import observable from griptape.events import EventBus, FinishStructureRunEvent, StartStructureRunEvent from griptape.memory import TaskMemory from griptape.memory.meta import MetaMemory -from griptape.memory.structure import ConversationMemory +from griptape.memory.structure import ConversationMemory, Run +from griptape.mixins.rule_mixin import RuleMixin if TYPE_CHECKING: from griptape.artifacts import BaseArtifact from griptape.memory.structure import BaseConversationMemory - from griptape.rules import BaseRule, Rule, Ruleset from griptape.tasks import BaseTask @define -class Structure(ABC): +class Structure(ABC, RuleMixin): id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True) - rulesets: list[Ruleset] = field(factory=list, kw_only=True) - rules: list[BaseRule] = 
field(factory=list, kw_only=True) - tasks: list[BaseTask] = field(factory=list, kw_only=True) + _tasks: list[BaseTask | list[BaseTask]] = field(factory=list, kw_only=True, alias="tasks") conversation_memory: Optional[BaseConversationMemory] = field( default=Factory(lambda: ConversationMemory()), kw_only=True, @@ -37,29 +35,24 @@ class Structure(ABC): fail_fast: bool = field(default=True, kw_only=True) _execution_args: tuple = () - @rulesets.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_rulesets(self, _: Attribute, rulesets: list[Ruleset]) -> None: - if not rulesets: - return - - if self.rules: - raise ValueError("can't have both rulesets and rules specified") - - @rules.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_rules(self, _: Attribute, rules: list[Rule]) -> None: - if not rules: - return - - if self.rulesets: - raise ValueError("can't have both rules and rulesets specified") - def __attrs_post_init__(self) -> None: - tasks = self.tasks.copy() - self.tasks.clear() + tasks = self._tasks.copy() + self._tasks.clear() self.add_tasks(*tasks) - def __add__(self, other: BaseTask | list[BaseTask]) -> list[BaseTask]: - return self.add_tasks(*other) if isinstance(other, list) else self + [other] + def __add__(self, other: BaseTask | list[BaseTask | list[BaseTask]]) -> list[BaseTask]: + return self.add_tasks(*other) if isinstance(other, list) else self.add_tasks(other) + + @property + def tasks(self) -> list[BaseTask]: + tasks = [] + + for task in self._tasks: + if isinstance(task, list): + tasks.extend(task) + else: + tasks.append(task) + return tasks @property def execution_args(self) -> tuple: @@ -74,8 +67,10 @@ def output_task(self) -> Optional[BaseTask]: return self.tasks[-1] if self.tasks else None @property - def output(self) -> Optional[BaseArtifact]: - return self.output_task.output if self.output_task is not None else None + def output(self) -> BaseArtifact: + if self.output_task.output is None: + raise 
ValueError("Structure's output Task has no output. Run the Structure to generate output.") + return self.output_task.output @property def finished_tasks(self) -> list[BaseTask]: @@ -98,8 +93,14 @@ def try_find_task(self, task_id: str) -> Optional[BaseTask]: return task return None - def add_tasks(self, *tasks: BaseTask) -> list[BaseTask]: - return [self.add_task(s) for s in tasks] + def add_tasks(self, *tasks: BaseTask | list[BaseTask]) -> list[BaseTask]: + added_tasks = [] + for task in tasks: + if isinstance(task, list): + added_tasks.extend(self.add_tasks(*task)) + else: + added_tasks.append(self.add_task(task)) + return added_tasks def context(self, task: BaseTask) -> dict[str, Any]: return {"args": self.execution_args, "structure": self} @@ -146,6 +147,11 @@ def before_run(self, args: Any) -> None: @observable def after_run(self) -> None: + if self.conversation_memory and self.output_task.output is not None: + run = Run(input=self.input_task.input, output=self.output_task.output) + + self.conversation_memory.add_run(run) + EventBus.publish_event( FinishStructureRunEvent( structure_id=self.id, diff --git a/griptape/structures/workflow.py b/griptape/structures/workflow.py index cd7bef07d..1c65c59c4 100644 --- a/griptape/structures/workflow.py +++ b/griptape/structures/workflow.py @@ -8,11 +8,11 @@ from griptape.artifacts import ErrorArtifact from griptape.common import observable -from griptape.memory.structure import Run from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin from griptape.structures import Structure if TYPE_CHECKING: + from griptape.artifacts import BaseArtifact from griptape.tasks import BaseTask @@ -26,13 +26,25 @@ def input_task(self) -> Optional[BaseTask]: def output_task(self) -> Optional[BaseTask]: return self.order_tasks()[-1] if self.tasks else None + @property + def input_tasks(self) -> list[BaseTask]: + return [task for task in self.tasks if not task.parents] + + @property + def output_tasks(self) -> list[BaseTask]: + 
return [task for task in self.tasks if not task.children] + + @property + def outputs(self) -> list[BaseArtifact]: + return [task.output for task in self.output_tasks if task.output is not None] + def add_task(self, task: BaseTask) -> BaseTask: if (existing_task := self.try_find_task(task.id)) is not None: return existing_task task.preprocess(self) - self.tasks.append(task) + self._tasks.append(task) return task @@ -82,7 +94,7 @@ def insert_task( last_parent_index = self.__link_task_to_parents(task, parent_tasks) # Insert the new task once, just after the last parent task - self.tasks.insert(last_parent_index + 1, task) + self._tasks.insert(last_parent_index + 1, task) return task @@ -106,11 +118,6 @@ def try_run(self, *args) -> Workflow: break - if self.conversation_memory and self.output is not None: - run = Run(input=self.input_task.input, output=self.output) - - self.conversation_memory.add_run(run) - return self def context(self, task: BaseTask) -> dict[str, Any]: diff --git a/griptape/tasks/__init__.py b/griptape/tasks/__init__.py index 7d08cf858..4f65a4226 100644 --- a/griptape/tasks/__init__.py +++ b/griptape/tasks/__init__.py @@ -1,6 +1,5 @@ from .base_task import BaseTask from .base_text_input_task import BaseTextInputTask -from .base_multi_text_input_task import BaseMultiTextInputTask from .prompt_task import PromptTask from .actions_subtask import ActionsSubtask from .toolkit_task import ToolkitTask @@ -23,7 +22,6 @@ __all__ = [ "BaseTask", "BaseTextInputTask", - "BaseMultiTextInputTask", "PromptTask", "ActionsSubtask", "ToolkitTask", diff --git a/griptape/tasks/actions_subtask.py b/griptape/tasks/actions_subtask.py index 38a96a603..9057fc127 100644 --- a/griptape/tasks/actions_subtask.py +++ b/griptape/tasks/actions_subtask.py @@ -108,7 +108,7 @@ def before_run(self) -> None: parts = [ f"Subtask {self.id}", - *([f"\nThought: {self.thought}"] if self.thought is not None else []), + *([f"\nThought: {self.thought}"] if self.thought else []), f"\nActions: 
{self.actions_to_json()}", ] logger.info("".join(parts)) diff --git a/griptape/tasks/base_image_generation_task.py b/griptape/tasks/base_image_generation_task.py index 326b2a551..4a502e7cc 100644 --- a/griptape/tasks/base_image_generation_task.py +++ b/griptape/tasks/base_image_generation_task.py @@ -66,4 +66,4 @@ def all_negative_rulesets(self) -> list[Ruleset]: def _read_from_file(self, path: str) -> ImageArtifact: logger.info("Reading image from %s", os.path.abspath(path)) - return ImageLoader().load(Path(path).read_bytes()) + return ImageLoader().load(Path(path)) diff --git a/griptape/tasks/base_multi_text_input_task.py b/griptape/tasks/base_multi_text_input_task.py deleted file mode 100644 index 347dd7e29..000000000 --- a/griptape/tasks/base_multi_text_input_task.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import annotations - -import logging -from abc import ABC -from typing import Callable - -from attrs import Factory, define, field - -from griptape.artifacts import ListArtifact, TextArtifact -from griptape.configs import Defaults -from griptape.mixins.rule_mixin import RuleMixin -from griptape.tasks import BaseTask -from griptape.utils import J2 - -logger = logging.getLogger(Defaults.logging_config.logger_name) - - -@define -class BaseMultiTextInputTask(RuleMixin, BaseTask, ABC): - DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}" - - _input: tuple[str, ...] | tuple[TextArtifact, ...] | tuple[Callable[[BaseTask], TextArtifact], ...] 
= field( - default=Factory(lambda self: (self.DEFAULT_INPUT_TEMPLATE,), takes_self=True), - alias="input", - ) - - @property - def input(self) -> ListArtifact: - if all(isinstance(elem, TextArtifact) for elem in self._input): - return ListArtifact([artifact for artifact in self._input if isinstance(artifact, TextArtifact)]) - elif all(isinstance(elem, Callable) for elem in self._input): - return ListArtifact( - [callable_input(self) for callable_input in self._input if isinstance(callable_input, Callable)] - ) - else: - return ListArtifact( - [ - TextArtifact(J2().render_from_string(input_template, **self.full_context)) - for input_template in self._input - if isinstance(input_template, str) - ], - ) - - @input.setter - def input( - self, - value: tuple[str, ...] | tuple[TextArtifact, ...] | tuple[Callable[[BaseTask], TextArtifact], ...], - ) -> None: - self._input = value - - def before_run(self) -> None: - super().before_run() - - joined_input = "\n".join([i.to_text() for i in self.input]) - logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, joined_input) - - def after_run(self) -> None: - super().after_run() - - logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) diff --git a/griptape/tasks/base_task.py b/griptape/tasks/base_task.py index 2c6743035..3fca55c30 100644 --- a/griptape/tasks/base_task.py +++ b/griptape/tasks/base_task.py @@ -78,7 +78,7 @@ def parents_output_text(self) -> str: @property def meta_memories(self) -> list[BaseMetaEntry]: - if self.structure and self.structure.meta_memory: + if self.structure is not None and self.structure.meta_memory: if self.max_meta_memory_entries: return self.structure.meta_memory.entries[: self.max_meta_memory_entries] else: @@ -191,11 +191,8 @@ def run(self) -> BaseArtifact: ... 
@property def full_context(self) -> dict[str, Any]: - if self.structure: - structure_context = self.structure.context(self) - - structure_context.update(self.context) + context = self.context + if self.structure is not None: + context.update(self.structure.context(self)) - return structure_context - else: - return {} + return context diff --git a/griptape/tasks/extraction_task.py b/griptape/tasks/extraction_task.py index c74c3ac49..43096dced 100644 --- a/griptape/tasks/extraction_task.py +++ b/griptape/tasks/extraction_task.py @@ -4,10 +4,11 @@ from attrs import define, field +from griptape.artifacts import ListArtifact from griptape.tasks import BaseTextInputTask if TYPE_CHECKING: - from griptape.artifacts import ErrorArtifact, ListArtifact + from griptape.artifacts import ErrorArtifact from griptape.engines import BaseExtractionEngine @@ -17,4 +18,4 @@ class ExtractionTask(BaseTextInputTask): args: dict = field(kw_only=True, factory=dict) def run(self) -> ListArtifact | ErrorArtifact: - return self.extraction_engine.extract(self.input.to_text(), rulesets=self.all_rulesets, **self.args) + return self.extraction_engine.extract_artifacts(ListArtifact([self.input]), rulesets=self.rulesets, **self.args) diff --git a/griptape/tasks/inpainting_image_generation_task.py b/griptape/tasks/inpainting_image_generation_task.py index 0ed28a11b..649f9e3fb 100644 --- a/griptape/tasks/inpainting_image_generation_task.py +++ b/griptape/tasks/inpainting_image_generation_task.py @@ -74,7 +74,7 @@ def run(self) -> ImageArtifact: prompts=[prompt_artifact.to_text()], image=image_artifact, mask=mask_artifact, - rulesets=self.all_rulesets, + rulesets=self.rulesets, negative_rulesets=self.negative_rulesets, ) diff --git a/griptape/tasks/outpainting_image_generation_task.py b/griptape/tasks/outpainting_image_generation_task.py index 6b63709db..019f74fa1 100644 --- a/griptape/tasks/outpainting_image_generation_task.py +++ b/griptape/tasks/outpainting_image_generation_task.py @@ -74,7 +74,7 
@@ def run(self) -> ImageArtifact: prompts=[prompt_artifact.to_text()], image=image_artifact, mask=mask_artifact, - rulesets=self.all_rulesets, + rulesets=self.rulesets, negative_rulesets=self.negative_rulesets, ) diff --git a/griptape/tasks/prompt_image_generation_task.py b/griptape/tasks/prompt_image_generation_task.py index 4d3356392..d2ebf79c2 100644 --- a/griptape/tasks/prompt_image_generation_task.py +++ b/griptape/tasks/prompt_image_generation_task.py @@ -53,7 +53,7 @@ def input(self, value: TextArtifact) -> None: def run(self) -> ImageArtifact: image_artifact = self.image_generation_engine.run( prompts=[self.input.to_text()], - rulesets=self.all_rulesets, + rulesets=self.rulesets, negative_rulesets=self.negative_rulesets, ) diff --git a/griptape/tasks/prompt_task.py b/griptape/tasks/prompt_task.py index d2dd20b36..127fabf48 100644 --- a/griptape/tasks/prompt_task.py +++ b/griptape/tasks/prompt_task.py @@ -9,6 +9,7 @@ from griptape.common import PromptStack from griptape.configs import Defaults from griptape.mixins.rule_mixin import RuleMixin +from griptape.rules import Ruleset from griptape.tasks import BaseTask from griptape.utils import J2 @@ -32,6 +33,22 @@ class PromptTask(RuleMixin, BaseTask): alias="input", ) + @property + def rulesets(self) -> list: + default_rules = self.rules + rulesets = self._rulesets + + if self.structure is not None: + if self.structure._rulesets: + rulesets = self.structure._rulesets + self._rulesets + if self.structure.rules: + default_rules = self.structure.rules + self.rules + + if default_rules: + rulesets.append(Ruleset(name=self.DEFAULT_RULESET_NAME, rules=default_rules)) + + return rulesets + @property def input(self) -> BaseArtifact: return self._process_task_input(self._input) @@ -45,7 +62,7 @@ def input(self, value: str | list | tuple | BaseArtifact | Callable[[BaseTask], @property def prompt_stack(self) -> PromptStack: stack = PromptStack() - memory = self.structure.conversation_memory + memory = 
self.structure.conversation_memory if self.structure is not None else None system_template = self.generate_system_template(self) if system_template: @@ -64,7 +81,7 @@ def prompt_stack(self) -> PromptStack: def default_system_template_generator(self, _: PromptTask) -> str: return J2("tasks/prompt_task/system.j2").render( - rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets), + rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets), ) def before_run(self) -> None: diff --git a/griptape/tasks/structure_run_task.py b/griptape/tasks/structure_run_task.py index 887a33b8a..6860958aa 100644 --- a/griptape/tasks/structure_run_task.py +++ b/griptape/tasks/structure_run_task.py @@ -4,7 +4,8 @@ from attrs import define, field -from griptape.tasks import BaseMultiTextInputTask +from griptape.artifacts.list_artifact import ListArtifact +from griptape.tasks.prompt_task import PromptTask if TYPE_CHECKING: from griptape.artifacts import BaseArtifact @@ -12,7 +13,7 @@ @define -class StructureRunTask(BaseMultiTextInputTask): +class StructureRunTask(PromptTask): """Task to run a Structure. 
Attributes: @@ -22,4 +23,7 @@ class StructureRunTask(BaseMultiTextInputTask): driver: BaseStructureRunDriver = field(kw_only=True) def run(self) -> BaseArtifact: - return self.driver.run(*self.input) + if isinstance(self.input, ListArtifact): + return self.driver.run(*self.input.value) + else: + return self.driver.run(self.input) diff --git a/griptape/tasks/text_summary_task.py b/griptape/tasks/text_summary_task.py index dc1a7b8be..4861510d6 100644 --- a/griptape/tasks/text_summary_task.py +++ b/griptape/tasks/text_summary_task.py @@ -17,4 +17,4 @@ class TextSummaryTask(BaseTextInputTask): summary_engine: BaseSummaryEngine = field(default=Factory(lambda: PromptSummaryEngine()), kw_only=True) def run(self) -> TextArtifact: - return TextArtifact(self.summary_engine.summarize_text(self.input.to_text(), rulesets=self.all_rulesets)) + return TextArtifact(self.summary_engine.summarize_text(self.input.to_text(), rulesets=self.rulesets)) diff --git a/griptape/tasks/text_to_speech_task.py b/griptape/tasks/text_to_speech_task.py index 680a67603..c131d69bc 100644 --- a/griptape/tasks/text_to_speech_task.py +++ b/griptape/tasks/text_to_speech_task.py @@ -35,7 +35,7 @@ def input(self, value: TextArtifact) -> None: self._input = value def run(self) -> AudioArtifact: - audio_artifact = self.text_to_speech_engine.run(prompts=[self.input.to_text()], rulesets=self.all_rulesets) + audio_artifact = self.text_to_speech_engine.run(prompts=[self.input.to_text()], rulesets=self.rulesets) if self.output_dir or self.output_file: self._write_to_file(audio_artifact) diff --git a/griptape/tasks/tool_task.py b/griptape/tasks/tool_task.py index 7ae63b902..38d6e1512 100644 --- a/griptape/tasks/tool_task.py +++ b/griptape/tasks/tool_task.py @@ -51,7 +51,7 @@ def preprocess(self, structure: Structure) -> ToolTask: def default_system_template_generator(self, _: PromptTask) -> str: return J2("tasks/tool_task/system.j2").render( - rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets), 
+ rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets), action_schema=utils.minify_json(json.dumps(self.tool.schema())), meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories), use_native_tools=self.prompt_driver.use_native_tools, diff --git a/griptape/tasks/toolkit_task.py b/griptape/tasks/toolkit_task.py index ed9860c66..2a4a926bd 100644 --- a/griptape/tasks/toolkit_task.py +++ b/griptape/tasks/toolkit_task.py @@ -68,7 +68,7 @@ def tool_output_memory(self) -> list[TaskMemory]: @property def prompt_stack(self) -> PromptStack: stack = PromptStack(tools=self.tools) - memory = self.structure.conversation_memory + memory = self.structure.conversation_memory if self.structure is not None else None stack.add_system_message(self.generate_system_template(self)) @@ -113,7 +113,7 @@ def prompt_stack(self) -> PromptStack: stack.add_assistant_message(self.generate_assistant_subtask_template(s)) stack.add_user_message(self.generate_user_subtask_template(s)) - if memory: + if memory is not None: # inserting at index 1 to place memory right after system prompt memory.add_to_prompt_stack(self.prompt_driver, stack, 1) @@ -132,7 +132,7 @@ def default_system_template_generator(self, _: PromptTask) -> str: schema["minItems"] = 1 # The `schema` library doesn't support `minItems` so we must add it manually. 
return J2("tasks/toolkit_task/system.j2").render( - rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets), + rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets), action_names=str.join(", ", [tool.name for tool in self.tools]), actions_schema=utils.minify_json(json.dumps(schema)), meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories), diff --git a/griptape/tasks/variation_image_generation_task.py b/griptape/tasks/variation_image_generation_task.py index e3feaeac5..ddc16178b 100644 --- a/griptape/tasks/variation_image_generation_task.py +++ b/griptape/tasks/variation_image_generation_task.py @@ -66,7 +66,7 @@ def run(self) -> ImageArtifact: output_image_artifact = self.image_generation_engine.run( prompts=[prompt_artifact.to_text()], image=image_artifact, - rulesets=self.all_rulesets, + rulesets=self.rulesets, negative_rulesets=self.negative_rulesets, ) diff --git a/griptape/tokenizers/google_tokenizer.py b/griptape/tokenizers/google_tokenizer.py index 87020bd96..144c09d75 100644 --- a/griptape/tokenizers/google_tokenizer.py +++ b/griptape/tokenizers/google_tokenizer.py @@ -2,10 +2,11 @@ from typing import TYPE_CHECKING -from attrs import Factory, define, field +from attrs import define, field from griptape.tokenizers import BaseTokenizer from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property if TYPE_CHECKING: from google.generativeai import GenerativeModel @@ -17,16 +18,14 @@ class GoogleTokenizer(BaseTokenizer): MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"gemini": 2048} api_key: str = field(kw_only=True, metadata={"serializable": True}) - model_client: GenerativeModel = field( - default=Factory(lambda self: self._default_model_client(), takes_self=True), - kw_only=True, - ) + _client: GenerativeModel = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) - def count_tokens(self, text: str) -> int: - return 
self.model_client.count_tokens(text).total_tokens - - def _default_model_client(self) -> GenerativeModel: + @lazy_property() + def client(self) -> GenerativeModel: genai = import_optional_dependency("google.generativeai") genai.configure(api_key=self.api_key) return genai.GenerativeModel(self.model) + + def count_tokens(self, text: str) -> int: + return self.client.count_tokens(text).total_tokens diff --git a/griptape/tools/audio_transcription/manifest.yml b/griptape/tools/audio_transcription/manifest.yml deleted file mode 100644 index 32b017c55..000000000 --- a/griptape/tools/audio_transcription/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Transcription Tool -description: A tool for generating transcription of audio. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/audio_transcription/tool.py b/griptape/tools/audio_transcription/tool.py index 4174db209..826aeb895 100644 --- a/griptape/tools/audio_transcription/tool.py +++ b/griptape/tools/audio_transcription/tool.py @@ -1,6 +1,5 @@ from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING, Any, cast from attrs import Factory, define, field @@ -32,7 +31,7 @@ class AudioTranscriptionTool(BaseTool): def transcribe_audio_from_disk(self, params: dict) -> TextArtifact | ErrorArtifact: audio_path = params["values"]["path"] - audio_artifact = self.audio_loader.load(Path(audio_path).read_bytes()) + audio_artifact = self.audio_loader.load(audio_path) return self.engine.run(audio_artifact) diff --git a/griptape/tools/aws_iam/manifest.yml b/griptape/tools/aws_iam/manifest.yml deleted file mode 100644 index 072d4f92e..000000000 --- a/griptape/tools/aws_iam/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: AWS IAM Tool -description: Tool for the IAM boto3 API. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/aws_iam/tool.py b/griptape/tools/aws_iam/tool.py index 8d22dd3c9..6c1bed054 100644 --- a/griptape/tools/aws_iam/tool.py +++ b/griptape/tools/aws_iam/tool.py @@ -2,20 +2,24 @@ from typing import TYPE_CHECKING -from attrs import Factory, define, field +from attrs import define, field from schema import Literal, Schema from griptape.artifacts import ErrorArtifact, ListArtifact, TextArtifact from griptape.tools import BaseAwsTool -from griptape.utils.decorators import activity +from griptape.utils.decorators import activity, lazy_property if TYPE_CHECKING: - from mypy_boto3_iam import Client + from mypy_boto3_iam import IAMClient @define class AwsIamTool(BaseAwsTool): - iam_client: Client = field(default=Factory(lambda self: self.session.client("iam"), takes_self=True), kw_only=True) + _client: IAMClient = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> IAMClient: + return self.session.client("iam") @activity( config={ @@ -33,7 +37,7 @@ class AwsIamTool(BaseAwsTool): ) def get_user_policy(self, params: dict) -> TextArtifact | ErrorArtifact: try: - policy = self.iam_client.get_user_policy( + policy = self.client.get_user_policy( UserName=params["values"]["user_name"], PolicyName=params["values"]["policy_name"], ) @@ -44,7 +48,7 @@ def get_user_policy(self, params: dict) -> TextArtifact | ErrorArtifact: @activity(config={"description": "Can be used to list AWS MFA Devices"}) def list_mfa_devices(self, _: dict) -> ListArtifact | ErrorArtifact: try: - devices = self.iam_client.list_mfa_devices() + devices = self.client.list_mfa_devices() return ListArtifact([TextArtifact(str(d)) for d in devices["MFADevices"]]) except Exception as e: return ErrorArtifact(f"error listing mfa devices: {e}") @@ -59,10 +63,10 @@ def list_mfa_devices(self, _: dict) -> ListArtifact | 
ErrorArtifact: ) def list_user_policies(self, params: dict) -> ListArtifact | ErrorArtifact: try: - policies = self.iam_client.list_user_policies(UserName=params["values"]["user_name"]) + policies = self.client.list_user_policies(UserName=params["values"]["user_name"]) policy_names = policies["PolicyNames"] - attached_policies = self.iam_client.list_attached_user_policies(UserName=params["values"]["user_name"]) + attached_policies = self.client.list_attached_user_policies(UserName=params["values"]["user_name"]) attached_policy_names = [ p["PolicyName"] for p in attached_policies["AttachedPolicies"] if "PolicyName" in p ] @@ -74,7 +78,7 @@ def list_user_policies(self, params: dict) -> ListArtifact | ErrorArtifact: @activity(config={"description": "Can be used to list AWS IAM users."}) def list_users(self, _: dict) -> ListArtifact | ErrorArtifact: try: - users = self.iam_client.list_users() + users = self.client.list_users() return ListArtifact([TextArtifact(str(u)) for u in users["Users"]]) except Exception as e: return ErrorArtifact(f"error listing s3 users: {e}") diff --git a/griptape/tools/aws_s3/manifest.yml b/griptape/tools/aws_s3/manifest.yml deleted file mode 100644 index a48169f0c..000000000 --- a/griptape/tools/aws_s3/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: AWS S3 Tool -description: Tool for the S3 boto3 API. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/aws_s3/tool.py b/griptape/tools/aws_s3/tool.py index 24d091d71..b352da2d5 100644 --- a/griptape/tools/aws_s3/tool.py +++ b/griptape/tools/aws_s3/tool.py @@ -3,20 +3,24 @@ import io from typing import TYPE_CHECKING, Any -from attrs import Factory, define, field +from attrs import define, field from schema import Literal, Schema from griptape.artifacts import BlobArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact from griptape.tools import BaseAwsTool -from griptape.utils.decorators import activity +from griptape.utils.decorators import activity, lazy_property if TYPE_CHECKING: - from mypy_boto3_s3 import Client + from mypy_boto3_s3 import S3Client @define class AwsS3Tool(BaseAwsTool): - s3_client: Client = field(default=Factory(lambda self: self.session.client("s3"), takes_self=True), kw_only=True) + _client: S3Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + + @lazy_property() + def client(self) -> S3Client: + return self.session.client("s3") @activity( config={ @@ -33,7 +37,7 @@ class AwsS3Tool(BaseAwsTool): ) def get_bucket_acl(self, params: dict) -> TextArtifact | ErrorArtifact: try: - acl = self.s3_client.get_bucket_acl(Bucket=params["values"]["bucket_name"]) + acl = self.client.get_bucket_acl(Bucket=params["values"]["bucket_name"]) return TextArtifact(acl) except Exception as e: return ErrorArtifact(f"error getting bucket acl: {e}") @@ -48,7 +52,7 @@ def get_bucket_acl(self, params: dict) -> TextArtifact | ErrorArtifact: ) def get_bucket_policy(self, params: dict) -> TextArtifact | ErrorArtifact: try: - policy = self.s3_client.get_bucket_policy(Bucket=params["values"]["bucket_name"]) + policy = self.client.get_bucket_policy(Bucket=params["values"]["bucket_name"]) return TextArtifact(policy) except Exception as e: return ErrorArtifact(f"error getting bucket policy: 
{e}") @@ -66,7 +70,7 @@ def get_bucket_policy(self, params: dict) -> TextArtifact | ErrorArtifact: ) def get_object_acl(self, params: dict) -> TextArtifact | ErrorArtifact: try: - acl = self.s3_client.get_object_acl( + acl = self.client.get_object_acl( Bucket=params["values"]["bucket_name"], Key=params["values"]["object_key"], ) @@ -77,7 +81,7 @@ def get_object_acl(self, params: dict) -> TextArtifact | ErrorArtifact: @activity(config={"description": "Can be used to list all AWS S3 buckets."}) def list_s3_buckets(self, _: dict) -> ListArtifact | ErrorArtifact: try: - buckets = self.s3_client.list_buckets() + buckets = self.client.list_buckets() return ListArtifact([TextArtifact(str(b)) for b in buckets["Buckets"]]) except Exception as e: @@ -91,7 +95,7 @@ def list_s3_buckets(self, _: dict) -> ListArtifact | ErrorArtifact: ) def list_objects(self, params: dict) -> ListArtifact | ErrorArtifact: try: - objects = self.s3_client.list_objects_v2(Bucket=params["values"]["bucket_name"]) + objects = self.client.list_objects_v2(Bucket=params["values"]["bucket_name"]) if "Contents" not in objects: return ErrorArtifact("no objects found in the bucket") @@ -192,7 +196,7 @@ def download_objects(self, params: dict) -> ListArtifact | ErrorArtifact: artifacts = [] for object_info in objects: try: - obj = self.s3_client.get_object(Bucket=object_info["bucket_name"], Key=object_info["object_key"]) + obj = self.client.get_object(Bucket=object_info["bucket_name"], Key=object_info["object_key"]) content = obj["Body"].read() artifacts.append(BlobArtifact(content, name=object_info["object_key"])) @@ -203,9 +207,9 @@ def download_objects(self, params: dict) -> ListArtifact | ErrorArtifact: return ListArtifact(artifacts) def _upload_object(self, bucket_name: str, object_name: str, value: Any) -> None: - self.s3_client.create_bucket(Bucket=bucket_name) + self.client.create_bucket(Bucket=bucket_name) - self.s3_client.upload_fileobj( + self.client.upload_fileobj( 
Fileobj=io.BytesIO(value.encode() if isinstance(value, str) else value), Bucket=bucket_name, Key=object_name, diff --git a/griptape/tools/base_tool.py b/griptape/tools/base_tool.py index b846ec40b..81f127791 100644 --- a/griptape/tools/base_tool.py +++ b/griptape/tools/base_tool.py @@ -10,7 +10,6 @@ from typing import TYPE_CHECKING, Any, Callable, Optional import schema -import yaml from attrs import Attribute, Factory, define, field from schema import Literal, Or, Schema @@ -38,7 +37,6 @@ class BaseTool(ActivityMixin, ABC): off_prompt: Determines whether tool activity output goes to the output memory. """ - MANIFEST_FILE = "manifest.yml" REQUIREMENTS_FILE = "requirements.txt" name: str = field(default=Factory(lambda self: self.__class__.__name__, takes_self=True), kw_only=True) @@ -67,19 +65,10 @@ def validate_output_memory(self, _: Attribute, output_memory: dict[str, Optional if len(output_memory_names) > len(set(output_memory_names)): raise ValueError(f"memory names have to be unique in activity '{activity_name}' output") - @property - def manifest_path(self) -> str: - return os.path.join(self.abs_dir_path, self.MANIFEST_FILE) - @property def requirements_path(self) -> str: return os.path.join(self.abs_dir_path, self.REQUIREMENTS_FILE) - @property - def manifest(self) -> dict: - with open(self.manifest_path) as yaml_file: - return yaml.safe_load(yaml_file) - @property def abs_file_path(self) -> str: return os.path.abspath(inspect.getfile(self.__class__)) @@ -173,16 +162,8 @@ def after_run( return InfoArtifact("Tool returned an empty value") def validate(self) -> bool: - from griptape.utils import ManifestValidator - - if not os.path.exists(self.manifest_path): - raise Exception(f"{self.MANIFEST_FILE} not found") - if not os.path.exists(self.requirements_path): raise Exception(f"{self.REQUIREMENTS_FILE} not found") - - ManifestValidator().validate(self.manifest) - return True def tool_dir(self) -> str: diff --git a/griptape/tools/calculator/manifest.yml 
b/griptape/tools/calculator/manifest.yml deleted file mode 100644 index dd902c616..000000000 --- a/griptape/tools/calculator/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Calculator Tool -description: Tool for making simple calculations in Python. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/computer/manifest.yml b/griptape/tools/computer/manifest.yml deleted file mode 100644 index 4c5d30495..000000000 --- a/griptape/tools/computer/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Computer Tool -description: Tool that allows LLMs to run Python code and access the shell -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/date_time/manifest.yml b/griptape/tools/date_time/manifest.yml deleted file mode 100644 index da8e553a5..000000000 --- a/griptape/tools/date_time/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Date Time Tool -description: Tool that allows LLMs to retrieve the current date & time -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/email/manifest.yml b/griptape/tools/email/manifest.yml deleted file mode 100644 index 08009292d..000000000 --- a/griptape/tools/email/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Email Tool -description: Tool for working with email. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/extraction/manifest.yml b/griptape/tools/extraction/manifest.yml deleted file mode 100644 index 9c489d9f6..000000000 --- a/griptape/tools/extraction/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Extraction Client -description: Tool for performing structured extractions on unstructured data. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/extraction/tool.py b/griptape/tools/extraction/tool.py index 3cb46a670..7e52dad69 100644 --- a/griptape/tools/extraction/tool.py +++ b/griptape/tools/extraction/tool.py @@ -57,4 +57,4 @@ def extract(self, params: dict) -> ListArtifact | InfoArtifact | ErrorArtifact: else: return ErrorArtifact("memory not found") - return self.extraction_engine.extract(artifacts) + return self.extraction_engine.extract_artifacts(artifacts) diff --git a/griptape/tools/file_manager/manifest.yml b/griptape/tools/file_manager/manifest.yml deleted file mode 100644 index 132a03327..000000000 --- a/griptape/tools/file_manager/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: File Manager Tool -description: Tool for managing files in the local environment. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/file_manager/tool.py b/griptape/tools/file_manager/tool.py index b72f82329..8dc0c9393 100644 --- a/griptape/tools/file_manager/tool.py +++ b/griptape/tools/file_manager/tool.py @@ -5,9 +5,12 @@ from attrs import Factory, define, field from schema import Literal, Schema +import griptape.loaders as loaders from griptape.artifacts import ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact from griptape.drivers import BaseFileManagerDriver, LocalFileManagerDriver +from griptape.loaders.blob_loader import BlobLoader from griptape.tools import BaseTool +from griptape.utils import get_mime_type from griptape.utils.decorators import activity @@ -21,6 +24,20 @@ class FileManagerTool(BaseTool): file_manager_driver: BaseFileManagerDriver = field(default=Factory(lambda: LocalFileManagerDriver()), kw_only=True) + loaders: dict[str, loaders.BaseLoader] = field( + default=Factory( + lambda self: { + "application/pdf": loaders.PdfLoader(file_manager_driver=self.file_manager_driver), + "text/csv": 
loaders.CsvLoader(file_manager_driver=self.file_manager_driver), + "text": loaders.TextLoader(file_manager_driver=self.file_manager_driver), + "image": loaders.ImageLoader(file_manager_driver=self.file_manager_driver), + "application/octet-stream": BlobLoader(file_manager_driver=self.file_manager_driver), + }, + takes_self=True, + ), + kw_only=True, + ) + @activity( config={ "description": "Can be used to list files on disk", @@ -51,7 +68,11 @@ def load_files_from_disk(self, params: dict) -> ListArtifact | ErrorArtifact: artifacts = [] for path in paths: - artifact = self.file_manager_driver.load_file(path) + abs_path = os.path.join(self.file_manager_driver.workdir, path) + mime_type = get_mime_type(abs_path) + loader = next((loader for key, loader in self.loaders.items() if mime_type.startswith(key)), self.loaders["application/octet-stream"]) + + artifact = loader.load(path) if isinstance(artifact, ListArtifact): artifacts.extend(artifact.value) else: diff --git a/griptape/tools/google_calendar/manifest.yml b/griptape/tools/google_calendar/manifest.yml deleted file mode 100644 index ffc4765db..000000000 --- a/griptape/tools/google_calendar/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Google Calendar Tool -description: Tool for working with Google Calendar. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/google_docs/manifest.yml b/griptape/tools/google_docs/manifest.yml deleted file mode 100644 index b0b6e648f..000000000 --- a/griptape/tools/google_docs/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Google Docs Tool -description: Tool for working with Google Docs. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/google_drive/manifest.yml b/griptape/tools/google_drive/manifest.yml deleted file mode 100644 index 22e1e54f8..000000000 --- a/griptape/tools/google_drive/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Google Drive Tool -description: Tool for working with Google Drive. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/google_gmail/manifest.yml b/griptape/tools/google_gmail/manifest.yml deleted file mode 100644 index 869575166..000000000 --- a/griptape/tools/google_gmail/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Google Gmail Tool -description: Tool for working with Google Gmail. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/griptape_cloud_knowledge_base/manifest.yml b/griptape/tools/griptape_cloud_knowledge_base/manifest.yml deleted file mode 100644 index 7262964c3..000000000 --- a/griptape/tools/griptape_cloud_knowledge_base/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Griptape Cloud Knowledge Base Tool -description: Tool for using the Griptape Cloud Knowledge Base API. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/image_query/manifest.yml b/griptape/tools/image_query/manifest.yml deleted file mode 100644 index 504543fca..000000000 --- a/griptape/tools/image_query/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Image Query Tool -description: Tool for executing a natural language query on images. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/image_query/tool.py b/griptape/tools/image_query/tool.py index 9d1dbb89b..7b654bd72 100644 --- a/griptape/tools/image_query/tool.py +++ b/griptape/tools/image_query/tool.py @@ -1,6 +1,5 @@ from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING, Any, cast from attrs import Factory, define, field @@ -41,7 +40,7 @@ def query_image_from_disk(self, params: dict) -> TextArtifact | ErrorArtifact: image_artifacts = [] for image_path in image_paths: - image_artifacts.append(self.image_loader.load(Path(image_path).read_bytes())) + image_artifacts.append(self.image_loader.load(image_path)) return self.image_query_engine.run(query, image_artifacts) diff --git a/griptape/tools/inpainting_image_generation/manifest.yml b/griptape/tools/inpainting_image_generation/manifest.yml deleted file mode 100644 index d6592b741..000000000 --- a/griptape/tools/inpainting_image_generation/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Inpainting Image Generation Tool -description: Tool for generating images through image inpainting. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/inpainting_image_generation/tool.py b/griptape/tools/inpainting_image_generation/tool.py index d32f481d9..b529cb637 100644 --- a/griptape/tools/inpainting_image_generation/tool.py +++ b/griptape/tools/inpainting_image_generation/tool.py @@ -1,6 +1,5 @@ from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING, cast from attrs import define, field @@ -51,8 +50,8 @@ def image_inpainting_from_file(self, params: dict[str, dict[str, str]]) -> Image image_file = params["values"]["image_file"] mask_file = params["values"]["mask_file"] - input_artifact = self.image_loader.load(Path(image_file).read_bytes()) - mask_artifact = self.image_loader.load(Path(mask_file).read_bytes()) + input_artifact = self.image_loader.load(image_file) + mask_artifact = self.image_loader.load(mask_file) return self._generate_inpainting( prompt, negative_prompt, cast(ImageArtifact, input_artifact), cast(ImageArtifact, mask_artifact) diff --git a/griptape/tools/openweather/manifest.yml b/griptape/tools/openweather/manifest.yml deleted file mode 100644 index 315143ea2..000000000 --- a/griptape/tools/openweather/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: OpenWeather Tool -description: Tool for using OpenWeather to retrieve weather information -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/outpainting_image_generation/manifest.yml b/griptape/tools/outpainting_image_generation/manifest.yml deleted file mode 100644 index 8b7ca14a1..000000000 --- a/griptape/tools/outpainting_image_generation/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Outpainting Image Generation Tool -description: Tool for generating images through image outpainting. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/outpainting_image_generation/tool.py b/griptape/tools/outpainting_image_generation/tool.py index afa39e178..47863b03d 100644 --- a/griptape/tools/outpainting_image_generation/tool.py +++ b/griptape/tools/outpainting_image_generation/tool.py @@ -1,6 +1,5 @@ from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING, cast from attrs import define, field @@ -51,8 +50,8 @@ def image_outpainting_from_file(self, params: dict[str, dict[str, str]]) -> Imag image_file = params["values"]["image_file"] mask_file = params["values"]["mask_file"] - input_artifact = self.image_loader.load(Path(image_file).read_bytes()) - mask_artifact = self.image_loader.load(Path(mask_file).read_bytes()) + input_artifact = self.image_loader.load(image_file) + mask_artifact = self.image_loader.load(mask_file) return self._generate_outpainting(prompt, negative_prompt, input_artifact, mask_artifact) diff --git a/griptape/tools/prompt_image_generation/manifest.yml b/griptape/tools/prompt_image_generation/manifest.yml deleted file mode 100644 index 091cc14d7..000000000 --- a/griptape/tools/prompt_image_generation/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Prompt Image Generation Tool -description: Tool for generating images from text prompts. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/prompt_summary/manifest.yml b/griptape/tools/prompt_summary/manifest.yml deleted file mode 100644 index a83ea4021..000000000 --- a/griptape/tools/prompt_summary/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Prompt Summary Client -description: Tool for using a Prompt Summary Engine -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/prompt_summary/tool.py b/griptape/tools/prompt_summary/tool.py index 517507380..e12b19547 100644 --- a/griptape/tools/prompt_summary/tool.py +++ b/griptape/tools/prompt_summary/tool.py @@ -52,4 +52,4 @@ def summarize(self, params: dict) -> BaseArtifact: else: return ErrorArtifact("memory not found") - return self.prompt_summary_engine.summarize_artifacts(artifacts, rulesets=self.all_rulesets) + return self.prompt_summary_engine.summarize_artifacts(artifacts, rulesets=self.rulesets) diff --git a/griptape/tools/query/manifest.yml b/griptape/tools/query/manifest.yml deleted file mode 100644 index 086a86d5a..000000000 --- a/griptape/tools/query/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Query Client -description: Tool for performing a query against data. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/rag/manifest.yml b/griptape/tools/rag/manifest.yml deleted file mode 100644 index 7a3d49c65..000000000 --- a/griptape/tools/rag/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: RAG Tool -description: Tool for querying RAG engines -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/rest_api/manifest.yml b/griptape/tools/rest_api/manifest.yml deleted file mode 100644 index 01816e483..000000000 --- a/griptape/tools/rest_api/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Rest Api Tool -description: Tool for calling rest apis. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/sql/manifest.yml b/griptape/tools/sql/manifest.yml deleted file mode 100644 index 2e1459a0d..000000000 --- a/griptape/tools/sql/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: SQL Tool -description: Tool for executing SQL queries. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/sql/tool.py b/griptape/tools/sql/tool.py index a84bb87be..59d0a3dba 100644 --- a/griptape/tools/sql/tool.py +++ b/griptape/tools/sql/tool.py @@ -51,6 +51,6 @@ def execute_query(self, params: dict) -> ListArtifact | InfoArtifact | ErrorArti return ErrorArtifact(f"error executing query: {e}") if len(rows) > 0: - return ListArtifact(rows) + return rows else: return InfoArtifact("No results found") diff --git a/griptape/tools/structure_run/manifest.yml b/griptape/tools/structure_run/manifest.yml deleted file mode 100644 index b5feb835a..000000000 --- a/griptape/tools/structure_run/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Structure Run Tool -description: Tool for running a Structure. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/text_to_speech/manifest.yml b/griptape/tools/text_to_speech/manifest.yml deleted file mode 100644 index 875e04576..000000000 --- a/griptape/tools/text_to_speech/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Text to Speech Tool -description: A tool for generating speech from text. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/variation_image_generation/manifest.yml b/griptape/tools/variation_image_generation/manifest.yml deleted file mode 100644 index 1f3eb28e8..000000000 --- a/griptape/tools/variation_image_generation/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Variation Image Generation Tool -description: Tool for generating variations of existing images. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/variation_image_generation/tool.py b/griptape/tools/variation_image_generation/tool.py index 0d4456c2f..1fb8c8bcc 100644 --- a/griptape/tools/variation_image_generation/tool.py +++ b/griptape/tools/variation_image_generation/tool.py @@ -1,6 +1,5 @@ from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING, cast from attrs import define, field @@ -49,7 +48,7 @@ def image_variation_from_file(self, params: dict[str, dict[str, str]]) -> ImageA negative_prompt = params["values"]["negative_prompt"] image_file = params["values"]["image_file"] - image_artifact = self.image_loader.load(Path(image_file).read_bytes()) + image_artifact = self.image_loader.load(image_file) return self._generate_variation(prompt, negative_prompt, image_artifact) diff --git a/griptape/tools/vector_store/manifest.yml b/griptape/tools/vector_store/manifest.yml deleted file mode 100644 index d1fab7ce5..000000000 --- a/griptape/tools/vector_store/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: 
"v1" -name: Vector Store Tool -description: Tool for storing and accessing data in vector stores -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/web_scraper/manifest.yml b/griptape/tools/web_scraper/manifest.yml deleted file mode 100644 index ec9d3db25..000000000 --- a/griptape/tools/web_scraper/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Web Scraper Tool -description: Tool for scraping web pages for content, titles, authors, and keywords. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/web_scraper/tool.py b/griptape/tools/web_scraper/tool.py index 2895d5e0d..982123b30 100644 --- a/griptape/tools/web_scraper/tool.py +++ b/griptape/tools/web_scraper/tool.py @@ -4,6 +4,7 @@ from schema import Literal, Schema from griptape.artifacts import ErrorArtifact, ListArtifact +from griptape.chunkers import TextChunker from griptape.loaders import WebLoader from griptape.tools import BaseTool from griptape.utils.decorators import activity @@ -12,6 +13,7 @@ @define class WebScraperTool(BaseTool): web_loader: WebLoader = field(default=Factory(lambda: WebLoader()), kw_only=True) + text_chunker: TextChunker = field(default=Factory(lambda: TextChunker()), kw_only=True) @activity( config={ @@ -24,6 +26,8 @@ def get_content(self, params: dict) -> ListArtifact | ErrorArtifact: try: result = self.web_loader.load(url) - return ListArtifact(result) + chunks = TextChunker().chunk(result) + + return ListArtifact(chunks) except Exception as e: return ErrorArtifact("Error getting page content: " + str(e)) diff --git a/griptape/tools/web_search/manifest.yml b/griptape/tools/web_search/manifest.yml deleted file mode 100644 index c06db4f20..000000000 --- a/griptape/tools/web_search/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Web Search Tool -description: Tool for making making web searches. 
-contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/utils/__init__.py b/griptape/utils/__init__.py index 03725f59d..77e3f3b0a 100644 --- a/griptape/utils/__init__.py +++ b/griptape/utils/__init__.py @@ -8,7 +8,6 @@ from .futures import execute_futures_dict, execute_futures_list, execute_futures_list_dict from .token_counter import TokenCounter from .dict_utils import remove_null_values_in_dict_recursively, dict_merge, remove_key_in_dict_recursively -from .file_utils import load_file, load_files from .hash import str_to_hash from .import_utils import import_optional_dependency from .import_utils import is_dependency_installed @@ -17,6 +16,7 @@ from .deprecation import deprecation_warn from .structure_visualizer import StructureVisualizer from .reference_utils import references_from_artifacts +from .file_utils import get_mime_type def minify_json(value: str) -> str: @@ -44,8 +44,7 @@ def minify_json(value: str) -> str: "Stream", "load_artifact_from_memory", "deprecation_warn", - "load_file", - "load_files", "StructureVisualizer", "references_from_artifacts", + "get_mime_type", ] diff --git a/griptape/utils/decorators.py b/griptape/utils/decorators.py index 2ea296693..356f4eec0 100644 --- a/griptape/utils/decorators.py +++ b/griptape/utils/decorators.py @@ -6,7 +6,12 @@ import schema from schema import Schema -CONFIG_SCHEMA = Schema({"description": str, schema.Optional("schema"): Schema}) +CONFIG_SCHEMA = Schema( + { + "description": str, + schema.Optional("schema"): lambda data: isinstance(data, (Schema, Callable)), + } +) def activity(config: dict) -> Any: diff --git a/griptape/utils/file_utils.py b/griptape/utils/file_utils.py index 19c9f699c..0dbbbc093 100644 --- a/griptape/utils/file_utils.py +++ b/griptape/utils/file_utils.py @@ -1,38 +1,16 @@ -from __future__ import annotations +import mimetypes -from concurrent import futures -from pathlib import Path -from typing import Optional +import filetype -import 
griptape.utils as utils +def get_mime_type(file_path: str) -> str: + filetype_guess = filetype.guess(file_path) -def load_file(path: str) -> bytes: - """Load a file from the given path and return its content as bytes. - - Args: - path (str): The path to the file to load. - - Returns: - The content of the file. - """ - return Path(path).read_bytes() - - -def load_files(paths: list[str], futures_executor: Optional[futures.ThreadPoolExecutor] = None) -> dict[str, bytes]: - """Load multiple files concurrently and return a dictionary of their content. - - Args: - paths: The paths to the files to load. - futures_executor: The executor to use for concurrent loading. If None, a new ThreadPoolExecutor will be created. - - Returns: - A dictionary where the keys are a hash of the path and the values are the content of the files. - """ - if futures_executor is None: - futures_executor = futures.ThreadPoolExecutor() - - with futures_executor as executor: - return utils.execute_futures_dict( - {utils.str_to_hash(str(path)): executor.submit(load_file, path) for path in paths}, - ) + if filetype_guess is None: + type_, _ = mimetypes.guess_type(file_path) + if type_ is None: + return "application/octet-stream" + else: + return type_ + else: + return filetype_guess.mime diff --git a/mkdocs.yml b/mkdocs.yml index 4a00919c6..0be4ec7e5 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -88,6 +88,7 @@ nav: - Create a Structure: "griptape-cloud/structures/create-structure.md" - Structure Config YAML: "griptape-cloud/structures/structure-config.md" - Running Your Structure: "griptape-cloud/structures/run-structure.md" + - Structure Run Events: "griptape-cloud/structures/structure-run-events.md" - Cloud API: - API Reference: "griptape-cloud/api/api-reference.md" - Framework: @@ -130,8 +131,8 @@ nav: - Data: - Overview: "griptape-framework/data/index.md" - Artifacts: "griptape-framework/data/artifacts.md" - - Chunkers: "griptape-framework/data/chunkers.md" - Loaders: 
"griptape-framework/data/loaders.md" + - Chunkers: "griptape-framework/data/chunkers.md" - Misc: - Events: "griptape-framework/misc/events.md" - Tokenizers: "griptape-framework/misc/tokenizers.md" diff --git a/poetry.lock b/poetry.lock index 744068c4c..1743a9650 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,36 +1,5 @@ # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. -[[package]] -name = "accelerate" -version = "0.34.2" -description = "Accelerate" -optional = true -python-versions = ">=3.8.0" -files = [ - {file = "accelerate-0.34.2-py3-none-any.whl", hash = "sha256:d69159e2c4e4a473d14443b27d2d732929254e826b3ab4813b3785b5ac616c7c"}, - {file = "accelerate-0.34.2.tar.gz", hash = "sha256:98c1ebe1f5a45c0a3af02dc60b5bb8b7d58d60c3326a326a06ce6d956b18ca5b"}, -] - -[package.dependencies] -huggingface-hub = ">=0.21.0" -numpy = ">=1.17,<3.0.0" -packaging = ">=20.0" -psutil = "*" -pyyaml = "*" -safetensors = ">=0.4.3" -torch = ">=1.10.0" - -[package.extras] -deepspeed = ["deepspeed"] -dev = ["bitsandbytes", "black (>=23.1,<24.0)", "datasets", "diffusers", "evaluate", "hf-doc-builder (>=0.3.0)", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "rich", "ruff (>=0.2.1,<0.3.0)", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"] -quality = ["black (>=23.1,<24.0)", "hf-doc-builder (>=0.3.0)", "ruff (>=0.2.1,<0.3.0)"] -rich = ["rich"] -sagemaker = ["sagemaker"] -test-dev = ["bitsandbytes", "datasets", "diffusers", "evaluate", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"] -test-prod = ["parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist"] -test-trackers = ["comet-ml", "dvclive", "tensorboard", "wandb"] -testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", 
"timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"] - [[package]] name = "aiohappyeyeballs" version = "2.4.0" @@ -192,13 +161,13 @@ files = [ [[package]] name = "anthropic" -version = "0.34.2" +version = "0.35.0" description = "The official Python library for the anthropic API" optional = true python-versions = ">=3.7" files = [ - {file = "anthropic-0.34.2-py3-none-any.whl", hash = "sha256:f50a628eb71e2c76858b106c8cbea278c45c6bd2077cb3aff716a112abddc9fc"}, - {file = "anthropic-0.34.2.tar.gz", hash = "sha256:808ea19276f26646bfde9ee535669735519376e4eeb301a2974fc69892be1d6e"}, + {file = "anthropic-0.35.0-py3-none-any.whl", hash = "sha256:777983989ed9e444eb4a6d92dad84027f14a6639cba6f48772c0078d51959828"}, + {file = "anthropic-0.35.0.tar.gz", hash = "sha256:d2f998246413c309a7770d1faa617500f505377a04ab45a13a66f8559daf3742"}, ] [package.dependencies] @@ -250,13 +219,13 @@ files = [ [[package]] name = "astrapy" -version = "1.4.2" +version = "1.5.0" description = "AstraPy is a Pythonic SDK for DataStax Astra and its Data API" optional = true python-versions = "<4.0.0,>=3.8.0" files = [ - {file = "astrapy-1.4.2-py3-none-any.whl", hash = "sha256:e8b595377c6448ae675823b614b24520fbdb35572c260b6ed23383da6391478e"}, - {file = "astrapy-1.4.2.tar.gz", hash = "sha256:8fd3d2acaf439c5069d74e3d76e8a3e976120896d87cc2b05a6af51d528c6094"}, + {file = "astrapy-1.5.0-py3-none-any.whl", hash = "sha256:eb805202c976f5c3f5a6dcc2bd79f4c566e68b2c0ee25bfa3f56bf9db7b454b1"}, + {file = "astrapy-1.5.0.tar.gz", hash = "sha256:a9d75fade84f67f6fdf8d1286ed0bfb265f44c109f4f26acf50ed4883abef035"}, ] [package.dependencies] @@ -349,17 +318,17 @@ lxml = ["lxml"] [[package]] name = "boto3" -version = "1.35.19" +version = "1.35.34" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.35.19-py3-none-any.whl", hash = "sha256:84b3fe1727945bc3cada832d969ddb3dc0d08fce1677064ca8bdc13a89c1a143"}, - {file = "boto3-1.35.19.tar.gz", hash 
= "sha256:9979fe674780a0b7100eae9156d74ee374cd1638a9f61c77277e3ce712f3e496"}, + {file = "boto3-1.35.34-py3-none-any.whl", hash = "sha256:291e7b97a34967ed93297e6171f1bebb8529e64633dd48426760e3fdef1cdea8"}, + {file = "boto3-1.35.34.tar.gz", hash = "sha256:57e6ee8504e7929bc094bb2afc879943906064179a1e88c23b4812e2c6f61532"}, ] [package.dependencies] -botocore = ">=1.35.19,<1.36.0" +botocore = ">=1.35.34,<1.36.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -368,22 +337,26 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs" -version = "1.35.19" -description = "Type annotations for boto3 1.35.19 generated with mypy-boto3-builder 8.0.1" +version = "1.35.34" +description = "Type annotations for boto3 1.35.34 generated with mypy-boto3-builder 8.1.2" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs-1.35.19-py3-none-any.whl", hash = "sha256:6adace32995ae7b88675cf0bbde3b4f31876cbf57520db1ec1f392ac32660b4c"}, - {file = "boto3_stubs-1.35.19.tar.gz", hash = "sha256:c5842cd82d4a1570613f178831c2b6d1b60f511b87f56cc014f2a216c03ecf5a"}, + {file = "boto3_stubs-1.35.34-py3-none-any.whl", hash = "sha256:6a2379d8ce47ca704690dbb058c29b8900e77e6210bf8bcebfe876640522ee1c"}, + {file = "boto3_stubs-1.35.34.tar.gz", hash = "sha256:5e9209b26901f8feba4f6bca47024ad1590f9e7e21423ce4d112928973a5e09c"}, ] [package.dependencies] botocore-stubs = "*" mypy-boto3-bedrock = {version = ">=1.35.0,<1.36.0", optional = true, markers = "extra == \"bedrock\""} +mypy-boto3-dynamodb = {version = ">=1.35.0,<1.36.0", optional = true, markers = "extra == \"dynamodb\""} mypy-boto3-iam = {version = ">=1.35.0,<1.36.0", optional = true, markers = "extra == \"iam\""} +mypy-boto3-iot-data = {version = ">=1.35.0,<1.36.0", optional = true, markers = "extra == \"iot-data\""} mypy-boto3-opensearch = {version = ">=1.35.0,<1.36.0", optional = true, markers = "extra == \"opensearch\""} +mypy-boto3-redshift-data = {version = ">=1.35.0,<1.36.0", optional = true, 
markers = "extra == \"redshift-data\""} mypy-boto3-s3 = {version = ">=1.35.0,<1.36.0", optional = true, markers = "extra == \"s3\""} mypy-boto3-sagemaker = {version = ">=1.35.0,<1.36.0", optional = true, markers = "extra == \"sagemaker\""} +mypy-boto3-sqs = {version = ">=1.35.0,<1.36.0", optional = true, markers = "extra == \"sqs\""} types-s3transfer = "*" typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} @@ -392,7 +365,7 @@ accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)"] account = ["mypy-boto3-account (>=1.35.0,<1.36.0)"] acm = ["mypy-boto3-acm (>=1.35.0,<1.36.0)"] acm-pca = ["mypy-boto3-acm-pca (>=1.35.0,<1.36.0)"] -all = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)", "mypy-boto3-account (>=1.35.0,<1.36.0)", "mypy-boto3-acm (>=1.35.0,<1.36.0)", "mypy-boto3-acm-pca (>=1.35.0,<1.36.0)", "mypy-boto3-amp (>=1.35.0,<1.36.0)", "mypy-boto3-amplify (>=1.35.0,<1.36.0)", "mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)", "mypy-boto3-amplifyuibuilder (>=1.35.0,<1.36.0)", "mypy-boto3-apigateway (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewaymanagementapi (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewayv2 (>=1.35.0,<1.36.0)", "mypy-boto3-appconfig (>=1.35.0,<1.36.0)", "mypy-boto3-appconfigdata (>=1.35.0,<1.36.0)", "mypy-boto3-appfabric (>=1.35.0,<1.36.0)", "mypy-boto3-appflow (>=1.35.0,<1.36.0)", "mypy-boto3-appintegrations (>=1.35.0,<1.36.0)", "mypy-boto3-application-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-application-insights (>=1.35.0,<1.36.0)", "mypy-boto3-application-signals (>=1.35.0,<1.36.0)", "mypy-boto3-applicationcostprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-appmesh (>=1.35.0,<1.36.0)", "mypy-boto3-apprunner (>=1.35.0,<1.36.0)", "mypy-boto3-appstream (>=1.35.0,<1.36.0)", "mypy-boto3-appsync (>=1.35.0,<1.36.0)", "mypy-boto3-apptest (>=1.35.0,<1.36.0)", "mypy-boto3-arc-zonal-shift (>=1.35.0,<1.36.0)", "mypy-boto3-artifact (>=1.35.0,<1.36.0)", "mypy-boto3-athena (>=1.35.0,<1.36.0)", "mypy-boto3-auditmanager 
(>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling-plans (>=1.35.0,<1.36.0)", "mypy-boto3-b2bi (>=1.35.0,<1.36.0)", "mypy-boto3-backup (>=1.35.0,<1.36.0)", "mypy-boto3-backup-gateway (>=1.35.0,<1.36.0)", "mypy-boto3-batch (>=1.35.0,<1.36.0)", "mypy-boto3-bcm-data-exports (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-billingconductor (>=1.35.0,<1.36.0)", "mypy-boto3-braket (>=1.35.0,<1.36.0)", "mypy-boto3-budgets (>=1.35.0,<1.36.0)", "mypy-boto3-ce (>=1.35.0,<1.36.0)", "mypy-boto3-chatbot (>=1.35.0,<1.36.0)", "mypy-boto3-chime (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-identity (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-meetings (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-messaging (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-voice (>=1.35.0,<1.36.0)", "mypy-boto3-cleanrooms (>=1.35.0,<1.36.0)", "mypy-boto3-cleanroomsml (>=1.35.0,<1.36.0)", "mypy-boto3-cloud9 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudcontrol (>=1.35.0,<1.36.0)", "mypy-boto3-clouddirectory (>=1.35.0,<1.36.0)", "mypy-boto3-cloudformation (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsm (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsmv2 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearch (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearchdomain (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail-data (>=1.35.0,<1.36.0)", "mypy-boto3-cloudwatch (>=1.35.0,<1.36.0)", "mypy-boto3-codeartifact (>=1.35.0,<1.36.0)", "mypy-boto3-codebuild (>=1.35.0,<1.36.0)", "mypy-boto3-codecatalyst (>=1.35.0,<1.36.0)", "mypy-boto3-codecommit (>=1.35.0,<1.36.0)", "mypy-boto3-codeconnections (>=1.35.0,<1.36.0)", "mypy-boto3-codedeploy 
(>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-reviewer (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-security (>=1.35.0,<1.36.0)", "mypy-boto3-codeguruprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-codepipeline (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-connections (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-notifications (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-identity (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-idp (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-sync (>=1.35.0,<1.36.0)", "mypy-boto3-comprehend (>=1.35.0,<1.36.0)", "mypy-boto3-comprehendmedical (>=1.35.0,<1.36.0)", "mypy-boto3-compute-optimizer (>=1.35.0,<1.36.0)", "mypy-boto3-config (>=1.35.0,<1.36.0)", "mypy-boto3-connect (>=1.35.0,<1.36.0)", "mypy-boto3-connect-contact-lens (>=1.35.0,<1.36.0)", "mypy-boto3-connectcampaigns (>=1.35.0,<1.36.0)", "mypy-boto3-connectcases (>=1.35.0,<1.36.0)", "mypy-boto3-connectparticipant (>=1.35.0,<1.36.0)", "mypy-boto3-controlcatalog (>=1.35.0,<1.36.0)", "mypy-boto3-controltower (>=1.35.0,<1.36.0)", "mypy-boto3-cost-optimization-hub (>=1.35.0,<1.36.0)", "mypy-boto3-cur (>=1.35.0,<1.36.0)", "mypy-boto3-customer-profiles (>=1.35.0,<1.36.0)", "mypy-boto3-databrew (>=1.35.0,<1.36.0)", "mypy-boto3-dataexchange (>=1.35.0,<1.36.0)", "mypy-boto3-datapipeline (>=1.35.0,<1.36.0)", "mypy-boto3-datasync (>=1.35.0,<1.36.0)", "mypy-boto3-datazone (>=1.35.0,<1.36.0)", "mypy-boto3-dax (>=1.35.0,<1.36.0)", "mypy-boto3-deadline (>=1.35.0,<1.36.0)", "mypy-boto3-detective (>=1.35.0,<1.36.0)", "mypy-boto3-devicefarm (>=1.35.0,<1.36.0)", "mypy-boto3-devops-guru (>=1.35.0,<1.36.0)", "mypy-boto3-directconnect (>=1.35.0,<1.36.0)", "mypy-boto3-discovery (>=1.35.0,<1.36.0)", "mypy-boto3-dlm (>=1.35.0,<1.36.0)", "mypy-boto3-dms (>=1.35.0,<1.36.0)", "mypy-boto3-docdb (>=1.35.0,<1.36.0)", "mypy-boto3-docdb-elastic (>=1.35.0,<1.36.0)", "mypy-boto3-drs (>=1.35.0,<1.36.0)", "mypy-boto3-ds (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodb (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodbstreams (>=1.35.0,<1.36.0)", "mypy-boto3-ebs 
(>=1.35.0,<1.36.0)", "mypy-boto3-ec2 (>=1.35.0,<1.36.0)", "mypy-boto3-ec2-instance-connect (>=1.35.0,<1.36.0)", "mypy-boto3-ecr (>=1.35.0,<1.36.0)", "mypy-boto3-ecr-public (>=1.35.0,<1.36.0)", "mypy-boto3-ecs (>=1.35.0,<1.36.0)", "mypy-boto3-efs (>=1.35.0,<1.36.0)", "mypy-boto3-eks (>=1.35.0,<1.36.0)", "mypy-boto3-eks-auth (>=1.35.0,<1.36.0)", "mypy-boto3-elastic-inference (>=1.35.0,<1.36.0)", "mypy-boto3-elasticache (>=1.35.0,<1.36.0)", "mypy-boto3-elasticbeanstalk (>=1.35.0,<1.36.0)", "mypy-boto3-elastictranscoder (>=1.35.0,<1.36.0)", "mypy-boto3-elb (>=1.35.0,<1.36.0)", "mypy-boto3-elbv2 (>=1.35.0,<1.36.0)", "mypy-boto3-emr (>=1.35.0,<1.36.0)", "mypy-boto3-emr-containers (>=1.35.0,<1.36.0)", "mypy-boto3-emr-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-entityresolution (>=1.35.0,<1.36.0)", "mypy-boto3-es (>=1.35.0,<1.36.0)", "mypy-boto3-events (>=1.35.0,<1.36.0)", "mypy-boto3-evidently (>=1.35.0,<1.36.0)", "mypy-boto3-finspace (>=1.35.0,<1.36.0)", "mypy-boto3-finspace-data (>=1.35.0,<1.36.0)", "mypy-boto3-firehose (>=1.35.0,<1.36.0)", "mypy-boto3-fis (>=1.35.0,<1.36.0)", "mypy-boto3-fms (>=1.35.0,<1.36.0)", "mypy-boto3-forecast (>=1.35.0,<1.36.0)", "mypy-boto3-forecastquery (>=1.35.0,<1.36.0)", "mypy-boto3-frauddetector (>=1.35.0,<1.36.0)", "mypy-boto3-freetier (>=1.35.0,<1.36.0)", "mypy-boto3-fsx (>=1.35.0,<1.36.0)", "mypy-boto3-gamelift (>=1.35.0,<1.36.0)", "mypy-boto3-glacier (>=1.35.0,<1.36.0)", "mypy-boto3-globalaccelerator (>=1.35.0,<1.36.0)", "mypy-boto3-glue (>=1.35.0,<1.36.0)", "mypy-boto3-grafana (>=1.35.0,<1.36.0)", "mypy-boto3-greengrass (>=1.35.0,<1.36.0)", "mypy-boto3-greengrassv2 (>=1.35.0,<1.36.0)", "mypy-boto3-groundstation (>=1.35.0,<1.36.0)", "mypy-boto3-guardduty (>=1.35.0,<1.36.0)", "mypy-boto3-health (>=1.35.0,<1.36.0)", "mypy-boto3-healthlake (>=1.35.0,<1.36.0)", "mypy-boto3-iam (>=1.35.0,<1.36.0)", "mypy-boto3-identitystore (>=1.35.0,<1.36.0)", "mypy-boto3-imagebuilder (>=1.35.0,<1.36.0)", "mypy-boto3-importexport (>=1.35.0,<1.36.0)", 
"mypy-boto3-inspector (>=1.35.0,<1.36.0)", "mypy-boto3-inspector-scan (>=1.35.0,<1.36.0)", "mypy-boto3-inspector2 (>=1.35.0,<1.36.0)", "mypy-boto3-internetmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-iot (>=1.35.0,<1.36.0)", "mypy-boto3-iot-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot-jobs-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-devices (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-projects (>=1.35.0,<1.36.0)", "mypy-boto3-iotanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-iotdeviceadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents-data (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleethub (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleetwise (>=1.35.0,<1.36.0)", "mypy-boto3-iotsecuretunneling (>=1.35.0,<1.36.0)", "mypy-boto3-iotsitewise (>=1.35.0,<1.36.0)", "mypy-boto3-iotthingsgraph (>=1.35.0,<1.36.0)", "mypy-boto3-iottwinmaker (>=1.35.0,<1.36.0)", "mypy-boto3-iotwireless (>=1.35.0,<1.36.0)", "mypy-boto3-ivs (>=1.35.0,<1.36.0)", "mypy-boto3-ivs-realtime (>=1.35.0,<1.36.0)", "mypy-boto3-ivschat (>=1.35.0,<1.36.0)", "mypy-boto3-kafka (>=1.35.0,<1.36.0)", "mypy-boto3-kafkaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-kendra (>=1.35.0,<1.36.0)", "mypy-boto3-kendra-ranking (>=1.35.0,<1.36.0)", "mypy-boto3-keyspaces (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-archived-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-signaling (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisvideo (>=1.35.0,<1.36.0)", "mypy-boto3-kms (>=1.35.0,<1.36.0)", "mypy-boto3-lakeformation (>=1.35.0,<1.36.0)", "mypy-boto3-lambda (>=1.35.0,<1.36.0)", "mypy-boto3-launch-wizard (>=1.35.0,<1.36.0)", "mypy-boto3-lex-models (>=1.35.0,<1.36.0)", "mypy-boto3-lex-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-models (>=1.35.0,<1.36.0)", 
"mypy-boto3-lexv2-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-lightsail (>=1.35.0,<1.36.0)", "mypy-boto3-location (>=1.35.0,<1.36.0)", "mypy-boto3-logs (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutequipment (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutmetrics (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutvision (>=1.35.0,<1.36.0)", "mypy-boto3-m2 (>=1.35.0,<1.36.0)", "mypy-boto3-machinelearning (>=1.35.0,<1.36.0)", "mypy-boto3-macie2 (>=1.35.0,<1.36.0)", "mypy-boto3-mailmanager (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain-query (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-agreement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-catalog (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-deployment (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-entitlement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconvert (>=1.35.0,<1.36.0)", "mypy-boto3-medialive (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage-vod (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackagev2 (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore-data (>=1.35.0,<1.36.0)", "mypy-boto3-mediatailor (>=1.35.0,<1.36.0)", "mypy-boto3-medical-imaging (>=1.35.0,<1.36.0)", "mypy-boto3-memorydb (>=1.35.0,<1.36.0)", "mypy-boto3-meteringmarketplace (>=1.35.0,<1.36.0)", "mypy-boto3-mgh (>=1.35.0,<1.36.0)", "mypy-boto3-mgn (>=1.35.0,<1.36.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhub-config (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhuborchestrator (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhubstrategy (>=1.35.0,<1.36.0)", "mypy-boto3-mq (>=1.35.0,<1.36.0)", "mypy-boto3-mturk (>=1.35.0,<1.36.0)", 
"mypy-boto3-mwaa (>=1.35.0,<1.36.0)", "mypy-boto3-neptune (>=1.35.0,<1.36.0)", "mypy-boto3-neptune-graph (>=1.35.0,<1.36.0)", "mypy-boto3-neptunedata (>=1.35.0,<1.36.0)", "mypy-boto3-network-firewall (>=1.35.0,<1.36.0)", "mypy-boto3-networkmanager (>=1.35.0,<1.36.0)", "mypy-boto3-networkmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-nimble (>=1.35.0,<1.36.0)", "mypy-boto3-oam (>=1.35.0,<1.36.0)", "mypy-boto3-omics (>=1.35.0,<1.36.0)", "mypy-boto3-opensearch (>=1.35.0,<1.36.0)", "mypy-boto3-opensearchserverless (>=1.35.0,<1.36.0)", "mypy-boto3-opsworks (>=1.35.0,<1.36.0)", "mypy-boto3-opsworkscm (>=1.35.0,<1.36.0)", "mypy-boto3-organizations (>=1.35.0,<1.36.0)", "mypy-boto3-osis (>=1.35.0,<1.36.0)", "mypy-boto3-outposts (>=1.35.0,<1.36.0)", "mypy-boto3-panorama (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography-data (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-ad (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-scep (>=1.35.0,<1.36.0)", "mypy-boto3-pcs (>=1.35.0,<1.36.0)", "mypy-boto3-personalize (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-events (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-pi (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-email (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.35.0,<1.36.0)", "mypy-boto3-pipes (>=1.35.0,<1.36.0)", "mypy-boto3-polly (>=1.35.0,<1.36.0)", "mypy-boto3-pricing (>=1.35.0,<1.36.0)", "mypy-boto3-privatenetworks (>=1.35.0,<1.36.0)", "mypy-boto3-proton (>=1.35.0,<1.36.0)", "mypy-boto3-qapps (>=1.35.0,<1.36.0)", "mypy-boto3-qbusiness (>=1.35.0,<1.36.0)", "mypy-boto3-qconnect (>=1.35.0,<1.36.0)", "mypy-boto3-qldb (>=1.35.0,<1.36.0)", "mypy-boto3-qldb-session (>=1.35.0,<1.36.0)", "mypy-boto3-quicksight (>=1.35.0,<1.36.0)", "mypy-boto3-ram (>=1.35.0,<1.36.0)", "mypy-boto3-rbin (>=1.35.0,<1.36.0)", "mypy-boto3-rds (>=1.35.0,<1.36.0)", 
"mypy-boto3-rds-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-rekognition (>=1.35.0,<1.36.0)", "mypy-boto3-repostspace (>=1.35.0,<1.36.0)", "mypy-boto3-resiliencehub (>=1.35.0,<1.36.0)", "mypy-boto3-resource-explorer-2 (>=1.35.0,<1.36.0)", "mypy-boto3-resource-groups (>=1.35.0,<1.36.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.35.0,<1.36.0)", "mypy-boto3-robomaker (>=1.35.0,<1.36.0)", "mypy-boto3-rolesanywhere (>=1.35.0,<1.36.0)", "mypy-boto3-route53 (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-cluster (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-control-config (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-readiness (>=1.35.0,<1.36.0)", "mypy-boto3-route53domains (>=1.35.0,<1.36.0)", "mypy-boto3-route53profiles (>=1.35.0,<1.36.0)", "mypy-boto3-route53resolver (>=1.35.0,<1.36.0)", "mypy-boto3-rum (>=1.35.0,<1.36.0)", "mypy-boto3-s3 (>=1.35.0,<1.36.0)", "mypy-boto3-s3control (>=1.35.0,<1.36.0)", "mypy-boto3-s3outposts (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-edge (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-geospatial (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-metrics (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-savingsplans (>=1.35.0,<1.36.0)", "mypy-boto3-scheduler (>=1.35.0,<1.36.0)", "mypy-boto3-schemas (>=1.35.0,<1.36.0)", "mypy-boto3-sdb (>=1.35.0,<1.36.0)", "mypy-boto3-secretsmanager (>=1.35.0,<1.36.0)", "mypy-boto3-securityhub (>=1.35.0,<1.36.0)", "mypy-boto3-securitylake (>=1.35.0,<1.36.0)", "mypy-boto3-serverlessrepo (>=1.35.0,<1.36.0)", "mypy-boto3-service-quotas (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog-appregistry (>=1.35.0,<1.36.0)", 
"mypy-boto3-servicediscovery (>=1.35.0,<1.36.0)", "mypy-boto3-ses (>=1.35.0,<1.36.0)", "mypy-boto3-sesv2 (>=1.35.0,<1.36.0)", "mypy-boto3-shield (>=1.35.0,<1.36.0)", "mypy-boto3-signer (>=1.35.0,<1.36.0)", "mypy-boto3-simspaceweaver (>=1.35.0,<1.36.0)", "mypy-boto3-sms (>=1.35.0,<1.36.0)", "mypy-boto3-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-snow-device-management (>=1.35.0,<1.36.0)", "mypy-boto3-snowball (>=1.35.0,<1.36.0)", "mypy-boto3-sns (>=1.35.0,<1.36.0)", "mypy-boto3-sqs (>=1.35.0,<1.36.0)", "mypy-boto3-ssm (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-contacts (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-incidents (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-quicksetup (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-sap (>=1.35.0,<1.36.0)", "mypy-boto3-sso (>=1.35.0,<1.36.0)", "mypy-boto3-sso-admin (>=1.35.0,<1.36.0)", "mypy-boto3-sso-oidc (>=1.35.0,<1.36.0)", "mypy-boto3-stepfunctions (>=1.35.0,<1.36.0)", "mypy-boto3-storagegateway (>=1.35.0,<1.36.0)", "mypy-boto3-sts (>=1.35.0,<1.36.0)", "mypy-boto3-supplychain (>=1.35.0,<1.36.0)", "mypy-boto3-support (>=1.35.0,<1.36.0)", "mypy-boto3-support-app (>=1.35.0,<1.36.0)", "mypy-boto3-swf (>=1.35.0,<1.36.0)", "mypy-boto3-synthetics (>=1.35.0,<1.36.0)", "mypy-boto3-taxsettings (>=1.35.0,<1.36.0)", "mypy-boto3-textract (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-influxdb (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-query (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-write (>=1.35.0,<1.36.0)", "mypy-boto3-tnb (>=1.35.0,<1.36.0)", "mypy-boto3-transcribe (>=1.35.0,<1.36.0)", "mypy-boto3-transfer (>=1.35.0,<1.36.0)", "mypy-boto3-translate (>=1.35.0,<1.36.0)", "mypy-boto3-trustedadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-verifiedpermissions (>=1.35.0,<1.36.0)", "mypy-boto3-voice-id (>=1.35.0,<1.36.0)", "mypy-boto3-vpc-lattice (>=1.35.0,<1.36.0)", "mypy-boto3-waf (>=1.35.0,<1.36.0)", "mypy-boto3-waf-regional (>=1.35.0,<1.36.0)", "mypy-boto3-wafv2 (>=1.35.0,<1.36.0)", "mypy-boto3-wellarchitected (>=1.35.0,<1.36.0)", "mypy-boto3-wisdom (>=1.35.0,<1.36.0)", 
"mypy-boto3-workdocs (>=1.35.0,<1.36.0)", "mypy-boto3-worklink (>=1.35.0,<1.36.0)", "mypy-boto3-workmail (>=1.35.0,<1.36.0)", "mypy-boto3-workmailmessageflow (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-thin-client (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-web (>=1.35.0,<1.36.0)", "mypy-boto3-xray (>=1.35.0,<1.36.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)", "mypy-boto3-account (>=1.35.0,<1.36.0)", "mypy-boto3-acm (>=1.35.0,<1.36.0)", "mypy-boto3-acm-pca (>=1.35.0,<1.36.0)", "mypy-boto3-amp (>=1.35.0,<1.36.0)", "mypy-boto3-amplify (>=1.35.0,<1.36.0)", "mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)", "mypy-boto3-amplifyuibuilder (>=1.35.0,<1.36.0)", "mypy-boto3-apigateway (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewaymanagementapi (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewayv2 (>=1.35.0,<1.36.0)", "mypy-boto3-appconfig (>=1.35.0,<1.36.0)", "mypy-boto3-appconfigdata (>=1.35.0,<1.36.0)", "mypy-boto3-appfabric (>=1.35.0,<1.36.0)", "mypy-boto3-appflow (>=1.35.0,<1.36.0)", "mypy-boto3-appintegrations (>=1.35.0,<1.36.0)", "mypy-boto3-application-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-application-insights (>=1.35.0,<1.36.0)", "mypy-boto3-application-signals (>=1.35.0,<1.36.0)", "mypy-boto3-applicationcostprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-appmesh (>=1.35.0,<1.36.0)", "mypy-boto3-apprunner (>=1.35.0,<1.36.0)", "mypy-boto3-appstream (>=1.35.0,<1.36.0)", "mypy-boto3-appsync (>=1.35.0,<1.36.0)", "mypy-boto3-apptest (>=1.35.0,<1.36.0)", "mypy-boto3-arc-zonal-shift (>=1.35.0,<1.36.0)", "mypy-boto3-artifact (>=1.35.0,<1.36.0)", "mypy-boto3-athena (>=1.35.0,<1.36.0)", "mypy-boto3-auditmanager (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling-plans (>=1.35.0,<1.36.0)", "mypy-boto3-b2bi (>=1.35.0,<1.36.0)", "mypy-boto3-backup (>=1.35.0,<1.36.0)", "mypy-boto3-backup-gateway (>=1.35.0,<1.36.0)", "mypy-boto3-batch (>=1.35.0,<1.36.0)", "mypy-boto3-bcm-data-exports 
(>=1.35.0,<1.36.0)", "mypy-boto3-bedrock (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-billingconductor (>=1.35.0,<1.36.0)", "mypy-boto3-braket (>=1.35.0,<1.36.0)", "mypy-boto3-budgets (>=1.35.0,<1.36.0)", "mypy-boto3-ce (>=1.35.0,<1.36.0)", "mypy-boto3-chatbot (>=1.35.0,<1.36.0)", "mypy-boto3-chime (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-identity (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-meetings (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-messaging (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-voice (>=1.35.0,<1.36.0)", "mypy-boto3-cleanrooms (>=1.35.0,<1.36.0)", "mypy-boto3-cleanroomsml (>=1.35.0,<1.36.0)", "mypy-boto3-cloud9 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudcontrol (>=1.35.0,<1.36.0)", "mypy-boto3-clouddirectory (>=1.35.0,<1.36.0)", "mypy-boto3-cloudformation (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsm (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsmv2 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearch (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearchdomain (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail-data (>=1.35.0,<1.36.0)", "mypy-boto3-cloudwatch (>=1.35.0,<1.36.0)", "mypy-boto3-codeartifact (>=1.35.0,<1.36.0)", "mypy-boto3-codebuild (>=1.35.0,<1.36.0)", "mypy-boto3-codecatalyst (>=1.35.0,<1.36.0)", "mypy-boto3-codecommit (>=1.35.0,<1.36.0)", "mypy-boto3-codeconnections (>=1.35.0,<1.36.0)", "mypy-boto3-codedeploy (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-reviewer (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-security (>=1.35.0,<1.36.0)", "mypy-boto3-codeguruprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-codepipeline (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-connections (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-notifications (>=1.35.0,<1.36.0)", 
"mypy-boto3-cognito-identity (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-idp (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-sync (>=1.35.0,<1.36.0)", "mypy-boto3-comprehend (>=1.35.0,<1.36.0)", "mypy-boto3-comprehendmedical (>=1.35.0,<1.36.0)", "mypy-boto3-compute-optimizer (>=1.35.0,<1.36.0)", "mypy-boto3-config (>=1.35.0,<1.36.0)", "mypy-boto3-connect (>=1.35.0,<1.36.0)", "mypy-boto3-connect-contact-lens (>=1.35.0,<1.36.0)", "mypy-boto3-connectcampaigns (>=1.35.0,<1.36.0)", "mypy-boto3-connectcases (>=1.35.0,<1.36.0)", "mypy-boto3-connectparticipant (>=1.35.0,<1.36.0)", "mypy-boto3-controlcatalog (>=1.35.0,<1.36.0)", "mypy-boto3-controltower (>=1.35.0,<1.36.0)", "mypy-boto3-cost-optimization-hub (>=1.35.0,<1.36.0)", "mypy-boto3-cur (>=1.35.0,<1.36.0)", "mypy-boto3-customer-profiles (>=1.35.0,<1.36.0)", "mypy-boto3-databrew (>=1.35.0,<1.36.0)", "mypy-boto3-dataexchange (>=1.35.0,<1.36.0)", "mypy-boto3-datapipeline (>=1.35.0,<1.36.0)", "mypy-boto3-datasync (>=1.35.0,<1.36.0)", "mypy-boto3-datazone (>=1.35.0,<1.36.0)", "mypy-boto3-dax (>=1.35.0,<1.36.0)", "mypy-boto3-deadline (>=1.35.0,<1.36.0)", "mypy-boto3-detective (>=1.35.0,<1.36.0)", "mypy-boto3-devicefarm (>=1.35.0,<1.36.0)", "mypy-boto3-devops-guru (>=1.35.0,<1.36.0)", "mypy-boto3-directconnect (>=1.35.0,<1.36.0)", "mypy-boto3-discovery (>=1.35.0,<1.36.0)", "mypy-boto3-dlm (>=1.35.0,<1.36.0)", "mypy-boto3-dms (>=1.35.0,<1.36.0)", "mypy-boto3-docdb (>=1.35.0,<1.36.0)", "mypy-boto3-docdb-elastic (>=1.35.0,<1.36.0)", "mypy-boto3-drs (>=1.35.0,<1.36.0)", "mypy-boto3-ds (>=1.35.0,<1.36.0)", "mypy-boto3-ds-data (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodb (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodbstreams (>=1.35.0,<1.36.0)", "mypy-boto3-ebs (>=1.35.0,<1.36.0)", "mypy-boto3-ec2 (>=1.35.0,<1.36.0)", "mypy-boto3-ec2-instance-connect (>=1.35.0,<1.36.0)", "mypy-boto3-ecr (>=1.35.0,<1.36.0)", "mypy-boto3-ecr-public (>=1.35.0,<1.36.0)", "mypy-boto3-ecs (>=1.35.0,<1.36.0)", "mypy-boto3-efs (>=1.35.0,<1.36.0)", "mypy-boto3-eks 
(>=1.35.0,<1.36.0)", "mypy-boto3-eks-auth (>=1.35.0,<1.36.0)", "mypy-boto3-elastic-inference (>=1.35.0,<1.36.0)", "mypy-boto3-elasticache (>=1.35.0,<1.36.0)", "mypy-boto3-elasticbeanstalk (>=1.35.0,<1.36.0)", "mypy-boto3-elastictranscoder (>=1.35.0,<1.36.0)", "mypy-boto3-elb (>=1.35.0,<1.36.0)", "mypy-boto3-elbv2 (>=1.35.0,<1.36.0)", "mypy-boto3-emr (>=1.35.0,<1.36.0)", "mypy-boto3-emr-containers (>=1.35.0,<1.36.0)", "mypy-boto3-emr-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-entityresolution (>=1.35.0,<1.36.0)", "mypy-boto3-es (>=1.35.0,<1.36.0)", "mypy-boto3-events (>=1.35.0,<1.36.0)", "mypy-boto3-evidently (>=1.35.0,<1.36.0)", "mypy-boto3-finspace (>=1.35.0,<1.36.0)", "mypy-boto3-finspace-data (>=1.35.0,<1.36.0)", "mypy-boto3-firehose (>=1.35.0,<1.36.0)", "mypy-boto3-fis (>=1.35.0,<1.36.0)", "mypy-boto3-fms (>=1.35.0,<1.36.0)", "mypy-boto3-forecast (>=1.35.0,<1.36.0)", "mypy-boto3-forecastquery (>=1.35.0,<1.36.0)", "mypy-boto3-frauddetector (>=1.35.0,<1.36.0)", "mypy-boto3-freetier (>=1.35.0,<1.36.0)", "mypy-boto3-fsx (>=1.35.0,<1.36.0)", "mypy-boto3-gamelift (>=1.35.0,<1.36.0)", "mypy-boto3-glacier (>=1.35.0,<1.36.0)", "mypy-boto3-globalaccelerator (>=1.35.0,<1.36.0)", "mypy-boto3-glue (>=1.35.0,<1.36.0)", "mypy-boto3-grafana (>=1.35.0,<1.36.0)", "mypy-boto3-greengrass (>=1.35.0,<1.36.0)", "mypy-boto3-greengrassv2 (>=1.35.0,<1.36.0)", "mypy-boto3-groundstation (>=1.35.0,<1.36.0)", "mypy-boto3-guardduty (>=1.35.0,<1.36.0)", "mypy-boto3-health (>=1.35.0,<1.36.0)", "mypy-boto3-healthlake (>=1.35.0,<1.36.0)", "mypy-boto3-iam (>=1.35.0,<1.36.0)", "mypy-boto3-identitystore (>=1.35.0,<1.36.0)", "mypy-boto3-imagebuilder (>=1.35.0,<1.36.0)", "mypy-boto3-importexport (>=1.35.0,<1.36.0)", "mypy-boto3-inspector (>=1.35.0,<1.36.0)", "mypy-boto3-inspector-scan (>=1.35.0,<1.36.0)", "mypy-boto3-inspector2 (>=1.35.0,<1.36.0)", "mypy-boto3-internetmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-iot (>=1.35.0,<1.36.0)", "mypy-boto3-iot-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot-jobs-data 
(>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-devices (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-projects (>=1.35.0,<1.36.0)", "mypy-boto3-iotanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-iotdeviceadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents-data (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleethub (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleetwise (>=1.35.0,<1.36.0)", "mypy-boto3-iotsecuretunneling (>=1.35.0,<1.36.0)", "mypy-boto3-iotsitewise (>=1.35.0,<1.36.0)", "mypy-boto3-iotthingsgraph (>=1.35.0,<1.36.0)", "mypy-boto3-iottwinmaker (>=1.35.0,<1.36.0)", "mypy-boto3-iotwireless (>=1.35.0,<1.36.0)", "mypy-boto3-ivs (>=1.35.0,<1.36.0)", "mypy-boto3-ivs-realtime (>=1.35.0,<1.36.0)", "mypy-boto3-ivschat (>=1.35.0,<1.36.0)", "mypy-boto3-kafka (>=1.35.0,<1.36.0)", "mypy-boto3-kafkaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-kendra (>=1.35.0,<1.36.0)", "mypy-boto3-kendra-ranking (>=1.35.0,<1.36.0)", "mypy-boto3-keyspaces (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-archived-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-signaling (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisvideo (>=1.35.0,<1.36.0)", "mypy-boto3-kms (>=1.35.0,<1.36.0)", "mypy-boto3-lakeformation (>=1.35.0,<1.36.0)", "mypy-boto3-lambda (>=1.35.0,<1.36.0)", "mypy-boto3-launch-wizard (>=1.35.0,<1.36.0)", "mypy-boto3-lex-models (>=1.35.0,<1.36.0)", "mypy-boto3-lex-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-models (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-lightsail (>=1.35.0,<1.36.0)", 
"mypy-boto3-location (>=1.35.0,<1.36.0)", "mypy-boto3-logs (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutequipment (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutmetrics (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutvision (>=1.35.0,<1.36.0)", "mypy-boto3-m2 (>=1.35.0,<1.36.0)", "mypy-boto3-machinelearning (>=1.35.0,<1.36.0)", "mypy-boto3-macie2 (>=1.35.0,<1.36.0)", "mypy-boto3-mailmanager (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain-query (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-agreement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-catalog (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-deployment (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-entitlement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-reporting (>=1.35.0,<1.36.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconvert (>=1.35.0,<1.36.0)", "mypy-boto3-medialive (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage-vod (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackagev2 (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore-data (>=1.35.0,<1.36.0)", "mypy-boto3-mediatailor (>=1.35.0,<1.36.0)", "mypy-boto3-medical-imaging (>=1.35.0,<1.36.0)", "mypy-boto3-memorydb (>=1.35.0,<1.36.0)", "mypy-boto3-meteringmarketplace (>=1.35.0,<1.36.0)", "mypy-boto3-mgh (>=1.35.0,<1.36.0)", "mypy-boto3-mgn (>=1.35.0,<1.36.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhub-config (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhuborchestrator (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhubstrategy (>=1.35.0,<1.36.0)", "mypy-boto3-mq (>=1.35.0,<1.36.0)", "mypy-boto3-mturk (>=1.35.0,<1.36.0)", "mypy-boto3-mwaa (>=1.35.0,<1.36.0)", "mypy-boto3-neptune (>=1.35.0,<1.36.0)", "mypy-boto3-neptune-graph (>=1.35.0,<1.36.0)", "mypy-boto3-neptunedata (>=1.35.0,<1.36.0)", "mypy-boto3-network-firewall (>=1.35.0,<1.36.0)", 
"mypy-boto3-networkmanager (>=1.35.0,<1.36.0)", "mypy-boto3-networkmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-nimble (>=1.35.0,<1.36.0)", "mypy-boto3-oam (>=1.35.0,<1.36.0)", "mypy-boto3-omics (>=1.35.0,<1.36.0)", "mypy-boto3-opensearch (>=1.35.0,<1.36.0)", "mypy-boto3-opensearchserverless (>=1.35.0,<1.36.0)", "mypy-boto3-opsworks (>=1.35.0,<1.36.0)", "mypy-boto3-opsworkscm (>=1.35.0,<1.36.0)", "mypy-boto3-organizations (>=1.35.0,<1.36.0)", "mypy-boto3-osis (>=1.35.0,<1.36.0)", "mypy-boto3-outposts (>=1.35.0,<1.36.0)", "mypy-boto3-panorama (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography-data (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-ad (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-scep (>=1.35.0,<1.36.0)", "mypy-boto3-pcs (>=1.35.0,<1.36.0)", "mypy-boto3-personalize (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-events (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-pi (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-email (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.35.0,<1.36.0)", "mypy-boto3-pipes (>=1.35.0,<1.36.0)", "mypy-boto3-polly (>=1.35.0,<1.36.0)", "mypy-boto3-pricing (>=1.35.0,<1.36.0)", "mypy-boto3-privatenetworks (>=1.35.0,<1.36.0)", "mypy-boto3-proton (>=1.35.0,<1.36.0)", "mypy-boto3-qapps (>=1.35.0,<1.36.0)", "mypy-boto3-qbusiness (>=1.35.0,<1.36.0)", "mypy-boto3-qconnect (>=1.35.0,<1.36.0)", "mypy-boto3-qldb (>=1.35.0,<1.36.0)", "mypy-boto3-qldb-session (>=1.35.0,<1.36.0)", "mypy-boto3-quicksight (>=1.35.0,<1.36.0)", "mypy-boto3-ram (>=1.35.0,<1.36.0)", "mypy-boto3-rbin (>=1.35.0,<1.36.0)", "mypy-boto3-rds (>=1.35.0,<1.36.0)", "mypy-boto3-rds-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-rekognition 
(>=1.35.0,<1.36.0)", "mypy-boto3-repostspace (>=1.35.0,<1.36.0)", "mypy-boto3-resiliencehub (>=1.35.0,<1.36.0)", "mypy-boto3-resource-explorer-2 (>=1.35.0,<1.36.0)", "mypy-boto3-resource-groups (>=1.35.0,<1.36.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.35.0,<1.36.0)", "mypy-boto3-robomaker (>=1.35.0,<1.36.0)", "mypy-boto3-rolesanywhere (>=1.35.0,<1.36.0)", "mypy-boto3-route53 (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-cluster (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-control-config (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-readiness (>=1.35.0,<1.36.0)", "mypy-boto3-route53domains (>=1.35.0,<1.36.0)", "mypy-boto3-route53profiles (>=1.35.0,<1.36.0)", "mypy-boto3-route53resolver (>=1.35.0,<1.36.0)", "mypy-boto3-rum (>=1.35.0,<1.36.0)", "mypy-boto3-s3 (>=1.35.0,<1.36.0)", "mypy-boto3-s3control (>=1.35.0,<1.36.0)", "mypy-boto3-s3outposts (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-edge (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-geospatial (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-metrics (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-savingsplans (>=1.35.0,<1.36.0)", "mypy-boto3-scheduler (>=1.35.0,<1.36.0)", "mypy-boto3-schemas (>=1.35.0,<1.36.0)", "mypy-boto3-sdb (>=1.35.0,<1.36.0)", "mypy-boto3-secretsmanager (>=1.35.0,<1.36.0)", "mypy-boto3-securityhub (>=1.35.0,<1.36.0)", "mypy-boto3-securitylake (>=1.35.0,<1.36.0)", "mypy-boto3-serverlessrepo (>=1.35.0,<1.36.0)", "mypy-boto3-service-quotas (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog-appregistry (>=1.35.0,<1.36.0)", "mypy-boto3-servicediscovery (>=1.35.0,<1.36.0)", "mypy-boto3-ses (>=1.35.0,<1.36.0)", "mypy-boto3-sesv2 (>=1.35.0,<1.36.0)", "mypy-boto3-shield (>=1.35.0,<1.36.0)", "mypy-boto3-signer (>=1.35.0,<1.36.0)", 
"mypy-boto3-simspaceweaver (>=1.35.0,<1.36.0)", "mypy-boto3-sms (>=1.35.0,<1.36.0)", "mypy-boto3-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-snow-device-management (>=1.35.0,<1.36.0)", "mypy-boto3-snowball (>=1.35.0,<1.36.0)", "mypy-boto3-sns (>=1.35.0,<1.36.0)", "mypy-boto3-sqs (>=1.35.0,<1.36.0)", "mypy-boto3-ssm (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-contacts (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-incidents (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-quicksetup (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-sap (>=1.35.0,<1.36.0)", "mypy-boto3-sso (>=1.35.0,<1.36.0)", "mypy-boto3-sso-admin (>=1.35.0,<1.36.0)", "mypy-boto3-sso-oidc (>=1.35.0,<1.36.0)", "mypy-boto3-stepfunctions (>=1.35.0,<1.36.0)", "mypy-boto3-storagegateway (>=1.35.0,<1.36.0)", "mypy-boto3-sts (>=1.35.0,<1.36.0)", "mypy-boto3-supplychain (>=1.35.0,<1.36.0)", "mypy-boto3-support (>=1.35.0,<1.36.0)", "mypy-boto3-support-app (>=1.35.0,<1.36.0)", "mypy-boto3-swf (>=1.35.0,<1.36.0)", "mypy-boto3-synthetics (>=1.35.0,<1.36.0)", "mypy-boto3-taxsettings (>=1.35.0,<1.36.0)", "mypy-boto3-textract (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-influxdb (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-query (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-write (>=1.35.0,<1.36.0)", "mypy-boto3-tnb (>=1.35.0,<1.36.0)", "mypy-boto3-transcribe (>=1.35.0,<1.36.0)", "mypy-boto3-transfer (>=1.35.0,<1.36.0)", "mypy-boto3-translate (>=1.35.0,<1.36.0)", "mypy-boto3-trustedadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-verifiedpermissions (>=1.35.0,<1.36.0)", "mypy-boto3-voice-id (>=1.35.0,<1.36.0)", "mypy-boto3-vpc-lattice (>=1.35.0,<1.36.0)", "mypy-boto3-waf (>=1.35.0,<1.36.0)", "mypy-boto3-waf-regional (>=1.35.0,<1.36.0)", "mypy-boto3-wafv2 (>=1.35.0,<1.36.0)", "mypy-boto3-wellarchitected (>=1.35.0,<1.36.0)", "mypy-boto3-wisdom (>=1.35.0,<1.36.0)", "mypy-boto3-workdocs (>=1.35.0,<1.36.0)", "mypy-boto3-workmail (>=1.35.0,<1.36.0)", "mypy-boto3-workmailmessageflow (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces (>=1.35.0,<1.36.0)", 
"mypy-boto3-workspaces-thin-client (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-web (>=1.35.0,<1.36.0)", "mypy-boto3-xray (>=1.35.0,<1.36.0)"] amp = ["mypy-boto3-amp (>=1.35.0,<1.36.0)"] amplify = ["mypy-boto3-amplify (>=1.35.0,<1.36.0)"] amplifybackend = ["mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)"] @@ -430,7 +403,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.35.0,<1.36.0)"] -boto3 = ["boto3 (==1.35.19)", "botocore (==1.35.19)"] +boto3 = ["boto3 (==1.35.34)", "botocore (==1.35.34)"] braket = ["mypy-boto3-braket (>=1.35.0,<1.36.0)"] budgets = ["mypy-boto3-budgets (>=1.35.0,<1.36.0)"] ce = ["mypy-boto3-ce (>=1.35.0,<1.36.0)"] @@ -503,6 +476,7 @@ docdb = ["mypy-boto3-docdb (>=1.35.0,<1.36.0)"] docdb-elastic = ["mypy-boto3-docdb-elastic (>=1.35.0,<1.36.0)"] drs = ["mypy-boto3-drs (>=1.35.0,<1.36.0)"] ds = ["mypy-boto3-ds (>=1.35.0,<1.36.0)"] +ds-data = ["mypy-boto3-ds-data (>=1.35.0,<1.36.0)"] dynamodb = ["mypy-boto3-dynamodb (>=1.35.0,<1.36.0)"] dynamodbstreams = ["mypy-boto3-dynamodbstreams (>=1.35.0,<1.36.0)"] ebs = ["mypy-boto3-ebs (>=1.35.0,<1.36.0)"] @@ -538,6 +512,7 @@ forecastquery = ["mypy-boto3-forecastquery (>=1.35.0,<1.36.0)"] frauddetector = ["mypy-boto3-frauddetector (>=1.35.0,<1.36.0)"] freetier = ["mypy-boto3-freetier (>=1.35.0,<1.36.0)"] fsx = ["mypy-boto3-fsx (>=1.35.0,<1.36.0)"] +full = ["boto3-stubs-full"] gamelift = ["mypy-boto3-gamelift (>=1.35.0,<1.36.0)"] glacier = ["mypy-boto3-glacier (>=1.35.0,<1.36.0)"] globalaccelerator = ["mypy-boto3-globalaccelerator (>=1.35.0,<1.36.0)"] @@ -616,6 +591,7 @@ marketplace-agreement = ["mypy-boto3-marketplace-agreement (>=1.35.0,<1.36.0)"] marketplace-catalog = ["mypy-boto3-marketplace-catalog (>=1.35.0,<1.36.0)"] marketplace-deployment = ["mypy-boto3-marketplace-deployment 
(>=1.35.0,<1.36.0)"] marketplace-entitlement = ["mypy-boto3-marketplace-entitlement (>=1.35.0,<1.36.0)"] +marketplace-reporting = ["mypy-boto3-marketplace-reporting (>=1.35.0,<1.36.0)"] marketplacecommerceanalytics = ["mypy-boto3-marketplacecommerceanalytics (>=1.35.0,<1.36.0)"] mediaconnect = ["mypy-boto3-mediaconnect (>=1.35.0,<1.36.0)"] mediaconvert = ["mypy-boto3-mediaconvert (>=1.35.0,<1.36.0)"] @@ -770,7 +746,6 @@ wafv2 = ["mypy-boto3-wafv2 (>=1.35.0,<1.36.0)"] wellarchitected = ["mypy-boto3-wellarchitected (>=1.35.0,<1.36.0)"] wisdom = ["mypy-boto3-wisdom (>=1.35.0,<1.36.0)"] workdocs = ["mypy-boto3-workdocs (>=1.35.0,<1.36.0)"] -worklink = ["mypy-boto3-worklink (>=1.35.0,<1.36.0)"] workmail = ["mypy-boto3-workmail (>=1.35.0,<1.36.0)"] workmailmessageflow = ["mypy-boto3-workmailmessageflow (>=1.35.0,<1.36.0)"] workspaces = ["mypy-boto3-workspaces (>=1.35.0,<1.36.0)"] @@ -780,13 +755,13 @@ xray = ["mypy-boto3-xray (>=1.35.0,<1.36.0)"] [[package]] name = "botocore" -version = "1.35.19" +version = "1.35.34" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.19-py3-none-any.whl", hash = "sha256:c83f7f0cacfe7c19b109b363ebfa8736e570d24922f16ed371681f58ebab44a9"}, - {file = "botocore-1.35.19.tar.gz", hash = "sha256:42d6d8db7250cbd7899f786f9861e02cab17dc238f64d6acb976098ed9809625"}, + {file = "botocore-1.35.34-py3-none-any.whl", hash = "sha256:ccb0fe397b11b81c9abc0c87029d17298e17bf658d8db5c0c5a551a12a207e7a"}, + {file = "botocore-1.35.34.tar.gz", hash = "sha256:789b6501a3bb4a9591c1fe10da200cc315c1fa5df5ada19c720d8ef06439b3e3"}, ] [package.dependencies] @@ -798,7 +773,7 @@ urllib3 = [ ] [package.extras] -crt = ["awscrt (==0.21.5)"] +crt = ["awscrt (==0.22.0)"] [[package]] name = "botocore-stubs" @@ -1107,13 +1082,13 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cohere" -version = "5.9.2" +version = "5.10.0" description = "" optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "cohere-5.9.2-py3-none-any.whl", hash = "sha256:169ee06b0a54f8a913d42b19123bd72c1a72833275d544a52606d307f5547a7b"}, - {file = "cohere-5.9.2.tar.gz", hash = "sha256:1860c527b2a8a5593873a342b0bf572220b6db7966c0782076b3f2740ab3d94d"}, + {file = "cohere-5.10.0-py3-none-any.whl", hash = "sha256:46e50e3e8514a99cf77b4c022c8077a6205fba948051c33087ddeb66ec706f0a"}, + {file = "cohere-5.10.0.tar.gz", hash = "sha256:21020a7ae4c30f72991ef91566a926a9d7d1485d7abeed7bfa2bd6f35ea34783"}, ] [package.dependencies] @@ -1350,13 +1325,13 @@ packaging = "*" [[package]] name = "diffusers" -version = "0.30.2" +version = "0.30.3" description = "State-of-the-art diffusion in PyTorch and JAX." 
optional = true python-versions = ">=3.8.0" files = [ - {file = "diffusers-0.30.2-py3-none-any.whl", hash = "sha256:739826043147c2b59560944591dfdea5d24cd4fb15e751abbe20679a289bece8"}, - {file = "diffusers-0.30.2.tar.gz", hash = "sha256:641875f78f36bdfa4b9af752b124d1fd6d431eadd5547fe0a3f354ae0af2636c"}, + {file = "diffusers-0.30.3-py3-none-any.whl", hash = "sha256:1b70209e4d2c61223b96a7e13bc4d70869c8b0b68f54a35ce3a67fcf813edeee"}, + {file = "diffusers-0.30.3.tar.gz", hash = "sha256:67c5eb25d5b50bf0742624ef43fe0f6d1e1604f64aad3e8558469cbe89ecf72f"}, ] [package.dependencies] @@ -1455,13 +1430,13 @@ files = [ [[package]] name = "duckduckgo-search" -version = "6.2.13" +version = "6.3.0" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." optional = true python-versions = ">=3.8" files = [ - {file = "duckduckgo_search-6.2.13-py3-none-any.whl", hash = "sha256:a6618fb2744fa1d081b1bf2e47ef8051de993276a15c20f4e879a150726472de"}, - {file = "duckduckgo_search-6.2.13.tar.gz", hash = "sha256:f89a9782f0f47d18c01a761c83453d0aef7a4335d1b6466fc47709652d5ca791"}, + {file = "duckduckgo_search-6.3.0-py3-none-any.whl", hash = "sha256:9a231a7b325226811cf7d35a240f3f501e718ae10a1aa0a638cabc80e129dfe7"}, + {file = "duckduckgo_search-6.3.0.tar.gz", hash = "sha256:e9f56955569325a7d9cacda2488ca78bf6629a459e74415892bee560b664f5eb"}, ] [package.dependencies] @@ -1474,13 +1449,13 @@ lxml = ["lxml (>=5.2.2)"] [[package]] name = "elevenlabs" -version = "1.8.1" +version = "1.9.0" description = "" optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "elevenlabs-1.8.1-py3-none-any.whl", hash = "sha256:d9a2a62963b008f4b8f52326984b1cf35fd455ebbe24ff136cb3009908e1185d"}, - {file = "elevenlabs-1.8.1.tar.gz", hash = "sha256:a6309fc7ca91d379dfbec3fddeb5a6b755847c25fa935f936654d55a95d7b9a9"}, + {file = "elevenlabs-1.9.0-py3-none-any.whl", hash = "sha256:e8828d154085c717bc5b35c5d8a65d3421655a7670643fc596ba54dc53e17c30"}, + 
{file = "elevenlabs-1.9.0.tar.gz", hash = "sha256:873baad8f687b865436f2ca6d697a0d75f38796bec1cc0728c9ed589d1d846b2"}, ] [package.dependencies] @@ -1501,6 +1476,22 @@ files = [ {file = "Events-0.5-py3-none-any.whl", hash = "sha256:a7286af378ba3e46640ac9825156c93bdba7502174dd696090fdfcd4d80a1abd"}, ] +[[package]] +name = "exa-py" +version = "1.4.0" +description = "Python SDK for Exa API." +optional = true +python-versions = "*" +files = [ + {file = "exa_py-1.4.0-py3-none-any.whl", hash = "sha256:89e425c44ee8a78e57ca831a2b1c12adfc75be0ac5c1de0c6ab49f91b0c5398b"}, + {file = "exa_py-1.4.0.tar.gz", hash = "sha256:d31e13ab290203c44f7fe66ae34c9ad3329e0d9fb7a346d96b206e1d0f647eed"}, +] + +[package.dependencies] +openai = ">=1.10.0" +requests = "*" +typing-extensions = "*" + [[package]] name = "exceptiongroup" version = "1.2.2" @@ -1595,7 +1586,7 @@ typing = ["typing-extensions (>=4.8)"] name = "filetype" version = "1.2.0" description = "Infer file type and MIME type of any file/buffer. No external dependencies." 
-optional = true +optional = false python-versions = "*" files = [ {file = "filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25"}, @@ -1692,7 +1683,7 @@ files = [ name = "fsspec" version = "2024.6.1" description = "File-system specification" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"}, @@ -1775,13 +1766,13 @@ dev = ["flake8", "markdown", "twine", "wheel"] [[package]] name = "google-ai-generativelanguage" -version = "0.6.9" +version = "0.6.10" description = "Google Ai Generativelanguage API client library" optional = true python-versions = ">=3.7" files = [ - {file = "google_ai_generativelanguage-0.6.9-py3-none-any.whl", hash = "sha256:50360cd80015d1a8cc70952e98560f32fa06ddee2e8e9f4b4b98e431dc561e0b"}, - {file = "google_ai_generativelanguage-0.6.9.tar.gz", hash = "sha256:899f1d3a06efa9739f1cd9d2788070178db33c89d4a76f2e8f4da76f649155fa"}, + {file = "google_ai_generativelanguage-0.6.10-py3-none-any.whl", hash = "sha256:854a2bf833d18be05ad5ef13c755567b66a4f4a870f099b62c61fe11bddabcf4"}, + {file = "google_ai_generativelanguage-0.6.10.tar.gz", hash = "sha256:6fa642c964d8728006fe7e8771026fc0b599ae0ebeaf83caf550941e8e693455"}, ] [package.dependencies] @@ -1879,16 +1870,16 @@ httplib2 = ">=0.19.0" [[package]] name = "google-generativeai" -version = "0.8.1" +version = "0.8.3" description = "Google Generative AI High level API client library and tools." 
optional = true python-versions = ">=3.9" files = [ - {file = "google_generativeai-0.8.1-py3-none-any.whl", hash = "sha256:b031877f24d51af0945207657c085896a0a886eceec7a1cb7029327b0aa6e2f6"}, + {file = "google_generativeai-0.8.3-py3-none-any.whl", hash = "sha256:1108ff89d5b8e59f51e63d1a8bf84701cd84656e17ca28d73aeed745e736d9b7"}, ] [package.dependencies] -google-ai-generativelanguage = "0.6.9" +google-ai-generativelanguage = "0.6.10" google-api-core = "*" google-api-python-client = "*" google-auth = ">=2.15.0" @@ -2270,13 +2261,13 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.24.7" +version = "0.25.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = true python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.24.7-py3-none-any.whl", hash = "sha256:a212c555324c8a7b1ffdd07266bb7e7d69ca71aa238d27b7842d65e9a26ac3e5"}, - {file = "huggingface_hub-0.24.7.tar.gz", hash = "sha256:0ad8fb756e2831da0ac0491175b960f341fe06ebcf80ed6f8728313f95fc0207"}, + {file = "huggingface_hub-0.25.1-py3-none-any.whl", hash = "sha256:a5158ded931b3188f54ea9028097312cb0acd50bffaaa2612014c3c526b44972"}, + {file = "huggingface_hub-0.25.1.tar.gz", hash = "sha256:9ff7cb327343211fbd06e2b149b8f362fd1e389454f3f14c6db75a4999ee20ff"}, ] [package.dependencies] @@ -2905,13 +2896,13 @@ files = [ [[package]] name = "marqo" -version = "3.8.0" +version = "3.8.1" description = "Tensor search for humans" optional = true python-versions = ">=3" files = [ - {file = "marqo-3.8.0-py3-none-any.whl", hash = "sha256:6b3ac5d5b308aa771798c3bbf95ad42dc6bb73e34701ba9073f9c4eaf4a5ac8a"}, - {file = "marqo-3.8.0.tar.gz", hash = "sha256:89f36227c54ad0e60a638c524e9864211a3aa7898bec3d6bc2473a748fe7c8b3"}, + {file = "marqo-3.8.1-py3-none-any.whl", hash = "sha256:ff88782766bfaccd371ae595832e024a90a438406436415251ecf75949e22088"}, + {file = "marqo-3.8.1.tar.gz", hash = 
"sha256:1e7565805bc78752bd18f1fcaa21fa008b63422ca354cbdc26a7ada1fe9ac9b8"}, ] [package.dependencies] @@ -3081,13 +3072,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.5.34" +version = "9.5.39" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.34-py3-none-any.whl", hash = "sha256:54caa8be708de2b75167fd4d3b9f3d949579294f49cb242515d4653dbee9227e"}, - {file = "mkdocs_material-9.5.34.tar.gz", hash = "sha256:1e60ddf716cfb5679dfd65900b8a25d277064ed82d9a53cd5190e3f894df7840"}, + {file = "mkdocs_material-9.5.39-py3-none-any.whl", hash = "sha256:0f2f68c8db89523cb4a59705cd01b4acd62b2f71218ccb67e1e004e560410d2b"}, + {file = "mkdocs_material-9.5.39.tar.gz", hash = "sha256:25faa06142afa38549d2b781d475a86fb61de93189f532b88e69bf11e5e5c3be"}, ] [package.dependencies] @@ -3210,56 +3201,57 @@ files = [ [[package]] name = "moto" -version = "4.2.14" +version = "5.0.16" description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "moto-4.2.14-py2.py3-none-any.whl", hash = "sha256:6d242dbbabe925bb385ddb6958449e5c827670b13b8e153ed63f91dbdb50372c"}, - {file = "moto-4.2.14.tar.gz", hash = "sha256:8f9263ca70b646f091edcc93e97cda864a542e6d16ed04066b1370ed217bd190"}, + {file = "moto-5.0.16-py2.py3-none-any.whl", hash = "sha256:4ce1f34830307f7b3d553d77a7ef26066ab3b70006203d4226b048c9d11a3be4"}, + {file = "moto-5.0.16.tar.gz", hash = "sha256:f4afb176a964cd7a70da9bc5e053d43109614ce3cab26044bcbb53610435dff4"}, ] [package.dependencies] boto3 = ">=1.9.201" -botocore = ">=1.12.201" +botocore = ">=1.14.0" cryptography = ">=3.3.1" docker = {version = ">=3.0.0", optional = true, markers = "extra == \"dynamodb\""} Jinja2 = ">=2.10.1" jsondiff = {version = ">=1.1.2", optional = true, markers = "extra == \"iotdata\""} -py-partiql-parser = {version = "0.5.0", optional = true, markers = "extra == \"dynamodb\""} +py-partiql-parser = {version = "0.5.6", 
optional = true, markers = "extra == \"dynamodb\""} python-dateutil = ">=2.1,<3.0.0" requests = ">=2.5" -responses = ">=0.13.0" +responses = ">=0.15.0" werkzeug = ">=0.5,<2.2.0 || >2.2.0,<2.2.1 || >2.2.1" xmltodict = "*" [package.extras] -all = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "ecdsa (!=0.15)", "graphql-core", "jsondiff (>=1.1.2)", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.0)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)", "setuptools", "sshpubkeys (>=3.1.0)"] -apigateway = ["PyYAML (>=5.1)", "ecdsa (!=0.15)", "openapi-spec-validator (>=0.5.0)", "python-jose[cryptography] (>=3.1.0,<4.0.0)"] -apigatewayv2 = ["PyYAML (>=5.1)"] +all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)", "setuptools"] +apigateway = ["PyYAML (>=5.1)", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)"] +apigatewayv2 = ["PyYAML (>=5.1)", "openapi-spec-validator (>=0.5.0)"] appsync = ["graphql-core"] awslambda = ["docker (>=3.0.0)"] batch = ["docker (>=3.0.0)"] -cloudformation = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "ecdsa (!=0.15)", "graphql-core", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.0)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)", "setuptools", "sshpubkeys (>=3.1.0)"] -cognitoidp = ["ecdsa (!=0.15)", "python-jose[cryptography] (>=3.1.0,<4.0.0)"] -dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.0)"] -dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.0)"] -ec2 = ["sshpubkeys (>=3.1.0)"] +cloudformation = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", 
"docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)", "setuptools"] +cognitoidp = ["joserfc (>=0.9.0)"] +dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.6)"] +dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.5.6)"] +events = ["jsonpath-ng"] glue = ["pyparsing (>=3.0.7)"] iotdata = ["jsondiff (>=1.1.2)"] -proxy = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "ecdsa (!=0.15)", "graphql-core", "jsondiff (>=1.1.2)", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.0)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)", "setuptools", "sshpubkeys (>=3.1.0)"] -resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "ecdsa (!=0.15)", "graphql-core", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.0)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)"] -s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.5.0)"] -s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.5.0)"] -server = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "ecdsa (!=0.15)", "flask (!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.0)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)", "setuptools", "sshpubkeys (>=3.1.0)"] +proxy = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)", "setuptools"] +resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", 
"graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)"] +s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.5.6)"] +s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.5.6)"] +server = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "flask (!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)", "setuptools"] ssm = ["PyYAML (>=5.1)"] +stepfunctions = ["antlr4-python3-runtime", "jsonpath-ng"] xray = ["aws-xray-sdk (>=0.93,!=0.96)", "setuptools"] [[package]] name = "mpmath" version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" -optional = true +optional = false python-versions = "*" files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, @@ -3385,6 +3377,20 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} +[[package]] +name = "mypy-boto3-dynamodb" +version = "1.35.24" +description = "Type annotations for boto3.DynamoDB 1.35.24 service generated with mypy-boto3-builder 8.1.1" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy_boto3_dynamodb-1.35.24-py3-none-any.whl", hash = "sha256:022859543c5314f14fb03ef4e445e34b97b9bc0cecb003c14c10943a2eaa3ff7"}, + {file = "mypy_boto3_dynamodb-1.35.24.tar.gz", hash = "sha256:55bf897a1d0e354579edb05001f4bc4f472b9452badd9db24876c31bdf3f72a1"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + [[package]] name = "mypy-boto3-iam" version = "1.35.0" @@ -3399,6 +3405,20 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = 
"python_version < \"3.12\""} +[[package]] +name = "mypy-boto3-iot-data" +version = "1.35.0" +description = "Type annotations for boto3.IoTDataPlane 1.35.0 service generated with mypy-boto3-builder 7.26.0" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy_boto3_iot_data-1.35.0-py3-none-any.whl", hash = "sha256:1f442679a71f22a82b0436ee4f71c06104a9ed722aa71c6800fd93bd345cfc03"}, + {file = "mypy_boto3_iot_data-1.35.0.tar.gz", hash = "sha256:e83cbbd948bc388ed139d2820442af1d319ca37dce708df44295c4acfcfb30f8"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + [[package]] name = "mypy-boto3-opensearch" version = "1.35.0" @@ -3413,6 +3433,20 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} +[[package]] +name = "mypy-boto3-redshift-data" +version = "1.35.10" +description = "Type annotations for boto3.RedshiftDataAPIService 1.35.10 service generated with mypy-boto3-builder 7.26.1" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy_boto3_redshift_data-1.35.10-py3-none-any.whl", hash = "sha256:1d37d8453c4f3e6b688703a91316729ee2dcaec101326c4f58658d8526d5fc09"}, + {file = "mypy_boto3_redshift_data-1.35.10.tar.gz", hash = "sha256:2cfe518ef3027c2b050facffd2621924458ddf2fb3df9699cdba33e8a6859594"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + [[package]] name = "mypy-boto3-s3" version = "1.35.2" @@ -3441,6 +3475,20 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} +[[package]] +name = "mypy-boto3-sqs" +version = "1.35.0" +description = "Type annotations for boto3.SQS 1.35.0 service generated with mypy-boto3-builder 7.26.0" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy_boto3_sqs-1.35.0-py3-none-any.whl", hash = 
"sha256:9fd6e622ed231c06f7542ba6f8f0eea92046cace24defa95d0d0ce04e7caee0c"}, + {file = "mypy_boto3_sqs-1.35.0.tar.gz", hash = "sha256:61752f1c2bf2efa3815f64d43c25b4a39dbdbd9e472ae48aa18d7c6d2a7a6eb8"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + [[package]] name = "ndg-httpsclient" version = "0.5.1" @@ -3461,7 +3509,7 @@ PyOpenSSL = "*" name = "networkx" version = "3.2.1" description = "Python package for creating and manipulating graphs and networks" -optional = true +optional = false python-versions = ">=3.9" files = [ {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, @@ -3560,7 +3608,7 @@ files = [ name = "nvidia-cublas-cu12" version = "12.1.3.1" description = "CUBLAS native runtime libraries" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, @@ -3571,7 +3619,7 @@ files = [ name = "nvidia-cuda-cupti-cu12" version = "12.1.105" description = "CUDA profiling tools runtime libs." 
-optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, @@ -3582,7 +3630,7 @@ files = [ name = "nvidia-cuda-nvrtc-cu12" version = "12.1.105" description = "NVRTC native runtime libraries" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, @@ -3593,7 +3641,7 @@ files = [ name = "nvidia-cuda-runtime-cu12" version = "12.1.105" description = "CUDA Runtime native Libraries" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, @@ -3604,7 +3652,7 @@ files = [ name = "nvidia-cudnn-cu12" version = "9.1.0.70" description = "cuDNN runtime libraries" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f"}, @@ -3618,7 +3666,7 @@ nvidia-cublas-cu12 = "*" name = "nvidia-cufft-cu12" version = "11.0.2.54" description = "CUFFT native runtime libraries" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, @@ -3629,7 +3677,7 @@ files = [ name = "nvidia-curand-cu12" version = "10.3.2.106" description = "CURAND native runtime libraries" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = 
"sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, @@ -3640,7 +3688,7 @@ files = [ name = "nvidia-cusolver-cu12" version = "11.4.5.107" description = "CUDA solver native runtime libraries" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, @@ -3656,7 +3704,7 @@ nvidia-nvjitlink-cu12 = "*" name = "nvidia-cusparse-cu12" version = "12.1.0.106" description = "CUSPARSE native runtime libraries" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, @@ -3670,7 +3718,7 @@ nvidia-nvjitlink-cu12 = "*" name = "nvidia-nccl-cu12" version = "2.20.5" description = "NVIDIA Collective Communication Library (NCCL) Runtime" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, @@ -3679,21 +3727,21 @@ files = [ [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.6.20" +version = "12.6.77" description = "Nvidia JIT LTO Library" -optional = true +optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-manylinux2014_aarch64.whl", hash = "sha256:84fb38465a5bc7c70cbc320cfd0963eb302ee25a5e939e9f512bbba55b6072fb"}, - {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-manylinux2014_x86_64.whl", hash = "sha256:562ab97ea2c23164823b2a89cb328d01d45cb99634b8c65fe7cd60d14562bd79"}, - {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-win_amd64.whl", hash = "sha256:ed3c43a17f37b0c922a919203d2d36cbef24d41cc3e6b625182f8b58203644f6"}, + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:3bf10d85bb1801e9c894c6e197e44dd137d2a0a9e43f8450e9ad13f2df0dd52d"}, + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9ae346d16203ae4ea513be416495167a0101d33d2d14935aa9c1829a3fb45142"}, + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:410718cd44962bed862a31dd0318620f6f9a8b28a6291967bcfcb446a6516771"}, ] [[package]] name = "nvidia-nvtx-cu12" version = "12.1.105" description = "NVIDIA Tools Extension" -optional = true +optional = false python-versions = ">=3" files = [ {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, @@ -3716,13 +3764,13 @@ httpx = ">=0.27.0,<0.28.0" [[package]] name = "openai" -version = "1.45.1" +version = "1.51.1" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.45.1-py3-none-any.whl", hash = "sha256:4a6cce402aec803ae57ae7eff4b5b94bf6c0e1703a8d85541c27243c2adeadf8"}, - {file = "openai-1.45.1.tar.gz", hash = "sha256:f79e384916b219ab2f028bbf9c778e81291c61eb0645ccfa1828a4b18b55d534"}, + {file = "openai-1.51.1-py3-none-any.whl", hash = "sha256:035ba637bef7523282b5b8d9f2f5fdc0bb5bc18d52af2bfc7f64e4a7b0a169fb"}, + {file = "openai-1.51.1.tar.gz", hash = "sha256:a4908d68e0a1f4bcb45cbaf273c5fbdc3a4fa6239bb75128b58b94f7d5411563"}, ] [package.dependencies] @@ -3987,12 +4035,13 @@ files = [ [[package]] name = "pgvector" -version = "0.3.3" +version = "0.3.5" description = "pgvector support for Python" optional = true python-versions = ">=3.8" files = [ - {file = "pgvector-0.3.3-py2.py3-none-any.whl", hash = "sha256:2c14c9a5219ccf3757cda493dc756506992afad6233dafcecb6d8ab08155f177"}, + {file = "pgvector-0.3.5-py3-none-any.whl", hash = "sha256:56cca90392e596ea18873c593ec858a1984a77d16d1f82b8d0c180e79ef1018f"}, + {file = "pgvector-0.3.5.tar.gz", hash = 
"sha256:e876c9ee382c4c2f7ee57691a4c4015d688c7222e47448ce310ded03ecfafe2f"}, ] [package.dependencies] @@ -4215,13 +4264,13 @@ files = [ [[package]] name = "pre-commit" -version = "3.8.0" +version = "4.0.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false python-versions = ">=3.9" files = [ - {file = "pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f"}, - {file = "pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af"}, + {file = "pre_commit-4.0.0-py2.py3-none-any.whl", hash = "sha256:0ca2341cf94ac1865350970951e54b1a50521e57b7b500403307aed4315a1234"}, + {file = "pre_commit-4.0.0.tar.gz", hash = "sha256:5d9807162cc5537940f94f266cbe2d716a75cfad0d78a317a92cac16287cfed6"}, ] [package.dependencies] @@ -4288,35 +4337,6 @@ files = [ {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"}, ] -[[package]] -name = "psutil" -version = "6.0.0" -description = "Cross-platform lib for process and system monitoring in Python." 
-optional = true -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" -files = [ - {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, - {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, - {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, - {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, - {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, - {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = 
"sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, - {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, - {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, - {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, - {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, -] - -[package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] - [[package]] name = "psycopg2-binary" version = "2.9.9" @@ -4424,13 +4444,13 @@ tornado = ["tornado (>=5.0.0)"] [[package]] name = "py-partiql-parser" -version = "0.5.0" +version = "0.5.6" description = "Pure Python PartiQL Parser" optional = false python-versions = "*" files = [ - {file = "py-partiql-parser-0.5.0.tar.gz", hash = "sha256:427a662e87d51a0a50150fc8b75c9ebb4a52d49129684856c40c88b8c8e027e4"}, - {file = "py_partiql_parser-0.5.0-py3-none-any.whl", hash = "sha256:dc454c27526adf62deca5177ea997bf41fac4fd109c5d4c8d81f984de738ba8f"}, + {file = "py_partiql_parser-0.5.6-py2.py3-none-any.whl", hash = "sha256:622d7b0444becd08c1f4e9e73b31690f4b1c309ab6e5ed45bf607fe71319309f"}, + {file = "py_partiql_parser-0.5.6.tar.gz", hash = "sha256:6339f6bf85573a35686529fc3f491302e71dd091711dfe8df3be89a93767f97b"}, ] [package.extras] @@ -4645,13 +4665,13 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pymdown-extensions" -version = "10.9" +version = "10.11.2" description = "Extension pack for Python Markdown." 
optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.9-py3-none-any.whl", hash = "sha256:d323f7e90d83c86113ee78f3fe62fc9dee5f56b54d912660703ea1816fed5626"}, - {file = "pymdown_extensions-10.9.tar.gz", hash = "sha256:6ff740bcd99ec4172a938970d42b96128bdc9d4b9bcad72494f29921dc69b753"}, + {file = "pymdown_extensions-10.11.2-py3-none-any.whl", hash = "sha256:41cdde0a77290e480cf53892f5c5e50921a7ee3e5cd60ba91bf19837b33badcf"}, + {file = "pymdown_extensions-10.11.2.tar.gz", hash = "sha256:bc8847ecc9e784a098efd35e20cba772bc5a1b529dfcef9dc1972db9021a1049"}, ] [package.dependencies] @@ -4663,61 +4683,70 @@ extra = ["pygments (>=2.12)"] [[package]] name = "pymongo" -version = "4.8.0" +version = "4.10.1" description = "Python driver for MongoDB " optional = true python-versions = ">=3.8" files = [ - {file = "pymongo-4.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f2b7bec27e047e84947fbd41c782f07c54c30c76d14f3b8bf0c89f7413fac67a"}, - {file = "pymongo-4.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c68fe128a171493018ca5c8020fc08675be130d012b7ab3efe9e22698c612a1"}, - {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:920d4f8f157a71b3cb3f39bc09ce070693d6e9648fb0e30d00e2657d1dca4e49"}, - {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52b4108ac9469febba18cea50db972605cc43978bedaa9fea413378877560ef8"}, - {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:180d5eb1dc28b62853e2f88017775c4500b07548ed28c0bd9c005c3d7bc52526"}, - {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aec2b9088cdbceb87e6ca9c639d0ff9b9d083594dda5ca5d3c4f6774f4c81b33"}, - {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0cf61450feadca81deb1a1489cb1a3ae1e4266efd51adafecec0e503a8dcd84"}, - 
{file = "pymongo-4.8.0-cp310-cp310-win32.whl", hash = "sha256:8b18c8324809539c79bd6544d00e0607e98ff833ca21953df001510ca25915d1"}, - {file = "pymongo-4.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e5df28f74002e37bcbdfdc5109799f670e4dfef0fb527c391ff84f078050e7b5"}, - {file = "pymongo-4.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6b50040d9767197b77ed420ada29b3bf18a638f9552d80f2da817b7c4a4c9c68"}, - {file = "pymongo-4.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:417369ce39af2b7c2a9c7152c1ed2393edfd1cbaf2a356ba31eb8bcbd5c98dd7"}, - {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf821bd3befb993a6db17229a2c60c1550e957de02a6ff4dd0af9476637b2e4d"}, - {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9365166aa801c63dff1a3cb96e650be270da06e3464ab106727223123405510f"}, - {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc8b8582f4209c2459b04b049ac03c72c618e011d3caa5391ff86d1bda0cc486"}, - {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e5019f75f6827bb5354b6fef8dfc9d6c7446894a27346e03134d290eb9e758"}, - {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b5802151fc2b51cd45492c80ed22b441d20090fb76d1fd53cd7760b340ff554"}, - {file = "pymongo-4.8.0-cp311-cp311-win32.whl", hash = "sha256:4bf58e6825b93da63e499d1a58de7de563c31e575908d4e24876234ccb910eba"}, - {file = "pymongo-4.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:b747c0e257b9d3e6495a018309b9e0c93b7f0d65271d1d62e572747f4ffafc88"}, - {file = "pymongo-4.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e6a720a3d22b54183352dc65f08cd1547204d263e0651b213a0a2e577e838526"}, - {file = "pymongo-4.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:31e4d21201bdf15064cf47ce7b74722d3e1aea2597c6785882244a3bb58c7eab"}, - {file = 
"pymongo-4.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b804bb4f2d9dc389cc9e827d579fa327272cdb0629a99bfe5b83cb3e269ebf"}, - {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2fbdb87fe5075c8beb17a5c16348a1ea3c8b282a5cb72d173330be2fecf22f5"}, - {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd39455b7ee70aabee46f7399b32ab38b86b236c069ae559e22be6b46b2bbfc4"}, - {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:940d456774b17814bac5ea7fc28188c7a1338d4a233efbb6ba01de957bded2e8"}, - {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:236bbd7d0aef62e64caf4b24ca200f8c8670d1a6f5ea828c39eccdae423bc2b2"}, - {file = "pymongo-4.8.0-cp312-cp312-win32.whl", hash = "sha256:47ec8c3f0a7b2212dbc9be08d3bf17bc89abd211901093e3ef3f2adea7de7a69"}, - {file = "pymongo-4.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:e84bc7707492f06fbc37a9f215374d2977d21b72e10a67f1b31893ec5a140ad8"}, - {file = "pymongo-4.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:519d1bab2b5e5218c64340b57d555d89c3f6c9d717cecbf826fb9d42415e7750"}, - {file = "pymongo-4.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:87075a1feb1e602e539bdb1ef8f4324a3427eb0d64208c3182e677d2c0718b6f"}, - {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f53429515d2b3e86dcc83dadecf7ff881e538c168d575f3688698a8707b80a"}, - {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdc20cd1e1141b04696ffcdb7c71e8a4a665db31fe72e51ec706b3bdd2d09f36"}, - {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:284d0717d1a7707744018b0b6ee7801b1b1ff044c42f7be7a01bb013de639470"}, - {file = 
"pymongo-4.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5bf0eb8b6ef40fa22479f09375468c33bebb7fe49d14d9c96c8fd50355188b0"}, - {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ecd71b9226bd1d49416dc9f999772038e56f415a713be51bf18d8676a0841c8"}, - {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0061af6e8c5e68b13f1ec9ad5251247726653c5af3c0bbdfbca6cf931e99216"}, - {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:658d0170f27984e0d89c09fe5c42296613b711a3ffd847eb373b0dbb5b648d5f"}, - {file = "pymongo-4.8.0-cp38-cp38-win32.whl", hash = "sha256:3ed1c316718a2836f7efc3d75b4b0ffdd47894090bc697de8385acd13c513a70"}, - {file = "pymongo-4.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:7148419eedfea9ecb940961cfe465efaba90595568a1fb97585fb535ea63fe2b"}, - {file = "pymongo-4.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8400587d594761e5136a3423111f499574be5fd53cf0aefa0d0f05b180710b0"}, - {file = "pymongo-4.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af3e98dd9702b73e4e6fd780f6925352237f5dce8d99405ff1543f3771201704"}, - {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de3a860f037bb51f968de320baef85090ff0bbb42ec4f28ec6a5ddf88be61871"}, - {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fc18b3a093f3db008c5fea0e980dbd3b743449eee29b5718bc2dc15ab5088bb"}, - {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18c9d8f975dd7194c37193583fd7d1eb9aea0c21ee58955ecf35362239ff31ac"}, - {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:408b2f8fdbeca3c19e4156f28fff1ab11c3efb0407b60687162d49f68075e63c"}, - {file = 
"pymongo-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6564780cafd6abeea49759fe661792bd5a67e4f51bca62b88faab497ab5fe89"}, - {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d18d86bc9e103f4d3d4f18b85a0471c0e13ce5b79194e4a0389a224bb70edd53"}, - {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9097c331577cecf8034422956daaba7ec74c26f7b255d718c584faddd7fa2e3c"}, - {file = "pymongo-4.8.0-cp39-cp39-win32.whl", hash = "sha256:d5428dbcd43d02f6306e1c3c95f692f68b284e6ee5390292242f509004c9e3a8"}, - {file = "pymongo-4.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:ef7225755ed27bfdb18730c68f6cb023d06c28f2b734597480fb4c0e500feb6f"}, - {file = "pymongo-4.8.0.tar.gz", hash = "sha256:454f2295875744dc70f1881e4b2eb99cdad008a33574bc8aaf120530f66c0cde"}, + {file = "pymongo-4.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e699aa68c4a7dea2ab5a27067f7d3e08555f8d2c0dc6a0c8c60cfd9ff2e6a4b1"}, + {file = "pymongo-4.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:70645abc714f06b4ad6b72d5bf73792eaad14e3a2cfe29c62a9c81ada69d9e4b"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae2fd94c9fe048c94838badcc6e992d033cb9473eb31e5710b3707cba5e8aee2"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ded27a4a5374dae03a92e084a60cdbcecd595306555bda553b833baf3fc4868"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ecc2455e3974a6c429687b395a0bc59636f2d6aedf5785098cf4e1f180f1c71"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920fee41f7d0259f5f72c1f1eb331bc26ffbdc952846f9bd8c3b119013bb52c"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e0a15665b2d6cf364f4cd114d62452ce01d71abfbd9c564ba8c74dcd7bbd6822"}, + {file = "pymongo-4.10.1-cp310-cp310-win32.whl", hash = "sha256:29e1c323c28a4584b7095378ff046815e39ff82cdb8dc4cc6dfe3acf6f9ad1f8"}, + {file = "pymongo-4.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:88dc4aa45f8744ccfb45164aedb9a4179c93567bbd98a33109d7dc400b00eb08"}, + {file = "pymongo-4.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:57ee6becae534e6d47848c97f6a6dff69e3cce7c70648d6049bd586764febe59"}, + {file = "pymongo-4.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6f437a612f4d4f7aca1812311b1e84477145e950fdafe3285b687ab8c52541f3"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a970fd3117ab40a4001c3dad333bbf3c43687d90f35287a6237149b5ccae61d"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c4d0e7cd08ef9f8fbf2d15ba281ed55604368a32752e476250724c3ce36c72e"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca6f700cff6833de4872a4e738f43123db34400173558b558ae079b5535857a4"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cec237c305fcbeef75c0bcbe9d223d1e22a6e3ba1b53b2f0b79d3d29c742b45b"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3337804ea0394a06e916add4e5fac1c89902f1b6f33936074a12505cab4ff05"}, + {file = "pymongo-4.10.1-cp311-cp311-win32.whl", hash = "sha256:778ac646ce6ac1e469664062dfe9ae1f5c9961f7790682809f5ec3b8fda29d65"}, + {file = "pymongo-4.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:9df4ab5594fdd208dcba81be815fa8a8a5d8dedaf3b346cbf8b61c7296246a7a"}, + {file = "pymongo-4.10.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fbedc4617faa0edf423621bb0b3b8707836687161210d470e69a4184be9ca011"}, + {file = "pymongo-4.10.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:7bd26b2aec8ceeb95a5d948d5cc0f62b0eb6d66f3f4230705c1e3d3d2c04ec76"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb104c3c2a78d9d85571c8ac90ec4f95bca9b297c6eee5ada71fabf1129e1674"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4924355245a9c79f77b5cda2db36e0f75ece5faf9f84d16014c0a297f6d66786"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11280809e5dacaef4971113f0b4ff4696ee94cfdb720019ff4fa4f9635138252"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5d55f2a82e5eb23795f724991cac2bffbb1c0f219c0ba3bf73a835f97f1bb2e"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e974ab16a60be71a8dfad4e5afccf8dd05d41c758060f5d5bda9a758605d9a5d"}, + {file = "pymongo-4.10.1-cp312-cp312-win32.whl", hash = "sha256:544890085d9641f271d4f7a47684450ed4a7344d6b72d5968bfae32203b1bb7c"}, + {file = "pymongo-4.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:dcc07b1277e8b4bf4d7382ca133850e323b7ab048b8353af496d050671c7ac52"}, + {file = "pymongo-4.10.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:90bc6912948dfc8c363f4ead54d54a02a15a7fee6cfafb36dc450fc8962d2cb7"}, + {file = "pymongo-4.10.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:594dd721b81f301f33e843453638e02d92f63c198358e5a0fa8b8d0b1218dabc"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0783e0c8e95397c84e9cf8ab092ab1e5dd7c769aec0ef3a5838ae7173b98dea0"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6fb6a72e88df46d1c1040fd32cd2d2c5e58722e5d3e31060a0393f04ad3283de"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2e3a593333e20c87415420a4fb76c00b7aae49b6361d2e2205b6fece0563bf40"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72e2ace7456167c71cfeca7dcb47bd5dceda7db2231265b80fc625c5e8073186"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ad05eb9c97e4f589ed9e74a00fcaac0d443ccd14f38d1258eb4c39a35dd722b"}, + {file = "pymongo-4.10.1-cp313-cp313-win32.whl", hash = "sha256:ee4c86d8e6872a61f7888fc96577b0ea165eb3bdb0d841962b444fa36001e2bb"}, + {file = "pymongo-4.10.1-cp313-cp313-win_amd64.whl", hash = "sha256:45ee87a4e12337353242bc758accc7fb47a2f2d9ecc0382a61e64c8f01e86708"}, + {file = "pymongo-4.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:442ca247f53ad24870a01e80a71cd81b3f2318655fd9d66748ee2bd1b1569d9e"}, + {file = "pymongo-4.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23e1d62df5592518204943b507be7b457fb8a4ad95a349440406fd42db5d0923"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6131bc6568b26e7495a9f3ef2b1700566b76bbecd919f4472bfe90038a61f425"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdeba88c540c9ed0338c0b2062d9f81af42b18d6646b3e6dda05cf6edd46ada9"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15a624d752dd3c89d10deb0ef6431559b6d074703cab90a70bb849ece02adc6b"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba164e73fdade9b4614a2497321c5b7512ddf749ed508950bdecc28d8d76a2d9"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9235fa319993405ae5505bf1333366388add2e06848db7b3deee8f990b69808e"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:e4a65567bd17d19f03157c7ec992c6530eafd8191a4e5ede25566792c4fe3fa2"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f1945d48fb9b8a87d515da07f37e5b2c35b364a435f534c122e92747881f4a7c"}, + {file = "pymongo-4.10.1-cp38-cp38-win32.whl", hash = "sha256:345f8d340802ebce509f49d5833cc913da40c82f2e0daf9f60149cacc9ca680f"}, + {file = "pymongo-4.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:3a70d5efdc0387ac8cd50f9a5f379648ecfc322d14ec9e1ba8ec957e5d08c372"}, + {file = "pymongo-4.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15b1492cc5c7cd260229590be7218261e81684b8da6d6de2660cf743445500ce"}, + {file = "pymongo-4.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:95207503c41b97e7ecc7e596d84a61f441b4935f11aa8332828a754e7ada8c82"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb99f003c720c6d83be02c8f1a7787c22384a8ca9a4181e406174db47a048619"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2bc1ee4b1ca2c4e7e6b7a5e892126335ec8d9215bcd3ac2fe075870fefc3358"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93a0833c10a967effcd823b4e7445ec491f0bf6da5de0ca33629c0528f42b748"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f56707497323150bd2ed5d63067f4ffce940d0549d4ea2dfae180deec7f9363"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:409ab7d6c4223e5c85881697f365239dd3ed1b58f28e4124b846d9d488c86880"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dac78a650dc0637d610905fd06b5fa6419ae9028cf4d04d6a2657bc18a66bbce"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1ec3fa88b541e0481aff3c35194c9fac96e4d57ec5d1c122376000eb28c01431"}, + {file = 
"pymongo-4.10.1-cp39-cp39-win32.whl", hash = "sha256:e0e961923a7b8a1c801c43552dcb8153e45afa41749d9efbd3a6d33f45489f7a"}, + {file = "pymongo-4.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:dabe8bf1ad644e6b93f3acf90ff18536d94538ca4d27e583c6db49889e98e48f"}, + {file = "pymongo-4.10.1.tar.gz", hash = "sha256:a9de02be53b6bb98efe0b9eda84ffa1ec027fcb23a2de62c4f941d9a2f2f3330"}, ] [package.dependencies] @@ -4725,12 +4754,12 @@ dnspython = ">=1.16.0,<3.0.0" [package.extras] aws = ["pymongo-auth-aws (>=1.1.0,<2.0.0)"] -docs = ["furo (==2023.9.10)", "readthedocs-sphinx-search (>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-rtd-theme (>=2,<3)", "sphinxcontrib-shellcheck (>=1,<2)"] -encryption = ["certifi", "pymongo-auth-aws (>=1.1.0,<2.0.0)", "pymongocrypt (>=1.6.0,<2.0.0)"] +docs = ["furo (==2023.9.10)", "readthedocs-sphinx-search (>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-autobuild (>=2020.9.1)", "sphinx-rtd-theme (>=2,<3)", "sphinxcontrib-shellcheck (>=1,<2)"] +encryption = ["certifi", "pymongo-auth-aws (>=1.1.0,<2.0.0)", "pymongocrypt (>=1.10.0,<2.0.0)"] gssapi = ["pykerberos", "winkerberos (>=0.5.0)"] ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] snappy = ["python-snappy"] -test = ["pytest (>=7)"] +test = ["pytest (>=8.2)", "pytest-asyncio (>=0.24.0)"] zstd = ["zstandard"] [[package]] @@ -4793,17 +4822,17 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pypdf" -version = "3.17.4" +version = "5.0.1" description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" optional = true -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "pypdf-3.17.4-py3-none-any.whl", hash = "sha256:6aa0f61b33779b64486de3f42835d3668badd48dac4a536aeb87da187a5eacd2"}, - {file = "pypdf-3.17.4.tar.gz", hash = "sha256:ec96e2e4fc9648ac609d19c00d41e9d606e0ae2ce5a0bbe7691426f5f157166a"}, + {file = "pypdf-5.0.1-py3-none-any.whl", hash = 
"sha256:ff8a32da6c7a63fea9c32fa4dd837cdd0db7966adf6c14f043e3f12592e992db"}, + {file = "pypdf-5.0.1.tar.gz", hash = "sha256:a361c3c372b4a659f9c8dd438d5ce29a753c79c620dc6e1fd66977651f5547ea"}, ] [package.dependencies] -typing_extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.10\""} +typing_extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] crypto = ["PyCryptodome", "cryptography"] @@ -4814,21 +4843,23 @@ image = ["Pillow (>=8.0.0)"] [[package]] name = "pyright" -version = "1.1.380" +version = "1.1.383" description = "Command line wrapper for pyright" optional = false python-versions = ">=3.7" files = [ - {file = "pyright-1.1.380-py3-none-any.whl", hash = "sha256:a6404392053d8848bacc7aebcbd9d318bb46baf1a1a000359305481920f43879"}, - {file = "pyright-1.1.380.tar.gz", hash = "sha256:e6ceb1a5f7e9f03106e0aa1d6fbb4d97735a5e7ffb59f3de6b2db590baf935b2"}, + {file = "pyright-1.1.383-py3-none-any.whl", hash = "sha256:d864d1182a313f45aaf99e9bfc7d2668eeabc99b29a556b5344894fd73cb1959"}, + {file = "pyright-1.1.383.tar.gz", hash = "sha256:1df7f12407f3710c9c6df938d98ec53f70053e6c6bbf71ce7bcb038d42f10070"}, ] [package.dependencies] nodeenv = ">=1.6.0" +typing-extensions = ">=4.1" [package.extras] -all = ["twine (>=3.4.1)"] +all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] dev = ["twine (>=3.4.1)"] +nodejs = ["nodejs-wheel-binaries"] [[package]] name = "pytest" @@ -4887,21 +4918,21 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-env" -version = "1.1.4" +version = "1.1.5" description = "pytest plugin that allows you to add environment variables." 
optional = false python-versions = ">=3.8" files = [ - {file = "pytest_env-1.1.4-py3-none-any.whl", hash = "sha256:a4212056d4d440febef311a98fdca56c31256d58fb453d103cba4e8a532b721d"}, - {file = "pytest_env-1.1.4.tar.gz", hash = "sha256:86653658da8f11c6844975db955746c458a9c09f1e64957603161e2ff93f5133"}, + {file = "pytest_env-1.1.5-py3-none-any.whl", hash = "sha256:ce90cf8772878515c24b31cd97c7fa1f4481cd68d588419fd45f10ecaee6bc30"}, + {file = "pytest_env-1.1.5.tar.gz", hash = "sha256:91209840aa0e43385073ac464a554ad2947cc2fd663a9debf88d03b01e0cc1cf"}, ] [package.dependencies] -pytest = ">=8.3.2" +pytest = ">=8.3.3" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} [package.extras] -test = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] [[package]] name = "pytest-mock" @@ -5077,13 +5108,13 @@ pyyaml = "*" [[package]] name = "qdrant-client" -version = "1.11.1" +version = "1.11.3" description = "Client library for the Qdrant vector search engine" optional = true python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.11.1-py3-none-any.whl", hash = "sha256:1375fad77c825c957181ff53775fb900c4383e817f864ea30b2605314da92f07"}, - {file = "qdrant_client-1.11.1.tar.gz", hash = "sha256:bfc23239b027073352ad92152209ec50281519686b7da3041612faece0fcdfbd"}, + {file = "qdrant_client-1.11.3-py3-none-any.whl", hash = "sha256:fcf040b58203ed0827608c9ad957da671b1e31bf27e5e35b322c1b577b6ec133"}, + {file = "qdrant_client-1.11.3.tar.gz", hash = "sha256:5a155d8281a224ac18acef512eae2f5e9a0907975d52a7627ec66fa6586d0285"}, ] [package.dependencies] @@ -5123,21 +5154,21 @@ md = ["cmarkgfm (>=0.8.0)"] [[package]] name = "redis" -version = "4.6.0" +version = "5.1.1" description = "Python client for Redis database and key-value store" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "redis-4.6.0-py3-none-any.whl", hash = 
"sha256:e2b03db868160ee4591de3cb90d40ebb50a90dd302138775937f6a42b7ed183c"}, - {file = "redis-4.6.0.tar.gz", hash = "sha256:585dc516b9eb042a619ef0a39c3d7d55fe81bdb4df09a52c9cdde0d07bf1aa7d"}, + {file = "redis-5.1.1-py3-none-any.whl", hash = "sha256:f8ea06b7482a668c6475ae202ed8d9bcaa409f6e87fb77ed1043d912afd62e24"}, + {file = "redis-5.1.1.tar.gz", hash = "sha256:f6c997521fedbae53387307c5d0bf784d9acc28d9f1d058abeac566ec4dbed72"}, ] [package.dependencies] -async-timeout = {version = ">=4.0.2", markers = "python_full_version <= \"3.11.2\""} +async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""} [package.extras] -hiredis = ["hiredis (>=1.0.0)"] -ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] +hiredis = ["hiredis (>=3.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"] [[package]] name = "regex" @@ -5297,18 +5328,19 @@ idna2008 = ["idna"] [[package]] name = "rich" -version = "13.8.1" +version = "13.9.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "rich-13.8.1-py3-none-any.whl", hash = "sha256:1760a3c0848469b97b558fc61c85233e3dafb69c7a071b4d60c38099d3cd4c06"}, - {file = "rich-13.8.1.tar.gz", hash = "sha256:8260cda28e3db6bf04d2d1ef4dbc03ba80a824c88b0e7668a0f23126a424844a"}, + {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, + {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] @@ -5329,29 +5361,29 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = 
"0.6.5" +version = "0.6.9" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.5-py3-none-linux_armv6l.whl", hash = "sha256:7e4e308f16e07c95fc7753fc1aaac690a323b2bb9f4ec5e844a97bb7fbebd748"}, - {file = "ruff-0.6.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:932cd69eefe4daf8c7d92bd6689f7e8182571cb934ea720af218929da7bd7d69"}, - {file = "ruff-0.6.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3a8d42d11fff8d3143ff4da41742a98f8f233bf8890e9fe23077826818f8d680"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a50af6e828ee692fb10ff2dfe53f05caecf077f4210fae9677e06a808275754f"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:794ada3400a0d0b89e3015f1a7e01f4c97320ac665b7bc3ade24b50b54cb2972"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:381413ec47f71ce1d1c614f7779d88886f406f1fd53d289c77e4e533dc6ea200"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:52e75a82bbc9b42e63c08d22ad0ac525117e72aee9729a069d7c4f235fc4d276"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09c72a833fd3551135ceddcba5ebdb68ff89225d30758027280968c9acdc7810"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:800c50371bdcb99b3c1551d5691e14d16d6f07063a518770254227f7f6e8c178"}, - {file = "ruff-0.6.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e25ddd9cd63ba1f3bd51c1f09903904a6adf8429df34f17d728a8fa11174253"}, - {file = "ruff-0.6.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:7291e64d7129f24d1b0c947ec3ec4c0076e958d1475c61202497c6aced35dd19"}, - {file = "ruff-0.6.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9ad7dfbd138d09d9a7e6931e6a7e797651ce29becd688be8a0d4d5f8177b4b0c"}, - 
{file = "ruff-0.6.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:005256d977021790cc52aa23d78f06bb5090dc0bfbd42de46d49c201533982ae"}, - {file = "ruff-0.6.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:482c1e6bfeb615eafc5899127b805d28e387bd87db38b2c0c41d271f5e58d8cc"}, - {file = "ruff-0.6.5-py3-none-win32.whl", hash = "sha256:cf4d3fa53644137f6a4a27a2b397381d16454a1566ae5335855c187fbf67e4f5"}, - {file = "ruff-0.6.5-py3-none-win_amd64.whl", hash = "sha256:3e42a57b58e3612051a636bc1ac4e6b838679530235520e8f095f7c44f706ff9"}, - {file = "ruff-0.6.5-py3-none-win_arm64.whl", hash = "sha256:51935067740773afdf97493ba9b8231279e9beef0f2a8079188c4776c25688e0"}, - {file = "ruff-0.6.5.tar.gz", hash = "sha256:4d32d87fab433c0cf285c3683dd4dae63be05fd7a1d65b3f5bf7cdd05a6b96fb"}, + {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"}, + {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"}, + {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"}, + {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"}, + {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"}, + {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"}, + {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"}, ] [[package]] @@ -5529,68 +5561,6 @@ files = [ cryptography = ">=2.0" jeepney = ">=0.6" -[[package]] -name = "sentencepiece" -version = "0.2.0" -description = "SentencePiece python wrapper" -optional = true -python-versions = "*" -files = [ - {file = "sentencepiece-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:188779e1298a1c8b8253c7d3ad729cb0a9891e5cef5e5d07ce4592c54869e227"}, - {file = "sentencepiece-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:bed9cf85b296fa2b76fc2547b9cbb691a523864cebaee86304c43a7b4cb1b452"}, - {file = "sentencepiece-0.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d7b67e724bead13f18db6e1d10b6bbdc454af574d70efbb36f27d90387be1ca3"}, - {file = "sentencepiece-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fde4b08cfe237be4484c6c7c2e2c75fb862cfeab6bd5449ce4caeafd97b767a"}, - {file = "sentencepiece-0.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c378492056202d1c48a4979650981635fd97875a00eabb1f00c6a236b013b5e"}, - {file = "sentencepiece-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1380ce6540a368de2ef6d7e6ba14ba8f3258df650d39ba7d833b79ee68a52040"}, - {file = "sentencepiece-0.2.0-cp310-cp310-win32.whl", hash = "sha256:a1151d6a6dd4b43e552394aed0edfe9292820272f0194bd56c7c1660a0c06c3d"}, - {file = "sentencepiece-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:d490142b0521ef22bc1085f061d922a2a6666175bb6b42e588ff95c0db6819b2"}, - {file = "sentencepiece-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17982700c4f6dbb55fa3594f3d7e5dd1c8659a274af3738e33c987d2a27c9d5c"}, - {file = "sentencepiece-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7c867012c0e8bcd5bdad0f791609101cb5c66acb303ab3270218d6debc68a65e"}, - {file = "sentencepiece-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7fd6071249c74f779c5b27183295b9202f8dedb68034e716784364443879eaa6"}, - {file = "sentencepiece-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27f90c55a65013cbb8f4d7aab0599bf925cde4adc67ae43a0d323677b5a1c6cb"}, - {file = "sentencepiece-0.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b293734059ef656dcd65be62ff771507bea8fed0a711b6733976e1ed3add4553"}, - {file = "sentencepiece-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e58b47f933aca74c6a60a79dcb21d5b9e47416256c795c2d58d55cec27f9551d"}, - {file = "sentencepiece-0.2.0-cp311-cp311-win32.whl", hash = "sha256:c581258cf346b327c62c4f1cebd32691826306f6a41d8c4bec43b010dee08e75"}, - {file = "sentencepiece-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:0993dbc665f4113017892f1b87c3904a44d0640eda510abcacdfb07f74286d36"}, - {file = "sentencepiece-0.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea5f536e32ea8ec96086ee00d7a4a131ce583a1b18d130711707c10e69601cb2"}, - {file = "sentencepiece-0.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0cb51f53b6aae3c36bafe41e86167c71af8370a039f542c43b0cce5ef24a68c"}, - {file = "sentencepiece-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3212121805afc58d8b00ab4e7dd1f8f76c203ddb9dc94aa4079618a31cf5da0f"}, - {file = "sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a3149e3066c2a75e0d68a43eb632d7ae728c7925b517f4c05c40f6f7280ce08"}, - {file = "sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:632f3594d3e7ac8b367bca204cb3fd05a01d5b21455acd097ea4c0e30e2f63d7"}, - {file = "sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f295105c6bdbb05bd5e1b0cafbd78ff95036f5d3641e7949455a3f4e5e7c3109"}, - {file = "sentencepiece-0.2.0-cp312-cp312-win32.whl", hash = "sha256:fb89f811e5efd18bab141afc3fea3de141c3f69f3fe9e898f710ae7fe3aab251"}, - {file = "sentencepiece-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a673a72aab81fef5ebe755c6e0cc60087d1f3a4700835d40537183c1703a45f"}, - {file = "sentencepiece-0.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:4547683f330289ec4f093027bfeb87f9ef023b2eb6f879fdc4a8187c7e0ffb90"}, - {file = "sentencepiece-0.2.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cd6175f7eaec7142d2bf6f6597ce7db4c9ac89acf93fcdb17410c3a8b781eeb"}, - {file = 
"sentencepiece-0.2.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:859ba1acde782609a0910a26a60e16c191a82bf39b5621107552c0cd79fad00f"}, - {file = "sentencepiece-0.2.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcbbef6cc277f8f18f36959e305f10b1c620442d75addc79c21d7073ae581b50"}, - {file = "sentencepiece-0.2.0-cp36-cp36m-win32.whl", hash = "sha256:536b934e244829e3fe6c4f198652cd82da48adb9aa145c9f00889542726dee3d"}, - {file = "sentencepiece-0.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:0a91aaa3c769b52440df56fafda683b3aa48e3f2169cf7ee5b8c8454a7f3ae9b"}, - {file = "sentencepiece-0.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:787e480ca4c1d08c9985a7eb1eae4345c107729c99e9b5a9a00f2575fc7d4b4b"}, - {file = "sentencepiece-0.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4d158189eb2ecffea3a51edf6d25e110b3678ec47f1a40f2d541eafbd8f6250"}, - {file = "sentencepiece-0.2.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1e5ca43013e8935f25457a4fca47e315780172c3e821b4b13a890668911c792"}, - {file = "sentencepiece-0.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7140d9e5a74a0908493bb4a13f1f16a401297bd755ada4c707e842fbf6f0f5bf"}, - {file = "sentencepiece-0.2.0-cp37-cp37m-win32.whl", hash = "sha256:6cf333625234f247ab357b0bd9836638405ea9082e1543d5b8408f014979dcbf"}, - {file = "sentencepiece-0.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ff88712338b01031910e8e61e7239aff3ce8869ee31a47df63cb38aadd591bea"}, - {file = "sentencepiece-0.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20813a68d4c221b1849c62c30e1281ea81687894d894b8d4a0f4677d9311e0f5"}, - {file = "sentencepiece-0.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:926ef920ae2e8182db31d3f5d081ada57804e3e1d3a8c4ef8b117f9d9fb5a945"}, - {file = "sentencepiece-0.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:89f65f69636b7e9c015b79dff9c9985a9bc7d19ded6f79ef9f1ec920fdd73ecf"}, - {file = "sentencepiece-0.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f67eae0dbe6f2d7d6ba50a354623d787c99965f068b81e145d53240198021b0"}, - {file = "sentencepiece-0.2.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:98501e075f35dd1a1d5a20f65be26839fcb1938752ec61539af008a5aa6f510b"}, - {file = "sentencepiece-0.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3d1d2cc4882e8d6a1adf9d5927d7716f80617fc693385661caff21888972269"}, - {file = "sentencepiece-0.2.0-cp38-cp38-win32.whl", hash = "sha256:b99a308a2e5e569031ab164b74e6fab0b6f37dfb493c32f7816225f4d411a6dd"}, - {file = "sentencepiece-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:cdb701eec783d3ec86b7cd4c763adad8eaf6b46db37ee1c36e5e6c44b3fe1b5f"}, - {file = "sentencepiece-0.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1e0f9c4d0a6b0af59b613175f019916e28ade076e21242fd5be24340d8a2f64a"}, - {file = "sentencepiece-0.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:298f21cc1366eb60311aedba3169d30f885c363ddbf44214b0a587d2908141ad"}, - {file = "sentencepiece-0.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f1ec95aa1e5dab11f37ac7eff190493fd87770f7a8b81ebc9dd768d1a3c8704"}, - {file = "sentencepiece-0.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b06b70af54daa4b4904cbb90b4eb6d35c9f3252fdc86c9c32d5afd4d30118d8"}, - {file = "sentencepiece-0.2.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22e37bac44dd6603388cb598c64ff7a76e41ca774646f21c23aadfbf5a2228ab"}, - {file = "sentencepiece-0.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0461324897735512a32d222e3d886e24ad6a499761952b6bda2a9ee6e4313ea5"}, - {file = "sentencepiece-0.2.0-cp39-cp39-win32.whl", hash = "sha256:38aed822fb76435fa1f12185f10465a94ab9e51d5e8a9159e9a540ce926f0ffd"}, - {file = 
"sentencepiece-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8cf876516548b5a1d6ac4745d8b554f5c07891d55da557925e5c13ff0b4e6ad"}, - {file = "sentencepiece-0.2.0.tar.gz", hash = "sha256:a52c19171daaf2e697dc6cbe67684e0fa341b1248966f6aebb541de654d15843"}, -] - [[package]] name = "sentinels" version = "1.0.0" @@ -5605,7 +5575,7 @@ files = [ name = "setuptools" version = "73.0.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "setuptools-73.0.1-py3-none-any.whl", hash = "sha256:b208925fcb9f7af924ed2dc04708ea89791e24bde0d3020b27df0e116088b34e"}, @@ -5860,48 +5830,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.34" +version = "2.0.35" description = "Database Abstraction Library" optional = true python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-win32.whl", hash = "sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-win_amd64.whl", hash = "sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-win32.whl", hash = "sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-win_amd64.whl", hash = "sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-win32.whl", hash = "sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-win_amd64.whl", hash = "sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = 
"sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-win32.whl", hash = "sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-win_amd64.whl", hash = "sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-win32.whl", hash = "sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-win_amd64.whl", hash = "sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-win32.whl", hash = "sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c"}, - {file = 
"SQLAlchemy-2.0.34-cp39-cp39-win_amd64.whl", hash = "sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8"}, - {file = "SQLAlchemy-2.0.34-py3-none-any.whl", hash = "sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f"}, - {file = "sqlalchemy-2.0.34.tar.gz", hash = "sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = 
"sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", 
hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, + {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, ] [package.dependencies] @@ -5945,13 +5927,13 @@ files = [ [[package]] name = "sympy" -version = "1.13.2" +version = "1.13.3" description = "Computer algebra system (CAS) in Python" 
-optional = true +optional = false python-versions = ">=3.8" files = [ - {file = "sympy-1.13.2-py3-none-any.whl", hash = "sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9"}, - {file = "sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13"}, + {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, + {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, ] [package.dependencies] @@ -5960,6 +5942,22 @@ mpmath = ">=1.1.0,<1.4" [package.extras] dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] +[[package]] +name = "tavily-python" +version = "0.5.0" +description = "Python wrapper for the Tavily API" +optional = true +python-versions = ">=3.6" +files = [ + {file = "tavily_python-0.5.0-py3-none-any.whl", hash = "sha256:e874f6a04a56cdda80a505fe0b4f5d61d25372bd52a83e6773926fb297dcaa29"}, + {file = "tavily_python-0.5.0.tar.gz", hash = "sha256:2c60b88203b630e1b37fc711913a1090ced6719b3f21089f25ec06e9e1602822"}, +] + +[package.dependencies] +httpx = "*" +requests = "*" +tiktoken = ">=0.5.1" + [[package]] name = "tenacity" version = "8.5.0" @@ -5977,47 +5975,42 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tiktoken" -version = "0.7.0" +version = "0.8.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, - {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, - {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"}, - {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"}, - {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"}, - {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"}, - {file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"}, - {file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"}, - {file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"}, - {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"}, - {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"}, - {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"}, - {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"}, - {file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"}, - {file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"}, - {file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"}, - {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"}, - {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"}, - {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"}, - {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"}, - {file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"}, - {file = "tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"}, - {file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"}, - {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"}, - {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"}, - {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"}, - {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"}, - {file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"}, - {file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"}, - {file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"}, - {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"}, - {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"}, - {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"}, - {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"}, - {file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"}, - {file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"}, + {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"}, + {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"}, + {file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"}, + {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"}, + {file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"}, + {file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"}, + {file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"}, + {file = "tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"}, + {file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"}, + {file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"}, + {file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"}, + {file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"}, + {file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"}, ] [package.dependencies] @@ -6040,111 +6033,111 @@ files = [ [[package]] name = "tokenizers" -version = "0.19.1" +version = 
"0.20.0" description = "" optional = true python-versions = ">=3.7" files = [ - {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"}, - {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"}, - {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4"}, - {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642"}, - {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46"}, - {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1"}, - {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe"}, - {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e"}, - {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98"}, - {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3"}, - {file = "tokenizers-0.19.1-cp310-none-win32.whl", hash = "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837"}, - {file = "tokenizers-0.19.1-cp310-none-win_amd64.whl", hash = "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403"}, - {file = 
"tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059"}, - {file = "tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14"}, - {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594"}, - {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc"}, - {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2"}, - {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe"}, - {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d"}, - {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa"}, - {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6"}, - {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b"}, - {file = "tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256"}, - {file = "tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66"}, - {file = "tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153"}, - {file = "tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a"}, - {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95"}, - {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266"}, - {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52"}, - {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f"}, - {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840"}, - {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3"}, - {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea"}, - {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c"}, - {file = "tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57"}, - {file = "tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a"}, - {file = "tokenizers-0.19.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:bb9dfe7dae85bc6119d705a76dc068c062b8b575abe3595e3c6276480e67e3f1"}, - {file = 
"tokenizers-0.19.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:1f0360cbea28ea99944ac089c00de7b2e3e1c58f479fb8613b6d8d511ce98267"}, - {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:71e3ec71f0e78780851fef28c2a9babe20270404c921b756d7c532d280349214"}, - {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b82931fa619dbad979c0ee8e54dd5278acc418209cc897e42fac041f5366d626"}, - {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e8ff5b90eabdcdaa19af697885f70fe0b714ce16709cf43d4952f1f85299e73a"}, - {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e742d76ad84acbdb1a8e4694f915fe59ff6edc381c97d6dfdd054954e3478ad4"}, - {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8c5d59d7b59885eab559d5bc082b2985555a54cda04dda4c65528d90ad252ad"}, - {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b2da5c32ed869bebd990c9420df49813709e953674c0722ff471a116d97b22d"}, - {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:638e43936cc8b2cbb9f9d8dde0fe5e7e30766a3318d2342999ae27f68fdc9bd6"}, - {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:78e769eb3b2c79687d9cb0f89ef77223e8e279b75c0a968e637ca7043a84463f"}, - {file = "tokenizers-0.19.1-cp37-none-win32.whl", hash = "sha256:72791f9bb1ca78e3ae525d4782e85272c63faaef9940d92142aa3eb79f3407a3"}, - {file = "tokenizers-0.19.1-cp37-none-win_amd64.whl", hash = "sha256:f3bbb7a0c5fcb692950b041ae11067ac54826204318922da754f908d95619fbc"}, - {file = "tokenizers-0.19.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:07f9295349bbbcedae8cefdbcfa7f686aa420be8aca5d4f7d1ae6016c128c0c5"}, - {file = "tokenizers-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:10a707cc6c4b6b183ec5dbfc5c34f3064e18cf62b4a938cb41699e33a99e03c1"}, - {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6309271f57b397aa0aff0cbbe632ca9d70430839ca3178bf0f06f825924eca22"}, - {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ad23d37d68cf00d54af184586d79b84075ada495e7c5c0f601f051b162112dc"}, - {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:427c4f0f3df9109314d4f75b8d1f65d9477033e67ffaec4bca53293d3aca286d"}, - {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e83a31c9cf181a0a3ef0abad2b5f6b43399faf5da7e696196ddd110d332519ee"}, - {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c27b99889bd58b7e301468c0838c5ed75e60c66df0d4db80c08f43462f82e0d3"}, - {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bac0b0eb952412b0b196ca7a40e7dce4ed6f6926489313414010f2e6b9ec2adf"}, - {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8a6298bde623725ca31c9035a04bf2ef63208d266acd2bed8c2cb7d2b7d53ce6"}, - {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:08a44864e42fa6d7d76d7be4bec62c9982f6f6248b4aa42f7302aa01e0abfd26"}, - {file = "tokenizers-0.19.1-cp38-none-win32.whl", hash = "sha256:1de5bc8652252d9357a666e609cb1453d4f8e160eb1fb2830ee369dd658e8975"}, - {file = "tokenizers-0.19.1-cp38-none-win_amd64.whl", hash = "sha256:0bcce02bf1ad9882345b34d5bd25ed4949a480cf0e656bbd468f4d8986f7a3f1"}, - {file = "tokenizers-0.19.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0b9394bd204842a2a1fd37fe29935353742be4a3460b6ccbaefa93f58a8df43d"}, - {file = "tokenizers-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4692ab92f91b87769d950ca14dbb61f8a9ef36a62f94bad6c82cc84a51f76f6a"}, - {file = 
"tokenizers-0.19.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6258c2ef6f06259f70a682491c78561d492e885adeaf9f64f5389f78aa49a051"}, - {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c85cf76561fbd01e0d9ea2d1cbe711a65400092bc52b5242b16cfd22e51f0c58"}, - {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670b802d4d82bbbb832ddb0d41df7015b3e549714c0e77f9bed3e74d42400fbe"}, - {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85aa3ab4b03d5e99fdd31660872249df5e855334b6c333e0bc13032ff4469c4a"}, - {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbf001afbbed111a79ca47d75941e9e5361297a87d186cbfc11ed45e30b5daba"}, - {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c89aa46c269e4e70c4d4f9d6bc644fcc39bb409cb2a81227923404dd6f5227"}, - {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:39c1ec76ea1027438fafe16ecb0fb84795e62e9d643444c1090179e63808c69d"}, - {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c2a0d47a89b48d7daa241e004e71fb5a50533718897a4cd6235cb846d511a478"}, - {file = "tokenizers-0.19.1-cp39-none-win32.whl", hash = "sha256:61b7fe8886f2e104d4caf9218b157b106207e0f2a4905c9c7ac98890688aabeb"}, - {file = "tokenizers-0.19.1-cp39-none-win_amd64.whl", hash = "sha256:f97660f6c43efd3e0bfd3f2e3e5615bf215680bad6ee3d469df6454b8c6e8256"}, - {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334"}, - {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd"}, - {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594"}, - {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda"}, - {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022"}, - {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e"}, - {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75"}, - {file = "tokenizers-0.19.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d"}, - {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285"}, - {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828"}, - {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2"}, - {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a"}, - {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49"}, - {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574"}, - {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c"}, - {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3"}, - {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85"}, - {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1"}, - {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876"}, - {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88"}, - {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7"}, - {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243"}, - {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439"}, - {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b"}, - {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af"}, - {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab"}, - {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f"}, - {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"}, + {file = "tokenizers-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6cff5c5e37c41bc5faa519d6f3df0679e4b37da54ea1f42121719c5e2b4905c0"}, + {file = "tokenizers-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:62a56bf75c27443432456f4ca5ca055befa95e25be8a28141cc495cac8ae4d6d"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68cc7de6a63f09c4a86909c2597b995aa66e19df852a23aea894929c74369929"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:053c37ecee482cc958fdee53af3c6534286a86f5d35aac476f7c246830e53ae5"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d7074aaabc151a6363fa03db5493fc95b423b2a1874456783989e96d541c7b6"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a11435780f2acd89e8fefe5e81cecf01776f6edb9b3ac95bcb76baee76b30b90"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9a81cd2712973b007d84268d45fc3f6f90a79c31dfe7f1925e6732f8d2959987"}, + {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7dfd796ab9d909f76fb93080e1c7c8309f196ecb316eb130718cd5e34231c69"}, + {file = "tokenizers-0.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8029ad2aa8cb00605c9374566034c1cc1b15130713e0eb5afcef6cface8255c9"}, + {file = "tokenizers-0.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ca4d54260ebe97d59dfa9a30baa20d0c4dd9137d99a8801700055c561145c24e"}, + {file = "tokenizers-0.20.0-cp310-none-win32.whl", hash = "sha256:95ee16b57cec11b86a7940174ec5197d506439b0f415ab3859f254b1dffe9df0"}, + {file = 
"tokenizers-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:0a61a11e93eeadbf02aea082ffc75241c4198e0608bbbac4f65a9026851dcf37"}, + {file = "tokenizers-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6636b798b3c4d6c9b1af1a918bd07c867808e5a21c64324e95318a237e6366c3"}, + {file = "tokenizers-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ec603e42eaf499ffd58b9258162add948717cf21372458132f14e13a6bc7172"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cce124264903a8ea6f8f48e1cc7669e5ef638c18bd4ab0a88769d5f92debdf7f"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07bbeba0231cf8de07aa6b9e33e9779ff103d47042eeeb859a8c432e3292fb98"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:06c0ca8397b35d38b83a44a9c6929790c1692957d88541df061cb34d82ebbf08"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ca6557ac3b83d912dfbb1f70ab56bd4b0594043916688e906ede09f42e192401"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a5ad94c9e80ac6098328bee2e3264dbced4c6faa34429994d473f795ec58ef4"}, + {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b5c7f906ee6bec30a9dc20268a8b80f3b9584de1c9f051671cb057dc6ce28f6"}, + {file = "tokenizers-0.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:31e087e9ee1b8f075b002bfee257e858dc695f955b43903e1bb4aa9f170e37fe"}, + {file = "tokenizers-0.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c3124fb6f3346cb3d8d775375d3b429bf4dcfc24f739822702009d20a4297990"}, + {file = "tokenizers-0.20.0-cp311-none-win32.whl", hash = "sha256:a4bb8b40ba9eefa621fdcabf04a74aa6038ae3be0c614c6458bd91a4697a452f"}, + {file = "tokenizers-0.20.0-cp311-none-win_amd64.whl", hash = 
"sha256:2b709d371f1fe60a28ef0c5c67815952d455ca7f34dbe7197eaaed3cc54b658e"}, + {file = "tokenizers-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:15c81a17d0d66f4987c6ca16f4bea7ec253b8c7ed1bb00fdc5d038b1bb56e714"}, + {file = "tokenizers-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6a531cdf1fb6dc41c984c785a3b299cb0586de0b35683842a3afbb1e5207f910"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06caabeb4587f8404e0cd9d40f458e9cba3e815c8155a38e579a74ff3e2a4301"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8768f964f23f5b9f50546c0369c75ab3262de926983888bbe8b98be05392a79c"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:626403860152c816f97b649fd279bd622c3d417678c93b4b1a8909b6380b69a8"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c1b88fa9e5ff062326f4bf82681da5a96fca7104d921a6bd7b1e6fcf224af26"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7e559436a07dc547f22ce1101f26d8b2fad387e28ec8e7e1e3b11695d681d8"}, + {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e48afb75e50449848964e4a67b0da01261dd3aa8df8daecf10db8fd7f5b076eb"}, + {file = "tokenizers-0.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:baf5d0e1ff44710a95eefc196dd87666ffc609fd447c5e5b68272a7c3d342a1d"}, + {file = "tokenizers-0.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e5e56df0e8ed23ba60ae3848c3f069a0710c4b197218fe4f89e27eba38510768"}, + {file = "tokenizers-0.20.0-cp312-none-win32.whl", hash = "sha256:ec53e5ecc142a82432f9c6c677dbbe5a2bfee92b8abf409a9ecb0d425ee0ce75"}, + {file = "tokenizers-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:f18661ece72e39c0dfaa174d6223248a15b457dbd4b0fc07809b8e6d3ca1a234"}, + {file = 
"tokenizers-0.20.0-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:f7065b1084d8d1a03dc89d9aad69bcbc8415d4bc123c367063eb32958cd85054"}, + {file = "tokenizers-0.20.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:e5d4069e4714e3f7ba0a4d3d44f9d84a432cd4e4aa85c3d7dd1f51440f12e4a1"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:799b808529e54b7e1a36350bda2aeb470e8390e484d3e98c10395cee61d4e3c6"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f9baa027cc8a281ad5f7725a93c204d7a46986f88edbe8ef7357f40a23fb9c7"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:010ec7f3f7a96adc4c2a34a3ada41fa14b4b936b5628b4ff7b33791258646c6b"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98d88f06155335b14fd78e32ee28ca5b2eb30fced4614e06eb14ae5f7fba24ed"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e13eb000ef540c2280758d1b9cfa5fe424b0424ae4458f440e6340a4f18b2638"}, + {file = "tokenizers-0.20.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fab3cf066ff426f7e6d70435dc28a9ff01b2747be83810e397cba106f39430b0"}, + {file = "tokenizers-0.20.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:39fa3761b30a89368f322e5daf4130dce8495b79ad831f370449cdacfb0c0d37"}, + {file = "tokenizers-0.20.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c8da0fba4d179ddf2607821575998df3c294aa59aa8df5a6646dc64bc7352bce"}, + {file = "tokenizers-0.20.0-cp37-none-win32.whl", hash = "sha256:fada996d6da8cf213f6e3c91c12297ad4f6cdf7a85c2fadcd05ec32fa6846fcd"}, + {file = "tokenizers-0.20.0-cp37-none-win_amd64.whl", hash = "sha256:7d29aad702279e0760c265fcae832e89349078e3418dd329732d4503259fd6bd"}, + {file = "tokenizers-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:099c68207f3ef0227ecb6f80ab98ea74de559f7b124adc7b17778af0250ee90a"}, + {file = "tokenizers-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:68012d8a8cddb2eab3880870d7e2086cb359c7f7a2b03f5795044f5abff4e850"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9253bdd209c6aee168deca7d0e780581bf303e0058f268f9bb06859379de19b6"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f868600ddbcb0545905ed075eb7218a0756bf6c09dae7528ea2f8436ebd2c93"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9643d9c8c5f99b6aba43fd10034f77cc6c22c31f496d2f0ee183047d948fa0"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c375c6a889aeab44734028bc65cc070acf93ccb0f9368be42b67a98e1063d3f6"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e359f852328e254f070bbd09a19a568421d23388f04aad9f2fb7da7704c7228d"}, + {file = "tokenizers-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d98b01a309d4387f3b1c1dd68a8b8136af50376cf146c1b7e8d8ead217a5be4b"}, + {file = "tokenizers-0.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:459f7537119554c2899067dec1ac74a00d02beef6558f4ee2e99513bf6d568af"}, + {file = "tokenizers-0.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:392b87ec89452628c045c9f2a88bc2a827f4c79e7d84bc3b72752b74c2581f70"}, + {file = "tokenizers-0.20.0-cp38-none-win32.whl", hash = "sha256:55a393f893d2ed4dd95a1553c2e42d4d4086878266f437b03590d3f81984c4fe"}, + {file = "tokenizers-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:30ffe33c5c2f2aab8e9a3340d0110dd9f7ace7eec7362e20a697802306bd8068"}, + {file = "tokenizers-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:aa2d4a6fed2a7e3f860c7fc9d48764bb30f2649d83915d66150d6340e06742b8"}, + {file = 
"tokenizers-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b5ef0f814084a897e9071fc4a868595f018c5c92889197bdc4bf19018769b148"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc1e1b791e8c3bf4c4f265f180dadaff1c957bf27129e16fdd5e5d43c2d3762c"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b69e55e481459c07885263743a0d3c18d52db19bae8226a19bcca4aaa213fff"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4806b4d82e27a2512bc23057b2986bc8b85824914286975b84d8105ff40d03d9"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9859e9ef13adf5a473ccab39d31bff9c550606ae3c784bf772b40f615742a24f"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef703efedf4c20488a8eb17637b55973745b27997ff87bad88ed499b397d1144"}, + {file = "tokenizers-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6eec0061bab94b1841ab87d10831fdf1b48ebaed60e6d66d66dbe1d873f92bf5"}, + {file = "tokenizers-0.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:980f3d0d7e73f845b69087f29a63c11c7eb924c4ad6b358da60f3db4cf24bdb4"}, + {file = "tokenizers-0.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c157550a2f3851b29d7fdc9dc059fcf81ff0c0fc49a1e5173a89d533ed043fa"}, + {file = "tokenizers-0.20.0-cp39-none-win32.whl", hash = "sha256:8a3d2f4d08608ec4f9895ec25b4b36a97f05812543190a5f2c3cd19e8f041e5a"}, + {file = "tokenizers-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:d90188d12afd0c75e537f9a1d92f9c7375650188ee4f48fdc76f9e38afbd2251"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d68e15f1815357b059ec266062340c343ea7f98f7f330602df81ffa3474b6122"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:23f9ecec637b9bc80da5f703808d29ed5329e56b5aa8d791d1088014f48afadc"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f830b318ee599e3d0665b3e325f85bc75ee2d2ca6285f52e439dc22b64691580"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3dc750def789cb1de1b5a37657919545e1d9ffa667658b3fa9cb7862407a1b8"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e26e6c755ae884c2ea6135cd215bdd0fccafe4ee62405014b8c3cd19954e3ab9"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:a1158c7174f427182e08baa2a8ded2940f2b4a3e94969a85cc9cfd16004cbcea"}, + {file = "tokenizers-0.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:6324826287a3fc198898d3dcf758fe4a8479e42d6039f4c59e2cedd3cf92f64e"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7d8653149405bb0c16feaf9cfee327fdb6aaef9dc2998349fec686f35e81c4e2"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8a2dc1e402a155e97309287ca085c80eb1b7fab8ae91527d3b729181639fa51"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07bef67b20aa6e5f7868c42c7c5eae4d24f856274a464ae62e47a0f2cccec3da"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da06e397182ff53789c506c7833220c192952c57e1581a53f503d8d953e2d67e"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:302f7e11a14814028b7fc88c45a41f1bbe9b5b35fd76d6869558d1d1809baa43"}, + {file = "tokenizers-0.20.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:055ec46e807b875589dfbe3d9259f9a6ee43394fb553b03b3d1e9541662dbf25"}, + {file = 
"tokenizers-0.20.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e3144b8acebfa6ae062e8f45f7ed52e4b50fb6c62f93afc8871b525ab9fdcab3"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b52aa3fd14b2a07588c00a19f66511cff5cca8f7266ca3edcdd17f3512ad159f"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b8cf52779ffc5d4d63a0170fbeb512372bad0dd014ce92bbb9149756c831124"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:983a45dd11a876124378dae71d6d9761822199b68a4c73f32873d8cdaf326a5b"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6b819c9a19831ebec581e71a7686a54ab45d90faf3842269a10c11d746de0c"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e738cfd80795fcafcef89c5731c84b05638a4ab3f412f97d5ed7765466576eb1"}, + {file = "tokenizers-0.20.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c8842c7be2fadb9c9edcee233b1b7fe7ade406c99b0973f07439985c1c1d0683"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e47a82355511c373a4a430c4909dc1e518e00031207b1fec536c49127388886b"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9afbf359004551179a5db19424180c81276682773cff2c5d002f6eaaffe17230"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07eaa8799a92e6af6f472c21a75bf71575de2af3c0284120b7a09297c0de2f3"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0994b2e5fc53a301071806bc4303e4bc3bdc3f490e92a21338146a36746b0872"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6466e0355b603d10e3cc3d282d350b646341b601e50969464a54939f9848d0"}, + {file = 
"tokenizers-0.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:1e86594c2a433cb1ea09cfbe596454448c566e57ee8905bd557e489d93e89986"}, + {file = "tokenizers-0.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3e14cdef1efa96ecead6ea64a891828432c3ebba128bdc0596e3059fea104ef3"}, + {file = "tokenizers-0.20.0.tar.gz", hash = "sha256:39d7acc43f564c274085cafcd1dae9d36f332456de1a31970296a6b8da4eac8d"}, ] [package.dependencies] @@ -6192,7 +6185,7 @@ files = [ name = "torch" version = "2.4.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -optional = true +optional = false python-versions = ">=3.8.0" files = [ {file = "torch-2.4.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:362f82e23a4cd46341daabb76fba08f04cd646df9bfaf5da50af97cb60ca4971"}, @@ -6288,17 +6281,16 @@ gui = ["Gooey (>=1.0.1)"] [[package]] name = "transformers" -version = "4.44.2" +version = "4.45.2" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = true python-versions = ">=3.8.0" files = [ - {file = "transformers-4.44.2-py3-none-any.whl", hash = "sha256:1c02c65e7bfa5e52a634aff3da52138b583fc6f263c1f28d547dc144ba3d412d"}, - {file = "transformers-4.44.2.tar.gz", hash = "sha256:36aa17cc92ee154058e426d951684a2dab48751b35b49437896f898931270826"}, + {file = "transformers-4.45.2-py3-none-any.whl", hash = "sha256:c551b33660cfc815bae1f9f097ecfd1e65be623f13c6ee0dda372bd881460210"}, + {file = "transformers-4.45.2.tar.gz", hash = "sha256:72bc390f6b203892561f05f86bbfaa0e234aab8e927a83e62b9d92ea7e3ae101"}, ] [package.dependencies] -accelerate = {version = ">=0.21.0", optional = true, markers = "extra == \"torch\""} filelock = "*" huggingface-hub = ">=0.23.2,<1.0" numpy = ">=1.17" @@ -6307,22 +6299,21 @@ pyyaml = ">=5.1" regex = "!=2019.12.17" requests = "*" safetensors = ">=0.4.1" -tokenizers = ">=0.19,<0.20" -torch = {version = "*", optional = true, markers = "extra == \"torch\""} +tokenizers = 
">=0.20,<0.21" tqdm = ">=4.27" [package.extras] -accelerate = ["accelerate (>=0.21.0)"] -agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] -all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] +accelerate = ["accelerate (>=0.26.0)"] +agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision"] audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] -benchmark = ["optimum-benchmark (>=0.2.0)"] +benchmark = ["optimum-benchmark (>=0.3.0)"] codecarbon = ["codecarbon (==1.2.0)"] -deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] -deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", 
"beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", 
"onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +deepspeed = ["accelerate (>=0.26.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.26.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", 
"pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", 
"pytest-rich", "pytest-timeout", "pytest-xdist", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.20,<0.21)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "libcst", "librosa", "nltk (<=3.8.1)", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] ftfy = ["ftfy"] @@ -6333,7 +6324,7 @@ natten = ["natten (>=0.14.6,<0.15.0)"] onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] optuna = ["optuna"] -quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff 
(==0.5.1)", "urllib3 (<2.0.0)"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "libcst", "rich", "ruff (==0.5.1)", "urllib3 (<2.0.0)"] ray = ["ray[tune] (>=2.7.0)"] retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] ruff = ["ruff (==0.5.1)"] @@ -6343,16 +6334,17 @@ serving = ["fastapi", "pydantic", "starlette", "uvicorn"] sigopt = ["sigopt"] sklearn = ["scikit-learn"] speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"] tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +tiktoken = ["blobfile", "tiktoken"] timm = ["timm (<=0.9.16)"] -tokenizers = ["tokenizers (>=0.19,<0.20)"] -torch = ["accelerate (>=0.21.0)", "torch"] +tokenizers = ["tokenizers 
(>=0.20,<0.21)"] +torch = ["accelerate (>=0.26.0)", "torch"] torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] -torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"] +torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.20,<0.21)", "torch", "tqdm (>=4.27)"] video = ["av (==9.2.0)", "decord (==0.6.0)"] vision = ["Pillow (>=10.0.1,<=15.0)"] @@ -6360,7 +6352,7 @@ vision = ["Pillow (>=10.0.1,<=15.0)"] name = "triton" version = "3.0.0" description = "A language and compiler for custom Deep Learning operations" -optional = true +optional = false python-versions = "*" files = [ {file = "triton-3.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e1efef76935b2febc365bfadf74bcb65a6f959a9872e5bddf44cc9e0adce1e1a"}, @@ -6465,21 +6457,21 @@ files = [ [[package]] name = "typos" -version = "1.24.5" +version = "1.26.0" description = "Source Code Spelling Correction" optional = false python-versions = ">=3.7" files = [ - {file = "typos-1.24.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:95a75c76ecd4aa32b8a18b5aed9f20e4223276851ffa9d77d552533ed3e23198"}, - {file = "typos-1.24.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9a4634eda1082fbe9e7b3fc947870b36b50a964f6b89861ccf19bb9ebf26ddd9"}, - {file = "typos-1.24.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00e1f569ddc8ed80255114cbbbdcb9db278ae738f4ee435ba60803b2c8e7d519"}, - {file = "typos-1.24.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:8940a50ad420dc7924e0db520c88cedce2c6cc88f206c621755e5a966c0ad645"}, - {file = "typos-1.24.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1433c987eb2dec6ce627e381870aa36f44cb98696ca4f9ff194abb87bc2075d3"}, - {file = "typos-1.24.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b3e2f44f7f39272ae0cce3f3b89157218db82f5214354d76d3a60f1af0bd0602"}, - {file = "typos-1.24.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b226d2f2960e6a5430a0af30b896e2e067ffa9fe98ea4196c0f514ad97219c47"}, - {file = "typos-1.24.5-py3-none-win32.whl", hash = "sha256:e6a7b77c13e49a5791d3be537eede2e8f4496e662aa7501260344edd5ba7df86"}, - {file = "typos-1.24.5-py3-none-win_amd64.whl", hash = "sha256:47c237a0bbcd8ab432a562020c386abe45f8ea71218b74d800d799d65b39d08b"}, - {file = "typos-1.24.5.tar.gz", hash = "sha256:b31af4d73fd35c6cda7530c5f9d7ca23ecfa11e97d4709783496353cef7e7a73"}, + {file = "typos-1.26.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:4a49e389f89b7b53aaea5f956134317507ddd92a14d7ec380cb918390e04aac8"}, + {file = "typos-1.26.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2c50b4094f2252f5b4552514776f455914ae619f6a7418de002af51d981aaa7a"}, + {file = "typos-1.26.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f33cd305042642d6a0bdc5e09fa6338385a7cb608e1385fd2b6d22d482e2071"}, + {file = "typos-1.26.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb97608d6a77470bf36d378341c81ded61dd14a10baa15e46431e12688e3d504"}, + {file = "typos-1.26.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:775802bc32203a68c475e47fb9dfa69ffe427bd85d3e27b03baa5a455bc2b4de"}, + {file = "typos-1.26.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:72412d456c1c4a1cac8dd7f56410fe14aad22ab883b49aba704cbdb58b5cda0e"}, + {file = "typos-1.26.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e247246548c862e819ae541713471f60830020b051364fc6f216606945322427"}, + {file = 
"typos-1.26.0-py3-none-win32.whl", hash = "sha256:7488c7124ac52a66ed79e74bdd11248b87d295391937b833f64e45c6c6237c82"}, + {file = "typos-1.26.0-py3-none-win_amd64.whl", hash = "sha256:f4157c7b778e2128121f65279fa763329556e12188d200828f3e38c3029a6764"}, + {file = "typos-1.26.0.tar.gz", hash = "sha256:97f7bc943aafa040bca272cd5c0b679503876041898b7b99339c9604c0794786"}, ] [[package]] @@ -6954,7 +6946,7 @@ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linke test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [extras] -all = ["accelerate", "anthropic", "astrapy", "beautifulsoup4", "boto3", "cohere", "diffusers", "duckduckgo-search", "elevenlabs", "filetype", "google-generativeai", "mail-parser", "markdownify", "marqo", "ollama", "opensearch-py", "opentelemetry-api", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-instrumentation", "opentelemetry-instrumentation-threading", "opentelemetry-sdk", "pandas", "pgvector", "pillow", "pinecone-client", "playwright", "psycopg2-binary", "pusher", "pymongo", "pypdf", "qdrant-client", "redis", "sentencepiece", "snowflake-sqlalchemy", "sqlalchemy", "torch", "trafilatura", "transformers", "voyageai"] +all = ["anthropic", "astrapy", "beautifulsoup4", "boto3", "cohere", "diffusers", "duckduckgo-search", "elevenlabs", "google-generativeai", "mail-parser", "markdownify", "marqo", "ollama", "opensearch-py", "opentelemetry-api", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-instrumentation", "opentelemetry-instrumentation-threading", "opentelemetry-sdk", "pandas", "pgvector", "pillow", "pinecone-client", "playwright", "psycopg2-binary", "pusher", "pymongo", "pypdf", "qdrant-client", "redis", "snowflake-sqlalchemy", "sqlalchemy", "tavily-python", "trafilatura", "transformers", 
"voyageai"] drivers-embedding-amazon-bedrock = ["boto3"] drivers-embedding-amazon-sagemaker = ["boto3"] drivers-embedding-cohere = ["cohere"] @@ -6965,7 +6957,7 @@ drivers-embedding-voyageai = ["voyageai"] drivers-event-listener-amazon-iot = ["boto3"] drivers-event-listener-amazon-sqs = ["boto3"] drivers-event-listener-pusher = ["pusher"] -drivers-image-generation-huggingface = ["accelerate", "diffusers", "pillow", "sentencepiece", "torch"] +drivers-image-generation-huggingface = ["diffusers", "pillow"] drivers-memory-conversation-amazon-dynamodb = ["boto3"] drivers-memory-conversation-redis = ["redis"] drivers-observability-datadog = ["opentelemetry-api", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-instrumentation", "opentelemetry-instrumentation-threading", "opentelemetry-sdk"] @@ -6996,8 +6988,8 @@ drivers-vector-redis = ["redis"] drivers-web-scraper-markdownify = ["beautifulsoup4", "markdownify", "playwright"] drivers-web-scraper-trafilatura = ["trafilatura"] drivers-web-search-duckduckgo = ["duckduckgo-search"] -loaders-audio = ["filetype"] -loaders-dataframe = ["pandas"] +drivers-web-search-exa = ["exa-py"] +drivers-web-search-tavily = ["tavily-python"] loaders-email = ["mail-parser"] loaders-image = ["pillow"] loaders-pdf = ["pypdf"] @@ -7006,4 +6998,4 @@ loaders-sql = ["sqlalchemy"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "e951b9aff748e3891f504bacac4bdc86843303f9d74de37949690ede153c1cb2" +content-hash = "9632ef0546326e19f48c0cdfa3e81fd7179dc15ca40e3f3485ee0ad5c266985c" diff --git a/pyproject.toml b/pyproject.toml index 9c84d3f84..715bd50aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "griptape" -version = "0.32.0" +version = "0.33.0" description = "Modular Python framework for LLM workflows, tools, memory, and data." 
authors = ["Griptape "] license = "Apache 2.0" @@ -18,7 +18,7 @@ attrs = "^24.2.0" jinja2 = "^3.1.4" marshmallow = "^3.21.3" marshmallow-enum = "^1.5.1" -tiktoken = "^0.7" +tiktoken = "^0.8" rich = "^13.7.1" schema = "^0.7.7" pyyaml = "^6.0.1" @@ -27,50 +27,49 @@ numpy = "^1.26.4" stringcase = "^1.2.0" docker = "^7.1.0" requests = "^2.32.0" +filetype = "^1.2" # drivers cohere = { version = "^5.5.4", optional = true } -anthropic = { version = ">=0.29,<0.35", optional = true } -transformers = { version = "^4.41.1", optional = true, extras=["torch"] } -huggingface-hub = { version = "^0.24.0", optional = true } +anthropic = { version = "^0.35.0", optional = true } +transformers = { version = "^4.41.1", optional = true} +huggingface-hub = { version = "^0.25.1", optional = true } boto3 = { version = "^1.34.119", optional = true } snowflake-sqlalchemy = { version = "^1.6.1", optional = true } pinecone-client = { version = "^3", optional = true } pymongo = { version = "^4.8.0", optional = true } marqo = { version = "^3.7.0", optional = true } -redis = { version = "^4.6.0", optional = true } +redis = { version = "^5.1.0", optional = true } opensearch-py = { version = "^2.3.1", optional = true } -pgvector = { version = ">=0.2.3,<0.4.0", optional = true } +pgvector = { version = "^0.3.4", optional = true } psycopg2-binary = { version = "^2.9.9", optional = true } -google-generativeai = { version = ">=0.7.2,<0.9.0", optional = true } +google-generativeai = { version = "^0.8.2", optional = true } trafilatura = {version = "^1.6", optional = true} playwright = {version = "^1.42", optional = true} beautifulsoup4 = {version = "^4.12.3", optional = true} -markdownify = {version = ">=0.11.6,<0.14.0", optional = true} +markdownify = {version = "^0.13.1", optional = true} voyageai = {version = "^0.2.1", optional = true} elevenlabs = {version = "^1.1.2", optional = true} qdrant-client = { version = "^1.10.1", optional = true } astrapy = { version = "^1.4", optional = true } pusher = 
{version = "^3.3.2", optional = true} ollama = {version = "^0.3.0", optional = true} -duckduckgo-search = {version = "^6.1.12", optional = true} +duckduckgo-search = {version = "^6.2.12", optional = true} sqlalchemy = {version = "^2.0.31", optional = true} opentelemetry-sdk = {version = "^1.25.0", optional = true} opentelemetry-api = {version = "^1.25.0", optional = true} opentelemetry-instrumentation = {version = "^0.46b0", optional = true} opentelemetry-instrumentation-threading = {version = "^0.46b0", optional = true} opentelemetry-exporter-otlp-proto-http = {version = "^1.25.0", optional = true} -diffusers = {version = ">=0.29.1,<0.31.0", optional = true} -accelerate = {version = ">=0.32.1,<0.35.0", optional = true} -sentencepiece = {version = "^0.2.0", optional = true} -torch = {version = "^2.3.1", optional = true} +diffusers = {version = "^0.30.3", optional = true} +tavily-python = {version = "^0.5.0", optional = true} +exa-py = {version = "^1.1.4", optional = true} # loaders pandas = {version = "^1.3", optional = true} -pypdf = {version = "^3.9", optional = true} +pypdf = {version = "^5.0.1", optional = true} pillow = {version = "^10.2.0", optional = true} mail-parser = {version = "^3.15.0", optional = true} -filetype = {version = "^1.2", optional = true} [tool.poetry.extras] drivers-prompt-cohere = ["cohere"] @@ -111,6 +110,8 @@ drivers-web-scraper-trafilatura = ["trafilatura"] drivers-web-scraper-markdownify = ["playwright", "beautifulsoup4", "markdownify"] drivers-web-search-duckduckgo = ["duckduckgo-search"] +drivers-web-search-tavily = ["tavily-python"] +drivers-web-search-exa = ["exa-py"] drivers-event-listener-amazon-sqs = ["boto3"] drivers-event-listener-amazon-iot = ["boto3"] @@ -142,19 +143,11 @@ drivers-observability-datadog = [ "opentelemetry-exporter-otlp-proto-http", ] -drivers-image-generation-huggingface = [ - "diffusers", - "accelerate", - "sentencepiece", - "torch", - "pillow", -] +drivers-image-generation-huggingface = ["diffusers", 
"pillow"] -loaders-dataframe = ["pandas"] loaders-pdf = ["pypdf"] loaders-image = ["pillow"] loaders-email = ["mail-parser"] -loaders-audio = ["filetype"] loaders-sql = ["sqlalchemy"] all = [ @@ -186,22 +179,19 @@ all = [ "pusher", "ollama", "duckduckgo-search", + "tavily-python", "opentelemetry-sdk", "opentelemetry-api", "opentelemetry-instrumentation", "opentelemetry-instrumentation-threading", "opentelemetry-exporter-otlp-proto-http", "diffusers", - "accelerate", - "sentencepiece", - "torch", "pillow", # loaders "pandas", "pypdf", "mail-parser", - "filetype", ] [tool.poetry.group.test] @@ -213,12 +203,13 @@ pytest-mock = "^3.1.4" mongomock = "^4.1.2" twine = "^5.1.1" -moto = {extras = ["dynamodb", "iotdata", "sqs"], version = "^4.2.13"} +moto = {extras = ["dynamodb", "iotdata", "sqs"], version = "^5.0.16"} pytest-xdist = "^3.3.1" pytest-cov = "^5.0.0" pytest-env = "^1.1.1" fuzzywuzzy = "^0.18.0" pytest-clarity = "^1.0.1" +torch = "^2.4.1" [tool.poetry.group.dev] @@ -227,8 +218,8 @@ optional = true [tool.poetry.group.dev.dependencies] ruff = "^0.6.0" pyright = "^1.1.376" -pre-commit = "^3.7.1" -boto3-stubs = {extras = ["bedrock", "iam", "opensearch", "s3", "sagemaker"], version = "^1.34.105"} +pre-commit = "^4.0.0" +boto3-stubs = {extras = ["bedrock", "iam", "opensearch", "s3", "sagemaker", "sqs", "iot-data", "dynamodb", "redshift-data"], version = "^1.34.105"} typos = "^1.22.9" diff --git a/tests/mocks/mock_multi_text_input_task.py b/tests/mocks/mock_multi_text_input_task.py deleted file mode 100644 index be00bbf65..000000000 --- a/tests/mocks/mock_multi_text_input_task.py +++ /dev/null @@ -1,10 +0,0 @@ -from attrs import define - -from griptape.artifacts import TextArtifact -from griptape.tasks import BaseMultiTextInputTask - - -@define -class MockMultiTextInputTask(BaseMultiTextInputTask): - def run(self) -> TextArtifact: - return TextArtifact(self.input[0].to_text()) diff --git a/tests/mocks/mock_tool/tool.py b/tests/mocks/mock_tool/tool.py index 
7d09f391e..9c2241636 100644 --- a/tests/mocks/mock_tool/tool.py +++ b/tests/mocks/mock_tool/tool.py @@ -1,4 +1,4 @@ -from attrs import define, field +from attrs import Factory, define, field from schema import Literal, Schema from griptape.artifacts import BaseArtifact, ErrorArtifact, ListArtifact, TextArtifact @@ -11,6 +11,7 @@ class MockTool(BaseTool): test_field: str = field(default="test", kw_only=True) test_int: int = field(default=5, kw_only=True) test_dict: dict = field(factory=dict, kw_only=True) + custom_schema: dict = field(default=Factory(lambda: {"test": str}), kw_only=True) @activity( config={ @@ -52,6 +53,15 @@ def test_str_output(self, value: dict) -> str: def test_no_schema(self, value: dict) -> str: return "no schema" + @activity( + config={ + "description": "test description", + "schema": lambda _self: _self.build_custom_schema(), + } + ) + def test_callable_schema(self) -> TextArtifact: + return TextArtifact("ack") + @activity(config={"description": "test description"}) def test_list_output(self, value: dict) -> ListArtifact: return ListArtifact([TextArtifact("foo"), TextArtifact("bar")]) @@ -64,3 +74,6 @@ def test_without_default_memory(self, value: dict) -> str: def foo(self) -> str: return "foo" + + def build_custom_schema(self) -> Schema: + return Schema(self.custom_schema, description="Test input") diff --git a/tests/resources/test_ruleset.json b/tests/resources/test_ruleset.json new file mode 100644 index 000000000..418640a6a --- /dev/null +++ b/tests/resources/test_ruleset.json @@ -0,0 +1,19 @@ +{ + "rules": [ + { + "value": "value1", + "meta": { + "foo": "bar" + } + }, + { + "value": "value2", + "meta": { + "foo": "baz" + } + } + ], + "meta": { + "foo": "bar" + } +} \ No newline at end of file diff --git a/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py b/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py index d74c273f6..b061e5b67 100644 --- a/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py 
+++ b/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py @@ -65,6 +65,11 @@ def test_to_dict(self, config): }, "type": "LocalVectorStoreDriver", }, + "ruleset_driver": { + "type": "LocalRulesetDriver", + "raise_not_found": True, + "persist_dir": None, + }, "type": "AmazonBedrockDriversConfig", "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, @@ -120,6 +125,11 @@ def test_to_dict_with_values(self, config_with_values): }, "type": "LocalVectorStoreDriver", }, + "ruleset_driver": { + "type": "LocalRulesetDriver", + "raise_not_found": True, + "persist_dir": None, + }, "type": "AmazonBedrockDriversConfig", "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, diff --git a/tests/unit/configs/drivers/test_anthropic_drivers_config.py b/tests/unit/configs/drivers/test_anthropic_drivers_config.py index 2bdb9497d..b69893560 100644 --- a/tests/unit/configs/drivers/test_anthropic_drivers_config.py +++ b/tests/unit/configs/drivers/test_anthropic_drivers_config.py @@ -49,6 +49,11 @@ def test_to_dict(self, config): "type": "LocalConversationMemoryDriver", "persist_file": None, }, + "ruleset_driver": { + "type": "LocalRulesetDriver", + "raise_not_found": True, + "persist_dir": None, + }, "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, } diff --git a/tests/unit/configs/drivers/test_azure_openai_drivers_config.py b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py index 83b9dd77c..6c6d49483 100644 --- a/tests/unit/configs/drivers/test_azure_openai_drivers_config.py +++ b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py @@ -97,4 +97,9 @@ def test_to_dict(self, config): "voice": "alloy", }, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, + "ruleset_driver": { + "type": 
"LocalRulesetDriver", + "raise_not_found": True, + "persist_dir": None, + }, } diff --git a/tests/unit/configs/drivers/test_cohere_drivers_config.py b/tests/unit/configs/drivers/test_cohere_drivers_config.py index 0d16d1ab2..b828fef41 100644 --- a/tests/unit/configs/drivers/test_cohere_drivers_config.py +++ b/tests/unit/configs/drivers/test_cohere_drivers_config.py @@ -41,4 +41,9 @@ def test_to_dict(self, config): "input_type": "search_document", }, }, + "ruleset_driver": { + "type": "LocalRulesetDriver", + "raise_not_found": True, + "persist_dir": None, + }, } diff --git a/tests/unit/configs/drivers/test_drivers_config.py b/tests/unit/configs/drivers/test_drivers_config.py index de7edc9ae..8eba0cb6a 100644 --- a/tests/unit/configs/drivers/test_drivers_config.py +++ b/tests/unit/configs/drivers/test_drivers_config.py @@ -32,6 +32,11 @@ def test_to_dict(self, config): }, "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, + "ruleset_driver": { + "type": "LocalRulesetDriver", + "raise_not_found": True, + "persist_dir": None, + }, } def test_from_dict(self, config): @@ -64,6 +69,7 @@ def test_lazy_init(self): assert Defaults.drivers_config._conversation_memory_driver is None assert Defaults.drivers_config._text_to_speech_driver is None assert Defaults.drivers_config._audio_transcription_driver is None + assert Defaults.drivers_config._ruleset_driver is None assert Defaults.drivers_config.prompt_driver is not None assert Defaults.drivers_config.image_generation_driver is not None @@ -73,6 +79,7 @@ def test_lazy_init(self): assert Defaults.drivers_config.conversation_memory_driver is not None assert Defaults.drivers_config.text_to_speech_driver is not None assert Defaults.drivers_config.audio_transcription_driver is not None + assert Defaults.drivers_config.ruleset_driver is not None assert Defaults.drivers_config._prompt_driver is not None assert 
Defaults.drivers_config._image_generation_driver is not None @@ -82,3 +89,4 @@ def test_lazy_init(self): assert Defaults.drivers_config._conversation_memory_driver is not None assert Defaults.drivers_config._text_to_speech_driver is not None assert Defaults.drivers_config._audio_transcription_driver is not None + assert Defaults.drivers_config._ruleset_driver is not None diff --git a/tests/unit/configs/drivers/test_google_drivers_config.py b/tests/unit/configs/drivers/test_google_drivers_config.py index 7a752c6de..ab695369e 100644 --- a/tests/unit/configs/drivers/test_google_drivers_config.py +++ b/tests/unit/configs/drivers/test_google_drivers_config.py @@ -49,6 +49,11 @@ def test_to_dict(self, config): }, "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, + "ruleset_driver": { + "type": "LocalRulesetDriver", + "raise_not_found": True, + "persist_dir": None, + }, } def test_from_dict(self, config): diff --git a/tests/unit/configs/drivers/test_openai_driver_config.py b/tests/unit/configs/drivers/test_openai_driver_config.py index 016383c32..a3cca9608 100644 --- a/tests/unit/configs/drivers/test_openai_driver_config.py +++ b/tests/unit/configs/drivers/test_openai_driver_config.py @@ -83,6 +83,11 @@ def test_to_dict(self, config): "model": "whisper-1", "organization": None, }, + "ruleset_driver": { + "type": "LocalRulesetDriver", + "raise_not_found": True, + "persist_dir": None, + }, } def test_from_dict(self, config): diff --git a/tests/unit/drivers/event_listener/test_amazon_sqs_event_listener_driver.py b/tests/unit/drivers/event_listener/test_amazon_sqs_event_listener_driver.py index 10ef0354c..c4885f23f 100644 --- a/tests/unit/drivers/event_listener/test_amazon_sqs_event_listener_driver.py +++ b/tests/unit/drivers/event_listener/test_amazon_sqs_event_listener_driver.py @@ -1,6 +1,6 @@ import boto3 import pytest -from moto import mock_sqs +from moto import mock_aws from 
griptape.drivers.event_listener.amazon_sqs_event_listener_driver import AmazonSqsEventListenerDriver from tests.mocks.mock_event import MockEvent @@ -14,7 +14,7 @@ def _run_before_and_after_tests(self): @pytest.fixture() def driver(self): - mock = mock_sqs() + mock = mock_aws() mock.start() session = boto3.Session(region_name="us-east-1") diff --git a/tests/unit/drivers/event_listener/test_aws_iot_event_listener_driver.py b/tests/unit/drivers/event_listener/test_aws_iot_event_listener_driver.py index b597a5332..09241ffd5 100644 --- a/tests/unit/drivers/event_listener/test_aws_iot_event_listener_driver.py +++ b/tests/unit/drivers/event_listener/test_aws_iot_event_listener_driver.py @@ -1,13 +1,13 @@ import boto3 import pytest -from moto import mock_iotdata +from moto import mock_aws from griptape.drivers.event_listener.aws_iot_core_event_listener_driver import AwsIotCoreEventListenerDriver from tests.mocks.mock_event import MockEvent from tests.utils.aws import mock_aws_credentials -@mock_iotdata +@mock_aws class TestAwsIotCoreEventListenerDriver: @pytest.fixture(autouse=True) def _run_before_and_after_tests(self): diff --git a/tests/unit/drivers/event_listener/test_pusher_event_listener_driver.py b/tests/unit/drivers/event_listener/test_pusher_event_listener_driver.py index 50856c0da..4168f762d 100644 --- a/tests/unit/drivers/event_listener/test_pusher_event_listener_driver.py +++ b/tests/unit/drivers/event_listener/test_pusher_event_listener_driver.py @@ -9,11 +9,11 @@ class TestPusherEventListenerDriver: @pytest.fixture(autouse=True) def mock_post(self, mocker): - mock_pusher_client = mocker.patch("pusher.Pusher") - mock_pusher_client.return_value.trigger.return_value = Mock() - mock_pusher_client.return_value.trigger_batch.return_value = Mock() + mock_client = mocker.patch("pusher.Pusher") + mock_client.return_value.trigger.return_value = Mock() + mock_client.return_value.trigger_batch.return_value = Mock() - return mock_pusher_client + return mock_client 
@pytest.fixture() def driver(self): @@ -33,12 +33,12 @@ def test_try_publish_event_payload(self, driver): data = MockEvent().to_dict() driver.try_publish_event_payload(data) - driver.pusher_client.trigger.assert_called_with(channels="test-channel", event_name="test-event", data=data) + driver.client.trigger.assert_called_with(channels="test-channel", event_name="test-event", data=data) def test_try_publish_event_payload_batch(self, driver): data = [MockEvent().to_dict() for _ in range(3)] driver.try_publish_event_payload_batch(data) - driver.pusher_client.trigger_batch.assert_called_with( + driver.client.trigger_batch.assert_called_with( [{"channel": "test-channel", "name": "test-event", "data": data[i]} for i in range(3)] ) diff --git a/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py b/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py index 84ce61768..2240dee58 100644 --- a/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py +++ b/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py @@ -3,11 +3,11 @@ import boto3 import pytest -from moto import mock_s3 +from moto import mock_aws -from griptape.artifacts import InfoArtifact, ListArtifact, TextArtifact +from griptape.artifacts import InfoArtifact, TextArtifact +from griptape.artifacts.blob_artifact import BlobArtifact from griptape.drivers import AmazonS3FileManagerDriver -from griptape.loaders import TextLoader from tests.utils.aws import mock_aws_credentials @@ -18,7 +18,7 @@ def _set_aws_credentials(self): @pytest.fixture() def session(self): - mock = mock_s3() + mock = mock_aws() mock.start() yield boto3.Session(region_name="us-east-1") mock.stop() @@ -154,8 +154,7 @@ def test_list_files_failure(self, workdir, path, expected, driver): def test_load_file(self, driver): artifact = driver.load_file("resources/bitcoin.pdf") - assert isinstance(artifact, ListArtifact) - assert len(artifact.value) == 4 + assert isinstance(artifact, 
BlobArtifact) @pytest.mark.parametrize( ("workdir", "path", "expected"), @@ -185,9 +184,8 @@ def test_load_file_failure(self, workdir, path, expected, driver): def test_load_file_with_encoding(self, driver): artifact = driver.load_file("resources/test.txt") - assert isinstance(artifact, ListArtifact) - assert len(artifact.value) == 1 - assert isinstance(artifact.value[0], TextArtifact) + assert isinstance(artifact, BlobArtifact) + assert artifact.encoding == "utf-8" @pytest.mark.parametrize( ("workdir", "path", "content"), @@ -240,9 +238,7 @@ def test_save_file_failure(self, workdir, path, expected, temp_dir, driver, s3_c def test_save_file_with_encoding(self, session, bucket, get_s3_value): workdir = "/sub-folder" - driver = AmazonS3FileManagerDriver( - session=session, bucket=bucket, default_loader=TextLoader(encoding="utf-8"), loaders={}, workdir=workdir - ) + driver = AmazonS3FileManagerDriver(session=session, bucket=bucket, workdir=workdir) path = "test/foobar.txt" result = driver.save_file(path, "foobar") @@ -253,9 +249,7 @@ def test_save_file_with_encoding(self, session, bucket, get_s3_value): def test_save_and_load_file_with_encoding(self, session, bucket, get_s3_value): workdir = "/sub-folder" - driver = AmazonS3FileManagerDriver( - session=session, bucket=bucket, loaders={"txt": TextLoader(encoding="ascii")}, workdir=workdir - ) + driver = AmazonS3FileManagerDriver(session=session, bucket=bucket, encoding="ascii", workdir=workdir) path = "test/foobar.txt" result = driver.save_file(path, "foobar") @@ -264,13 +258,10 @@ def test_save_and_load_file_with_encoding(self, session, bucket, get_s3_value): assert get_s3_value(expected_s3_key) == "foobar" assert result.value == "Successfully saved file" - driver = AmazonS3FileManagerDriver( - session=session, bucket=bucket, default_loader=TextLoader(encoding="ascii"), loaders={}, workdir=workdir - ) + driver = AmazonS3FileManagerDriver(session=session, bucket=bucket, encoding="ascii", workdir=workdir) path = 
"test/foobar.txt" result = driver.load_file(path) - assert isinstance(result, ListArtifact) - assert len(result.value) == 1 - assert isinstance(result.value[0], TextArtifact) + assert isinstance(result, TextArtifact) + assert result.encoding == "ascii" diff --git a/tests/unit/drivers/file_manager/test_local_file_manager_driver.py b/tests/unit/drivers/file_manager/test_local_file_manager_driver.py index 394a838a3..99f0285bc 100644 --- a/tests/unit/drivers/file_manager/test_local_file_manager_driver.py +++ b/tests/unit/drivers/file_manager/test_local_file_manager_driver.py @@ -4,9 +4,9 @@ import pytest -from griptape.artifacts import InfoArtifact, ListArtifact, TextArtifact +from griptape.artifacts import InfoArtifact, TextArtifact +from griptape.artifacts.blob_artifact import BlobArtifact from griptape.drivers import LocalFileManagerDriver -from griptape.loaders.text_loader import TextLoader class TestLocalFileManagerDriver: @@ -127,8 +127,7 @@ def test_list_files_failure(self, workdir, path, expected, temp_dir, driver): def test_load_file(self, driver: LocalFileManagerDriver): artifact = driver.load_file("resources/bitcoin.pdf") - assert isinstance(artifact, ListArtifact) - assert len(artifact.value) == 4 + assert isinstance(artifact, BlobArtifact) @pytest.mark.parametrize( ("workdir", "path", "expected"), @@ -156,23 +155,6 @@ def test_load_file_failure(self, workdir, path, expected, temp_dir, driver): with pytest.raises(expected): driver.load_file(path) - def test_load_file_with_encoding(self, driver: LocalFileManagerDriver): - artifact = driver.load_file("resources/test.txt") - - assert isinstance(artifact, ListArtifact) - assert len(artifact.value) == 1 - assert isinstance(artifact.value[0], TextArtifact) - - def test_load_file_with_encoding_failure(self, driver): - driver = LocalFileManagerDriver( - default_loader=TextLoader(encoding="utf-8"), - loaders={}, - workdir=os.path.normpath(os.path.abspath(os.path.dirname(__file__) + "../../../../")), - ) - - with 
pytest.raises(UnicodeDecodeError): - driver.load_file("resources/bitcoin.pdf") - @pytest.mark.parametrize( ("workdir", "path", "content"), [ @@ -224,25 +206,24 @@ def test_save_file_failure(self, workdir, path, expected, temp_dir, driver): driver.save_file(path, "foobar") def test_save_file_with_encoding(self, temp_dir): - driver = LocalFileManagerDriver(default_loader=TextLoader(encoding="utf-8"), loaders={}, workdir=temp_dir) + driver = LocalFileManagerDriver(encoding="utf-8", workdir=temp_dir) result = driver.save_file(os.path.join("test", "foobar.txt"), "foobar") assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar" assert result.value == "Successfully saved file" def test_save_and_load_file_with_encoding(self, temp_dir): - driver = LocalFileManagerDriver(loaders={"txt": TextLoader(encoding="ascii")}, workdir=temp_dir) + driver = LocalFileManagerDriver(encoding="ascii", workdir=temp_dir) result = driver.save_file(os.path.join("test", "foobar.txt"), "foobar") assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar" assert result.value == "Successfully saved file" - driver = LocalFileManagerDriver(default_loader=TextLoader(encoding="ascii"), loaders={}, workdir=temp_dir) + driver = LocalFileManagerDriver(encoding="ascii", workdir=temp_dir) result = driver.load_file(os.path.join("test", "foobar.txt")) - assert isinstance(result, ListArtifact) - assert len(result.value) == 1 - assert isinstance(result.value[0], TextArtifact) + assert isinstance(result, TextArtifact) + assert result.encoding == "ascii" def _to_driver_workdir(self, temp_dir, workdir): # Treat the workdir as an absolute path, but modify it to be relative to the temp_dir. 
diff --git a/tests/unit/drivers/image_generation/test_amazon_bedrock_stable_diffusion_image_generation_driver.py b/tests/unit/drivers/image_generation/test_amazon_bedrock_stable_diffusion_image_generation_driver.py index 05e669b66..dae6f695b 100644 --- a/tests/unit/drivers/image_generation/test_amazon_bedrock_stable_diffusion_image_generation_driver.py +++ b/tests/unit/drivers/image_generation/test_amazon_bedrock_stable_diffusion_image_generation_driver.py @@ -8,13 +8,13 @@ class TestAmazonBedrockImageGenerationDriver: @pytest.fixture() - def bedrock_client(self): + def client(self): return Mock() @pytest.fixture() - def session(self, bedrock_client): + def session(self, client): session = Mock() - session.client.return_value = bedrock_client + session.client.return_value = client return session @@ -40,7 +40,7 @@ def test_init_requires_image_generation_model_driver(self, session): AmazonBedrockImageGenerationDriver(session=session, model="stability.stable-diffusion-xl-v1") # pyright: ignore[reportCallIssue] def test_try_text_to_image(self, driver): - driver.bedrock_client.invoke_model.return_value = { + driver.client.invoke_model.return_value = { "body": io.BytesIO( b"""{ "artifacts": [ diff --git a/tests/unit/drivers/image_query/test_amazon_bedrock_image_query_driver.py b/tests/unit/drivers/image_query/test_amazon_bedrock_image_query_driver.py index 9493ab23d..66b23d0c3 100644 --- a/tests/unit/drivers/image_query/test_amazon_bedrock_image_query_driver.py +++ b/tests/unit/drivers/image_query/test_amazon_bedrock_image_query_driver.py @@ -9,13 +9,13 @@ class TestAmazonBedrockImageQueryDriver: @pytest.fixture() - def bedrock_client(self, mocker): + def client(self, mocker): return Mock() @pytest.fixture() - def session(self, bedrock_client): + def session(self, client): session = Mock() - session.client.return_value = bedrock_client + session.client.return_value = client return session @@ -35,7 +35,7 @@ def test_init(self, image_query_driver): assert 
image_query_driver def test_try_query(self, image_query_driver): - image_query_driver.bedrock_client.invoke_model.return_value = {"body": io.BytesIO(b"""{"content": []}""")} + image_query_driver.client.invoke_model.return_value = {"body": io.BytesIO(b"""{"content": []}""")} text_artifact = image_query_driver.try_query( "Prompt String", [ImageArtifact(value=b"test-data", width=100, height=100, format="png")] @@ -44,7 +44,7 @@ def test_try_query(self, image_query_driver): assert text_artifact.value == "content" def test_try_query_no_body(self, image_query_driver): - image_query_driver.bedrock_client.invoke_model.return_value = {"body": io.BytesIO(b"")} + image_query_driver.client.invoke_model.return_value = {"body": io.BytesIO(b"")} with pytest.raises(ValueError): image_query_driver.try_query( diff --git a/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py index 96e2ca969..03d83a652 100644 --- a/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py @@ -1,6 +1,6 @@ import boto3 import pytest -from moto import mock_dynamodb +from moto import mock_aws from griptape.drivers import AmazonDynamoDbConversationMemoryDriver from griptape.memory.structure import ConversationMemory @@ -11,15 +11,18 @@ class TestDynamoDbConversationMemoryDriver: DYNAMODB_TABLE_NAME = "griptape" + DYNAMODB_COMPOSITE_TABLE_NAME = "griptape_composite" DYNAMODB_PARTITION_KEY = "entryId" + DYNAMODB_SORT_KEY = "sortKey" AWS_REGION = "us-west-2" VALUE_ATTRIBUTE_KEY = "foo" PARTITION_KEY_VALUE = "bar" + SORT_KEY_VALUE = "baz" @pytest.fixture(autouse=True) def _run_before_and_after_tests(self): mock_aws_credentials() - self.mock_dynamodb = mock_dynamodb() + self.mock_dynamodb = mock_aws() self.mock_dynamodb.start() dynamodb = 
boto3.Session(region_name=self.AWS_REGION).client("dynamodb") @@ -30,9 +33,23 @@ def _run_before_and_after_tests(self): BillingMode="PAY_PER_REQUEST", ) + dynamodb.create_table( + TableName=self.DYNAMODB_COMPOSITE_TABLE_NAME, + KeySchema=[ + {"AttributeName": self.DYNAMODB_PARTITION_KEY, "KeyType": "HASH"}, + {"AttributeName": self.DYNAMODB_SORT_KEY, "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": self.DYNAMODB_PARTITION_KEY, "AttributeType": "S"}, + {"AttributeName": self.DYNAMODB_SORT_KEY, "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + yield dynamodb.delete_table(TableName=self.DYNAMODB_TABLE_NAME) + dynamodb.delete_table(TableName=self.DYNAMODB_COMPOSITE_TABLE_NAME) self.mock_dynamodb.stop() def test_store(self): @@ -62,27 +79,31 @@ def test_store(self): def test_store_with_sort_key(self): session = boto3.Session(region_name=self.AWS_REGION) dynamodb = session.resource("dynamodb") - table = dynamodb.Table(self.DYNAMODB_TABLE_NAME) + table = dynamodb.Table(self.DYNAMODB_COMPOSITE_TABLE_NAME) memory_driver = AmazonDynamoDbConversationMemoryDriver( session=session, - table_name=self.DYNAMODB_TABLE_NAME, + table_name=self.DYNAMODB_COMPOSITE_TABLE_NAME, partition_key=self.DYNAMODB_PARTITION_KEY, value_attribute_key=self.VALUE_ATTRIBUTE_KEY, partition_key_value=self.PARTITION_KEY_VALUE, - sort_key="sortKey", - sort_key_value="foo", + sort_key=self.DYNAMODB_SORT_KEY, + sort_key_value=self.SORT_KEY_VALUE, ) memory = ConversationMemory(conversation_memory_driver=memory_driver) pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) - response = table.get_item(TableName=self.DYNAMODB_TABLE_NAME, Key={"entryId": "bar", "sortKey": "foo"}) + response = table.get_item( + TableName=self.DYNAMODB_COMPOSITE_TABLE_NAME, Key={"entryId": "bar", "sortKey": "baz"} + ) assert "Item" not in response pipeline.run() - response = table.get_item(TableName=self.DYNAMODB_TABLE_NAME, Key={"entryId": "bar", "sortKey": 
"foo"}) + response = table.get_item( + TableName=self.DYNAMODB_COMPOSITE_TABLE_NAME, Key={"entryId": "bar", "sortKey": "baz"} + ) assert "Item" in response def test_load(self): @@ -109,12 +130,12 @@ def test_load(self): def test_load_with_sort_key(self): memory_driver = AmazonDynamoDbConversationMemoryDriver( session=boto3.Session(region_name=self.AWS_REGION), - table_name=self.DYNAMODB_TABLE_NAME, + table_name=self.DYNAMODB_COMPOSITE_TABLE_NAME, partition_key=self.DYNAMODB_PARTITION_KEY, value_attribute_key=self.VALUE_ATTRIBUTE_KEY, partition_key_value=self.PARTITION_KEY_VALUE, - sort_key="sortKey", - sort_key_value="foo", + sort_key=self.DYNAMODB_SORT_KEY, + sort_key_value=self.SORT_KEY_VALUE, ) memory = ConversationMemory(conversation_memory_driver=memory_driver, meta={"foo": "bar"}) pipeline = Pipeline(conversation_memory=memory) diff --git a/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py index dccdc9fd0..0c76d6ecd 100644 --- a/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py @@ -13,61 +13,70 @@ class TestGriptapeCloudConversationMemoryDriver: @pytest.fixture(autouse=True) def _mock_requests(self, mocker): - def get(*args, **kwargs): - if str(args[0]).endswith("/messages"): - return mocker.Mock( - raise_for_status=lambda: None, - json=lambda: { - "messages": [ - { - "message_id": "123", - "input": '{"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}', - "output": '{"type": "TextArtifact", "id": "123", "value": "Hello! 
How can I assist you today?"}', - "index": 0, - "metadata": {"run_id": "1234"}, - } - ] - }, - ) - else: - thread_id = args[0].split("/")[-1] - return mocker.Mock( - raise_for_status=lambda: None, - json=lambda: { - "metadata": {"foo": "bar"}, - "name": "test", - "thread_id": "test_metadata", - } - if thread_id == "test_metadata" - else {"name": "test", "thread_id": "test"}, - ) - - mocker.patch( - "requests.get", - side_effect=get, - ) - - def post(*args, **kwargs): - if str(args[0]).endswith("/threads"): - return mocker.Mock( - raise_for_status=lambda: None, - json=lambda: {"thread_id": "test", "name": "test"}, - ) + def request(*args, **kwargs): + if args[0] == "get": + if "/messages" in str(args[1]): + thread_id = args[1].split("/")[-2] + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: { + "messages": [ + { + "message_id": f"{thread_id}_message", + "input": '{"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}', + "output": '{"type": "TextArtifact", "id": "123", "value": "Hello! 
How can I assist you today?"}', + "metadata": {"run_id": "1234"}, + } + ] + } + if thread_id != "no_messages" + else {"messages": []}, + status_code=200, + ) + elif "/threads/" in str(args[1]): + thread_id = args[1].split("/")[-1] + return mocker.Mock( + # raise for status if thread_id is == not_found + raise_for_status=lambda: None if thread_id != "not_found" else ValueError, + json=lambda: { + "metadata": {"foo": "bar"}, + "name": "test", + "thread_id": thread_id, + }, + status_code=200 if thread_id != "not_found" else 404, + ) + elif "/threads?alias=" in str(args[1]): + alias = args[1].split("=")[-1] + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: {"threads": [{"thread_id": alias, "alias": alias, "metadata": {"foo": "bar"}}]} + if alias != "not_found" + else {"threads": []}, + status_code=200, + ) + else: + return mocker.Mock() + elif args[0] == "post": + if str(args[1]).endswith("/threads"): + body = kwargs["json"] + body["thread_id"] = "test" + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: body, + ) + else: + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: {"message_id": "test"}, + ) else: return mocker.Mock( raise_for_status=lambda: None, - json=lambda: {"message_id": "test"}, ) mocker.patch( - "requests.post", - side_effect=post, - ) - mocker.patch( - "requests.patch", - return_value=mocker.Mock( - raise_for_status=lambda: None, - ), + "requests.request", + side_effect=request, ) @pytest.fixture() @@ -80,12 +89,22 @@ def test_no_api_key(self): def test_thread_id(self): driver = GriptapeCloudConversationMemoryDriver(api_key="test") + assert driver.thread_id is None + assert driver.thread.get("thread_id") == "test" assert driver.thread_id == "test" os.environ["GT_CLOUD_THREAD_ID"] = "test_env" driver = GriptapeCloudConversationMemoryDriver(api_key="test") + assert driver.thread_id is None + assert driver.thread.get("thread_id") == "test_env" assert driver.thread_id == "test_env" - driver = 
GriptapeCloudConversationMemoryDriver(api_key="test", thread_id="test_init") - assert driver.thread_id == "test_init" + os.environ.pop("GT_CLOUD_THREAD_ID") + + def test_thread_alias(self): + driver = GriptapeCloudConversationMemoryDriver(api_key="test", alias="test") + assert driver.thread_id is None + assert driver.thread.get("thread_id") == "test" + assert driver.thread_id == "test" + assert driver.alias == "test" def test_store(self, driver: GriptapeCloudConversationMemoryDriver): runs = [ @@ -98,8 +117,4 @@ def test_load(self, driver): runs, metadata = driver.load() assert len(runs) == 1 assert runs[0].id == "1234" - assert metadata == {} - driver.thread_id = "test_metadata" - runs, metadata = driver.load() - assert len(runs) == 1 assert metadata == {"foo": "bar"} diff --git a/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py b/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py index c36c46074..40b0a8a0e 100644 --- a/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py @@ -32,6 +32,30 @@ class TestAmazonBedrockPromptDriver: "name": "MockTool_test", } }, + { + "toolSpec": { + "description": "test description", + "inputSchema": { + "json": { + "$id": "http://json-schema.org/draft-07/schema#", + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": False, + "properties": { + "values": { + "additionalProperties": False, + "description": "Test input", + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "type": "object", + } + }, + "required": ["values"], + "type": "object", + } + }, + "name": "MockTool_test_callable_schema", + } + }, { "toolSpec": { "description": "test description: foo", diff --git a/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py b/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py index 88c1e75ff..40c983f7d 100644 --- a/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py +++ 
b/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py @@ -14,8 +14,6 @@ class TestAnthropicPromptDriver: { "description": "test description: foo", "input_schema": { - "$id": "Input Schema", - "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": False, "properties": { "values": { @@ -31,11 +29,27 @@ class TestAnthropicPromptDriver: }, "name": "MockTool_test", }, + { + "description": "test description", + "input_schema": { + "additionalProperties": False, + "properties": { + "values": { + "additionalProperties": False, + "description": "Test input", + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "type": "object", + } + }, + "required": ["values"], + "type": "object", + }, + "name": "MockTool_test_callable_schema", + }, { "description": "test description: foo", "input_schema": { - "$id": "Input Schema", - "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": False, "properties": { "values": { @@ -54,8 +68,6 @@ class TestAnthropicPromptDriver: { "description": "test description: foo", "input_schema": { - "$id": "Input Schema", - "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": False, "properties": { "values": { @@ -74,8 +86,6 @@ class TestAnthropicPromptDriver: { "description": "test description", "input_schema": { - "$id": "Input Schema", - "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": False, "properties": {}, "required": [], @@ -86,8 +96,6 @@ class TestAnthropicPromptDriver: { "description": "test description", "input_schema": { - "$id": "Input Schema", - "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": False, "properties": {}, "required": [], @@ -98,8 +106,6 @@ class TestAnthropicPromptDriver: { "description": "test description: foo", "input_schema": { - "$id": "Input Schema", - "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": False, "properties": { "values": { @@ -118,8 
+124,6 @@ class TestAnthropicPromptDriver: { "description": "test description", "input_schema": { - "$id": "Input Schema", - "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": False, "properties": { "values": { diff --git a/tests/unit/drivers/prompt/test_cohere_prompt_driver.py b/tests/unit/drivers/prompt/test_cohere_prompt_driver.py index c642b7ee0..e110d9469 100644 --- a/tests/unit/drivers/prompt/test_cohere_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_cohere_prompt_driver.py @@ -17,6 +17,11 @@ class TestCoherePromptDriver: "name": "MockTool_test", "parameter_definitions": {"test": {"required": True, "type": "string"}}, }, + { + "description": "test description", + "name": "MockTool_test_callable_schema", + "parameter_definitions": {"test": {"required": True, "type": "string"}}, + }, { "description": "test description: foo", "name": "MockTool_test_error", diff --git a/tests/unit/drivers/prompt/test_google_prompt_driver.py b/tests/unit/drivers/prompt/test_google_prompt_driver.py index 5d01217d9..776664eb1 100644 --- a/tests/unit/drivers/prompt/test_google_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_google_prompt_driver.py @@ -19,6 +19,11 @@ class TestGooglePromptDriver: "description": "test description: foo", "parameters": {"type": "OBJECT", "properties": {"test": {"type": "STRING"}}, "required": ["test"]}, }, + { + "name": "MockTool_test_callable_schema", + "description": "test description", + "parameters": {"type": "OBJECT", "properties": {"test": {"type": "STRING"}}, "required": ["test"]}, + }, { "name": "MockTool_test_error", "description": "test description: foo", diff --git a/tests/unit/drivers/prompt/test_ollama_prompt_driver.py b/tests/unit/drivers/prompt/test_ollama_prompt_driver.py index 0fc9e0f09..e4e9c4712 100644 --- a/tests/unit/drivers/prompt/test_ollama_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_ollama_prompt_driver.py @@ -22,6 +22,20 @@ class TestOllamaPromptDriver: }, "type": "function", }, 
+ { + "function": { + "description": "test description", + "name": "MockTool_test_callable_schema", + "parameters": { + "additionalProperties": False, + "description": "Test input", + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "type": "object", + }, + }, + "type": "function", + }, { "function": { "description": "test description: foo", diff --git a/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py b/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py index ae42aa3a1..26358e132 100644 --- a/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py @@ -1,6 +1,7 @@ from unittest.mock import Mock import pytest +import schema from griptape.artifacts import ActionArtifact, ImageArtifact, ListArtifact, TextArtifact from griptape.common import ActionCallDeltaMessageContent, PromptStack, TextDeltaMessageContent, ToolAction @@ -35,6 +36,29 @@ class TestOpenAiChatPromptDriverFixtureMixin: }, "type": "function", }, + { + "function": { + "name": "MockTool_test_callable_schema", + "description": "test description", + "parameters": { + "type": "object", + "properties": { + "values": { + "description": "Test input", + "type": "object", + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "additionalProperties": False, + } + }, + "required": ["values"], + "additionalProperties": False, + "$id": "Parameters Schema", + "$schema": "http://json-schema.org/draft-07/schema#", + }, + }, + "type": "function", + }, { "function": { "description": "test description: foo", @@ -343,10 +367,12 @@ def test_try_run(self, mock_chat_completion_create, prompt_stack, messages, use_ assert message.value[1].value.path == "test" assert message.value[1].value.input == {"foo": "bar"} - def test_try_run_response_format(self, mock_chat_completion_create, prompt_stack, messages): + def test_try_run_response_format_json_object(self, mock_chat_completion_create, prompt_stack, 
messages): # Given driver = OpenAiChatPromptDriver( - model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL, response_format="json_object", use_native_tools=False + model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL, + response_format={"type": "json_object"}, + use_native_tools=False, ) # When @@ -365,6 +391,51 @@ def test_try_run_response_format(self, mock_chat_completion_create, prompt_stack assert message.usage.input_tokens == 5 assert message.usage.output_tokens == 10 + def test_try_run_response_format_json_schema(self, mock_chat_completion_create, prompt_stack, messages): + # Given + driver = OpenAiChatPromptDriver( + model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL, + response_format={ + "type": "json_schema", + "json_schema": { + "strict": True, + "name": "OutputSchema", + "schema": schema.Schema({"test": str}).json_schema("Output Schema"), + }, + }, + use_native_tools=False, + ) + + # When + message = driver.try_run(prompt_stack) + + # Then + mock_chat_completion_create.assert_called_once_with( + model=driver.model, + temperature=driver.temperature, + user=driver.user, + messages=[*messages], + seed=driver.seed, + response_format={ + "json_schema": { + "schema": { + "$id": "Output Schema", + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": False, + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "type": "object", + }, + "name": "OutputSchema", + "strict": True, + }, + "type": "json_schema", + }, + ) + assert message.value[0].value == "model-output" + assert message.usage.input_tokens == 5 + assert message.usage.output_tokens == 10 + @pytest.mark.parametrize("use_native_tools", [True, False]) def test_try_stream_run(self, mock_chat_completion_stream_create, prompt_stack, messages, use_native_tools): # Given diff --git a/tests/unit/drivers/ruleset/__init__.py b/tests/unit/drivers/ruleset/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/tests/unit/drivers/ruleset/test_griptape_cloud_ruleset_driver.py b/tests/unit/drivers/ruleset/test_griptape_cloud_ruleset_driver.py new file mode 100644 index 000000000..e4102fbf9 --- /dev/null +++ b/tests/unit/drivers/ruleset/test_griptape_cloud_ruleset_driver.py @@ -0,0 +1,117 @@ +import pytest + +from griptape.drivers import GriptapeCloudRulesetDriver +from griptape.rules import Rule + + +class TestGriptapeCloudRulesetDriver: + @pytest.fixture(autouse=True) + def _mock_requests(self, mocker): + mocker.patch("requests.get") + + def get(*args, **kwargs): + if "rules?ruleset_id=" in str(args[1]): + ruleset_id = args[1].split("=")[-1] + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: { + "rules": [ + { + "rule_id": f"{ruleset_id}_rule", + "rule": "test rule value", + "metadata": {"foo": "bar"}, + } + ] + } + if ruleset_id != "no_rules" + else {"rules": []}, + status_code=200, + ) + elif "/rulesets/" in str(args[1]): + ruleset_id = args[1].split("/")[-1] + return mocker.Mock( + # raise for status if ruleset_id is == not_found + raise_for_status=lambda: None if ruleset_id != "not_found" else ValueError, + json=lambda: { + "metadata": {"foo": "bar"}, + "name": "test", + "ruleset_id": ruleset_id, + }, + status_code=200 if ruleset_id != "not_found" else 404, + ) + elif "/rulesets?alias=" in str(args[1]): + alias = args[1].split("=")[-1] + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: {"rulesets": [{"ruleset_id": alias, "alias": alias, "metadata": {"foo": "bar"}}]} + if alias != "not_found" + else {"rulesets": []}, + status_code=200, + ) + else: + return mocker.Mock() + + mocker.patch( + "requests.request", + side_effect=get, + ) + + @pytest.fixture() + def ruleset_driver(self): + return GriptapeCloudRulesetDriver(api_key="test", raise_not_found=False) + + def test_no_api_key_raises(self): + with pytest.raises(ValueError): + GriptapeCloudRulesetDriver(api_key=None) + + def test_no_ruleset_raises(self, ruleset_driver): + 
ruleset_driver.raise_not_found = True + with pytest.raises(ValueError): + ruleset_driver.load(ruleset_name="not_found") + + def test_no_ruleset_by_id_raises(self, ruleset_driver): + ruleset_driver.ruleset_id = "not_found" + ruleset_driver.raise_not_found = True + with pytest.raises(ValueError): + ruleset_driver.load(ruleset_name="not_found") + + def test_no_ruleset_by_id(self, ruleset_driver): + ruleset_driver.ruleset_id = "not_found" + rules, meta = ruleset_driver.load("not_found") + assert rules == [] + assert meta == {} + + def test_no_ruleset_by_name(self, ruleset_driver): + rules, meta = ruleset_driver.load(ruleset_name="not_found") + assert rules == [] + assert meta == {} + + def test_load_by_name(self, ruleset_driver): + name = "test" + rules, meta = ruleset_driver.load(ruleset_name=name) + assert len(rules) == 1 + assert isinstance(rules[0], Rule) + assert rules[0].value == "test rule value" + assert rules[0].meta == {"foo": "bar", "griptape_cloud_rule_id": f"{name}_rule"} + assert meta == {"foo": "bar"} + + def test_load_by_id(self, ruleset_driver): + ruleset_id = "1234" + ruleset_driver.ruleset_id = ruleset_id + rules, meta = ruleset_driver.load("not_found") + assert len(rules) == 1 + assert isinstance(rules[0], Rule) + assert rules[0].value == "test rule value" + assert rules[0].meta == {"foo": "bar", "griptape_cloud_rule_id": f"{ruleset_id}_rule"} + assert meta == {"foo": "bar"} + + def test_load_by_id_no_rules(self, ruleset_driver): + ruleset_driver.ruleset_id = "no_rules" + rules, meta = ruleset_driver.load("not_found") + assert rules == [] + assert meta == {"foo": "bar"} + + def test_load_by_name_no_rules(self, ruleset_driver): + rules, meta = ruleset_driver.load(ruleset_name="no_rules") + assert rules == [] + assert meta == {"foo": "bar"} diff --git a/tests/unit/drivers/ruleset/test_local_ruleset_driver.py b/tests/unit/drivers/ruleset/test_local_ruleset_driver.py new file mode 100644 index 000000000..f5465e07b --- /dev/null +++ 
b/tests/unit/drivers/ruleset/test_local_ruleset_driver.py @@ -0,0 +1,44 @@ +from pathlib import Path + +import pytest + +from griptape.drivers import LocalRulesetDriver + +TEST_RULESET_DIR = str(Path("tests/resources/")) +TEST_RULESET_NAME = "test_ruleset.json" + + +class TestLocalRulesetDriver: + @pytest.fixture() + def ruleset_driver(self): + return LocalRulesetDriver(raise_not_found=False, persist_dir=TEST_RULESET_DIR) + + def test_load(self, ruleset_driver): + rules, meta = ruleset_driver.load("name") + assert rules == [] + assert meta == {} + + def test_load_raises(self, ruleset_driver): + ruleset_driver.raise_not_found = True + with pytest.raises(ValueError): + ruleset_driver.load("test") + + def test_load_by_persist_dir(self, ruleset_driver): + rules, meta = ruleset_driver.load(TEST_RULESET_NAME) + assert len(rules) == 2 + assert rules[0].value == "value1" + assert rules[0].meta == {"foo": "bar"} + assert rules[1].value == "value2" + assert rules[1].meta == {"foo": "baz"} + assert meta == {"foo": "bar"} + + def test_load_by_name(self, ruleset_driver): + rules, meta = ruleset_driver.load(TEST_RULESET_NAME) + assert len(rules) == 2 + assert rules[0].value == "value1" + assert rules[0].meta == {"foo": "bar"} + assert meta == {"foo": "bar"} + + def test_load_bad_file(self, ruleset_driver): + with pytest.raises(ValueError): + ruleset_driver.load("test.txt") diff --git a/tests/unit/drivers/vector/test_base_local_vector_store_driver.py b/tests/unit/drivers/vector/test_base_vector_store_driver.py similarity index 95% rename from tests/unit/drivers/vector/test_base_local_vector_store_driver.py rename to tests/unit/drivers/vector/test_base_vector_store_driver.py index 20a3e2b50..8438568ad 100644 --- a/tests/unit/drivers/vector/test_base_local_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_base_vector_store_driver.py @@ -4,12 +4,13 @@ import pytest from griptape.artifacts import TextArtifact +from griptape.drivers import BaseVectorStoreDriver -class 
BaseLocalVectorStoreDriver(ABC): +class TestBaseVectorStoreDriver(ABC): @pytest.fixture() @abstractmethod - def driver(self): ... + def driver(self, *args, **kwargs) -> BaseVectorStoreDriver: ... def test_upsert(self, driver): namespace = driver.upsert_text_artifact(TextArtifact(id="foo1", value="foobar")) diff --git a/tests/unit/drivers/vector/test_griptape_cloud_knowledge_base_vector_store_driver.py b/tests/unit/drivers/vector/test_griptape_cloud_knowledge_base_vector_store_driver.py index 0f52ba6c5..f87b30444 100644 --- a/tests/unit/drivers/vector/test_griptape_cloud_knowledge_base_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_griptape_cloud_knowledge_base_vector_store_driver.py @@ -2,10 +2,10 @@ import pytest -from griptape.drivers import GriptapeCloudKnowledgeBaseVectorStoreDriver +from griptape.drivers import GriptapeCloudVectorStoreDriver -class TestGriptapeCloudKnowledgeBaseVectorStoreDriver: +class TestGriptapeCloudVectorStoreDriver: test_ids = [str(uuid.uuid4()), str(uuid.uuid4())] test_vecs = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]] test_namespaces = [str(uuid.uuid4()), str(uuid.uuid4())] @@ -38,7 +38,7 @@ def driver(self, mocker): mock_response.json.return_value = test_entries mocker.patch("requests.post", return_value=mock_response) - return GriptapeCloudKnowledgeBaseVectorStoreDriver(api_key="foo bar", knowledge_base_id="1") + return GriptapeCloudVectorStoreDriver(api_key="foo bar", knowledge_base_id="1") def test_query(self, driver): result = driver.query( diff --git a/tests/unit/drivers/vector/test_local_vector_store_driver.py b/tests/unit/drivers/vector/test_local_vector_store_driver.py index 2504b2486..9722bb25d 100644 --- a/tests/unit/drivers/vector/test_local_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_local_vector_store_driver.py @@ -3,10 +3,10 @@ from griptape.artifacts import TextArtifact from griptape.drivers import LocalVectorStoreDriver from tests.mocks.mock_embedding_driver import MockEmbeddingDriver -from 
tests.unit.drivers.vector.test_base_local_vector_store_driver import BaseLocalVectorStoreDriver +from tests.unit.drivers.vector.test_base_vector_store_driver import TestBaseVectorStoreDriver -class TestLocalVectorStoreDriver(BaseLocalVectorStoreDriver): +class TestLocalVectorStoreDriver(TestBaseVectorStoreDriver): @pytest.fixture() def driver(self): return LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) diff --git a/tests/unit/drivers/vector/test_marqo_vector_store_driver.py b/tests/unit/drivers/vector/test_marqo_vector_store_driver.py index 254a2b3a1..2d824e1c2 100644 --- a/tests/unit/drivers/vector/test_marqo_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_marqo_vector_store_driver.py @@ -86,7 +86,7 @@ def driver(self, mock_marqo): api_key="foobar", url="http://localhost:8000", index="test", - mq=mock_marqo, + client=mock_marqo, embedding_driver=MockEmbeddingDriver(), ) diff --git a/tests/unit/drivers/vector/test_persistent_local_vector_store_driver.py b/tests/unit/drivers/vector/test_persistent_local_vector_store_driver.py index c130858b5..9fa725e80 100644 --- a/tests/unit/drivers/vector/test_persistent_local_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_persistent_local_vector_store_driver.py @@ -6,10 +6,10 @@ from griptape.artifacts import TextArtifact from griptape.drivers import LocalVectorStoreDriver from tests.mocks.mock_embedding_driver import MockEmbeddingDriver -from tests.unit.drivers.vector.test_base_local_vector_store_driver import BaseLocalVectorStoreDriver +from tests.unit.drivers.vector.test_base_vector_store_driver import TestBaseVectorStoreDriver -class TestPersistentLocalVectorStoreDriver(BaseLocalVectorStoreDriver): +class TestPersistentLocalVectorStoreDriver(TestBaseVectorStoreDriver): @pytest.fixture() def temp_dir(self): with tempfile.TemporaryDirectory() as temp_dir: diff --git a/tests/unit/drivers/vector/test_pinecone_vector_storage_driver.py 
b/tests/unit/drivers/vector/test_pinecone_vector_storage_driver.py index 0726a0c7e..8be38c51e 100644 --- a/tests/unit/drivers/vector/test_pinecone_vector_storage_driver.py +++ b/tests/unit/drivers/vector/test_pinecone_vector_storage_driver.py @@ -7,7 +7,7 @@ class TestPineconeVectorStorageDriver: @pytest.fixture(autouse=True) - def _mock_pinecone(self, mocker): + def mock_client(self, mocker): # Create a fake response fake_query_response = { "matches": [{"id": "foo", "values": [0, 1, 0], "score": 42, "metadata": {"foo": "bar"}}], @@ -15,14 +15,24 @@ def _mock_pinecone(self, mocker): } mock_client = mocker.patch("pinecone.Pinecone") - mock_client().Index().upsert.return_value = None - mock_client().Index().query.return_value = fake_query_response - mock_client().create_index.return_value = None + mock_index = mock_client().Index() + mock_index.upsert.return_value = None + mock_index.query.return_value = fake_query_response + mock_index.create_index.return_value = None + + # Return the mock index when the Pinecone client is called + mock_client.Index.return_value = mock_index + + return mock_client @pytest.fixture() - def driver(self): + def driver(self, mock_client): return PineconeVectorStoreDriver( - api_key="foobar", index_name="test", environment="test", embedding_driver=MockEmbeddingDriver() + api_key="foobar", + index_name="test", + environment="test", + embedding_driver=MockEmbeddingDriver(), + client=mock_client, ) def test_upsert_text_artifact(self, driver): diff --git a/tests/unit/drivers/vector/test_qdrant_vector_store_driver.py b/tests/unit/drivers/vector/test_qdrant_vector_store_driver.py index ffb359953..3c14f2396 100644 --- a/tests/unit/drivers/vector/test_qdrant_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_qdrant_vector_store_driver.py @@ -37,30 +37,6 @@ def driver(self, embedding_driver, mocker): embedding_driver=embedding_driver, ) - def test_attrs_post_init(self, driver): - with 
patch("griptape.drivers.vector.qdrant_vector_store_driver.import_optional_dependency") as mock_import: - mock_qdrant_client = MagicMock() - mock_import.return_value.QdrantClient.return_value = mock_qdrant_client - - driver.__attrs_post_init__() - - mock_import.assert_called_once_with("qdrant_client") - mock_import.return_value.QdrantClient.assert_called_once_with( - location=driver.location, - url=driver.url, - host=driver.host, - path=driver.path, - port=driver.port, - prefer_grpc=driver.prefer_grpc, - grpc_port=driver.grpc_port, - api_key=driver.api_key, - https=driver.https, - prefix=driver.prefix, - force_disable_check_same_thread=driver.force_disable_check_same_thread, - timeout=driver.timeout, - ) - assert driver.client == mock_qdrant_client - def test_delete_vector(self, driver): vector_id = "test_vector_id" diff --git a/tests/unit/drivers/web_search/test_exa_web_search_driver.py b/tests/unit/drivers/web_search/test_exa_web_search_driver.py new file mode 100644 index 000000000..7456fd35e --- /dev/null +++ b/tests/unit/drivers/web_search/test_exa_web_search_driver.py @@ -0,0 +1,56 @@ +import pytest + +from griptape.artifacts import ListArtifact +from griptape.drivers import ExaWebSearchDriver + + +class TestExaWebSearchDriver: + @pytest.fixture() + def mock_exa_client(self, mocker): + return mocker.patch("exa_py.Exa") + + def mock_data(self, mocker): + return mocker.MagicMock(title="foo", url="bar", highlights="baz", text="qux") + + @pytest.fixture() + def driver(self, mock_exa_client, mocker): + mock_response = mocker.Mock() + mock_response.results = [self.mock_data(mocker), self.mock_data(mocker)] # Make sure results is iterable + mock_exa_client.return_value.search_and_contents.return_value = mock_response + return ExaWebSearchDriver(api_key="test", highlights=True, use_autoprompt=True) + + def test_search_returns_results(self, driver, mock_exa_client): + results = driver.search("test") + assert isinstance(results, ListArtifact) + output = [result.value 
for result in results] + assert len(output) == 2 + assert output[0]["title"] == "foo" + assert output[0]["url"] == "bar" + assert output[0]["highlights"] == "baz" + assert output[0]["text"] == "qux" + mock_exa_client.return_value.search_and_contents.assert_called_once_with( + query="test", num_results=5, text=True, highlights=True, use_autoprompt=True + ) + + def test_search_raises_error(self, driver, mock_exa_client): + mock_exa_client.return_value.search_and_contents.side_effect = Exception("test_error") + driver = ExaWebSearchDriver(api_key="test", highlights=True, use_autoprompt=True) + with pytest.raises(Exception, match="test_error"): + driver.search("test") + mock_exa_client.return_value.search_and_contents.assert_called_once_with( + query="test", num_results=5, text=True, highlights=True, use_autoprompt=True + ) + + def test_search_with_params(self, driver, mock_exa_client): + driver.params = {"custom_param": "value"} + driver.search("test", additional_param="extra") + + mock_exa_client.return_value.search_and_contents.assert_called_once_with( + query="test", + num_results=5, + text=True, + highlights=True, + use_autoprompt=True, + custom_param="value", + additional_param="extra", + ) diff --git a/tests/unit/drivers/web_search/test_tavily_web_search_driver.py b/tests/unit/drivers/web_search/test_tavily_web_search_driver.py new file mode 100644 index 000000000..eed66e520 --- /dev/null +++ b/tests/unit/drivers/web_search/test_tavily_web_search_driver.py @@ -0,0 +1,72 @@ +import pytest + +from griptape.artifacts import ListArtifact +from griptape.drivers import TavilyWebSearchDriver + + +class TestTavilyWebSearchDriver: + @pytest.fixture() + def mock_tavily_client(self, mocker): + return mocker.patch("tavily.TavilyClient") + + @pytest.fixture() + def driver(self, mock_tavily_client): + mock_response = { + "results": [ + {"title": "foo", "url": "bar", "content": "baz"}, + {"title": "foo2", "url": "bar2", "content": "baz2"}, + ] + } + 
mock_tavily_client.return_value.search.return_value = mock_response + return TavilyWebSearchDriver(api_key="test") + + def test_search_returns_results(self, driver, mock_tavily_client): + results = driver.search("test") + assert isinstance(results, ListArtifact) + output = [result.value for result in results] + assert len(output) == 2 + assert output[0]["title"] == "foo" + assert output[0]["url"] == "bar" + assert output[0]["content"] == "baz" + mock_tavily_client.return_value.search.assert_called_once_with("test", max_results=5) + + def test_search_raises_error(self, mock_tavily_client): + mock_tavily_client.return_value.search.side_effect = Exception("test_error") + driver = TavilyWebSearchDriver(api_key="test") + with pytest.raises(Exception, match="test_error"): + driver.search("test") + + def test_search_with_params(self, mock_tavily_client): + mock_response = { + "results": [ + {"title": "custom", "url": "custom_url", "content": "custom_content"}, + ] + } + mock_tavily_client.return_value.search.return_value = mock_response + + driver = TavilyWebSearchDriver(api_key="test", params={"custom_param": "value"}) + results = driver.search("test", additional_param="extra") + + assert isinstance(results, ListArtifact) + output = results[0].value + assert output["title"] == "custom" + assert output["url"] == "custom_url" + assert output["content"] == "custom_content" + + mock_tavily_client.return_value.search.assert_called_once_with( + "test", max_results=5, custom_param="value", additional_param="extra" + ) + + def test_custom_results_count(self, mock_tavily_client): + mock_response = { + "results": [{"title": f"title_{i}", "url": f"url_{i}", "content": f"content_{i}"} for i in range(5)] + } + mock_tavily_client.return_value.search.return_value = mock_response + + driver = TavilyWebSearchDriver(api_key="test", results_count=5) + results = driver.search("test") + + assert isinstance(results, ListArtifact) + assert len(results) == 5 + + 
mock_tavily_client.return_value.search.assert_called_once_with("test", max_results=5) diff --git a/tests/unit/engines/extraction/test_csv_extraction_engine.py b/tests/unit/engines/extraction/test_csv_extraction_engine.py index 056df2d5a..36c58788d 100644 --- a/tests/unit/engines/extraction/test_csv_extraction_engine.py +++ b/tests/unit/engines/extraction/test_csv_extraction_engine.py @@ -8,8 +8,8 @@ class TestCsvExtractionEngine: def engine(self): return CsvExtractionEngine(column_names=["test1"]) - def test_extract(self, engine): - result = engine.extract("foo") + def test_extract_text(self, engine): + result = engine.extract_text("foo") assert len(result.value) == 1 assert result.value[0].value == "test1: mock output" diff --git a/tests/unit/engines/extraction/test_json_extraction_engine.py b/tests/unit/engines/extraction/test_json_extraction_engine.py index 9d6442579..b4aa58b75 100644 --- a/tests/unit/engines/extraction/test_json_extraction_engine.py +++ b/tests/unit/engines/extraction/test_json_extraction_engine.py @@ -1,3 +1,6 @@ +from os.path import dirname, join, normpath +from pathlib import Path + import pytest from schema import Schema @@ -15,24 +18,30 @@ def engine(self): template_schema=Schema({"foo": "bar"}).json_schema("TemplateSchema"), ) - def test_extract(self, engine): - result = engine.extract("foo") + def test_extract_text(self, engine): + result = engine.extract_text("foo") assert len(result.value) == 2 - assert result.value[0].value == '{"test_key_1": "test_value_1"}' - assert result.value[1].value == '{"test_key_2": "test_value_2"}' + assert result.value[0].value == {"test_key_1": "test_value_1"} + assert result.value[1].value == {"test_key_2": "test_value_2"} + + def test_chunked_extract_text(self, engine): + large_text = Path(normpath(join(dirname(__file__), "../../../resources", "test.txt"))).read_text() + + extracted = engine.extract_text(large_text * 50) + assert len(extracted) == 354 + assert extracted[0].value == {"test_key_1": 
"test_value_1"} def test_extract_error(self, engine): engine.template_schema = lambda: "non serializable" - with pytest.raises(TypeError): - engine.extract("foo") + engine.extract_text("foo") def test_json_to_text_artifacts(self, engine): assert [ a.value for a in engine.json_to_text_artifacts('[{"test_key_1": "test_value_1"}, {"test_key_2": "test_value_2"}]') - ] == ['{"test_key_1": "test_value_1"}', '{"test_key_2": "test_value_2"}'] + ] == [{"test_key_1": "test_value_1"}, {"test_key_2": "test_value_2"}] def test_json_to_text_artifacts_no_matches(self, engine): assert engine.json_to_text_artifacts("asdfasdfasdf") == [] diff --git a/tests/unit/engines/rag/modules/retrieval/test_text_loader_retrieval_rag_module.py b/tests/unit/engines/rag/modules/retrieval/test_text_loader_retrieval_rag_module.py index 69e334c7f..b2dbb613d 100644 --- a/tests/unit/engines/rag/modules/retrieval/test_text_loader_retrieval_rag_module.py +++ b/tests/unit/engines/rag/modules/retrieval/test_text_loader_retrieval_rag_module.py @@ -18,7 +18,7 @@ def test_run(self): embedding_driver = MockEmbeddingDriver() module = TextLoaderRetrievalRagModule( - loader=WebLoader(max_tokens=MAX_TOKENS, embedding_driver=embedding_driver), + loader=WebLoader(), vector_store_driver=LocalVectorStoreDriver(embedding_driver=embedding_driver), source="https://www.griptape.ai", ) diff --git a/tests/unit/loaders/conftest.py b/tests/unit/loaders/conftest.py index 1f698738a..0bbf839b8 100644 --- a/tests/unit/loaders/conftest.py +++ b/tests/unit/loaders/conftest.py @@ -1,4 +1,5 @@ import os +from io import BytesIO, StringIO from pathlib import Path import pytest @@ -14,15 +15,15 @@ def create_source(resource_path: str) -> Path: @pytest.fixture() def bytes_from_resource_path(path_from_resource_path): - def create_source(resource_path: str) -> bytes: - return Path(path_from_resource_path(resource_path)).read_bytes() + def create_source(resource_path: str) -> BytesIO: + return 
BytesIO(Path(path_from_resource_path(resource_path)).read_bytes()) return create_source @pytest.fixture() def str_from_resource_path(path_from_resource_path): - def test_csv_str(resource_path: str) -> str: - return Path(path_from_resource_path(resource_path)).read_text() + def test_csv_str(resource_path: str) -> StringIO: + return StringIO(Path(path_from_resource_path(resource_path)).read_text()) return test_csv_str diff --git a/tests/unit/loaders/test_audio_loader.py b/tests/unit/loaders/test_audio_loader.py index b7ebdd912..7b3516722 100644 --- a/tests/unit/loaders/test_audio_loader.py +++ b/tests/unit/loaders/test_audio_loader.py @@ -9,9 +9,9 @@ class TestAudioLoader: def loader(self): return AudioLoader() - @pytest.fixture() - def create_source(self, bytes_from_resource_path): - return bytes_from_resource_path + @pytest.fixture(params=["path_from_resource_path"]) + def create_source(self, request): + return request.getfixturevalue(request.param) @pytest.mark.parametrize(("resource_path", "mime_type"), [("sentences.wav", "audio/wav")]) def test_load(self, resource_path, mime_type, loader, create_source): @@ -21,7 +21,6 @@ def test_load(self, resource_path, mime_type, loader, create_source): assert isinstance(artifact, AudioArtifact) assert artifact.mime_type == mime_type - assert len(artifact.value) > 0 def test_load_collection(self, create_source, loader): resource_paths = ["sentences.wav", "sentences2.wav"] diff --git a/tests/unit/loaders/test_blob_loader.py b/tests/unit/loaders/test_blob_loader.py index 4812e669c..2042381bc 100644 --- a/tests/unit/loaders/test_blob_loader.py +++ b/tests/unit/loaders/test_blob_loader.py @@ -11,7 +11,7 @@ def loader(self, request): kwargs = {"encoding": encoding} if encoding is not None else {} return BlobLoader(**kwargs) - @pytest.fixture(params=["bytes_from_resource_path", "str_from_resource_path"]) + @pytest.fixture(params=["path_from_resource_path"]) def create_source(self, request): return 
request.getfixturevalue(request.param) diff --git a/tests/unit/loaders/test_csv_loader.py b/tests/unit/loaders/test_csv_loader.py index 7af409152..9c5d0febb 100644 --- a/tests/unit/loaders/test_csv_loader.py +++ b/tests/unit/loaders/test_csv_loader.py @@ -1,9 +1,6 @@ -import json - import pytest from griptape.loaders.csv_loader import CsvLoader -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver class TestCsvLoader: @@ -11,15 +8,15 @@ class TestCsvLoader: def loader(self, request): encoding = request.param if encoding is None: - return CsvLoader(embedding_driver=MockEmbeddingDriver()) + return CsvLoader() else: - return CsvLoader(embedding_driver=MockEmbeddingDriver(), encoding=encoding) + return CsvLoader(encoding=encoding) @pytest.fixture() def loader_with_pipe_delimiter(self): - return CsvLoader(embedding_driver=MockEmbeddingDriver(), delimiter="|") + return CsvLoader(delimiter="|") - @pytest.fixture(params=["bytes_from_resource_path", "str_from_resource_path"]) + @pytest.fixture(params=["path_from_resource_path"]) def create_source(self, request): return request.getfixturevalue(request.param) @@ -31,7 +28,6 @@ def test_load(self, loader, create_source): assert len(artifacts) == 10 first_artifact = artifacts[0] assert first_artifact.value == "Foo: foo1\nBar: bar1" - assert first_artifact.embedding == [0, 1] def test_load_delimiter(self, loader_with_pipe_delimiter, create_source): source = create_source("test-pipe.csv") @@ -41,7 +37,6 @@ def test_load_delimiter(self, loader_with_pipe_delimiter, create_source): assert len(artifacts) == 10 first_artifact = artifacts[0] assert first_artifact.value == "Bar: foo1\nFoo: bar1" - assert first_artifact.embedding == [0, 1] def test_load_collection(self, loader, create_source): resource_paths = ["test-1.csv", "test-2.csv"] @@ -53,16 +48,5 @@ def test_load_collection(self, loader, create_source): assert collection.keys() == keys assert collection[loader.to_key(sources[0])][0].value == "Foo: foo1\nBar: bar1" - 
assert collection[loader.to_key(sources[0])][0].embedding == [0, 1] assert collection[loader.to_key(sources[1])][0].value == "Bar: bar1\nFoo: foo1" - assert collection[loader.to_key(sources[1])][0].embedding == [0, 1] - - def test_formatter_fn(self, loader, create_source): - loader.formatter_fn = lambda value: json.dumps(value) - source = create_source("test-1.csv") - - artifacts = loader.load(source) - - assert len(artifacts) == 10 - assert artifacts[0].value == '{"Foo": "foo1", "Bar": "bar1"}' diff --git a/tests/unit/loaders/test_email_loader.py b/tests/unit/loaders/test_email_loader.py index ade062743..1812dc531 100644 --- a/tests/unit/loaders/test_email_loader.py +++ b/tests/unit/loaders/test_email_loader.py @@ -1,14 +1,16 @@ from __future__ import annotations import email -from email import message -from typing import Optional +from typing import TYPE_CHECKING, Optional import pytest from griptape.artifacts import ListArtifact from griptape.loaders import EmailLoader +if TYPE_CHECKING: + from email.message import Message + class TestEmailLoader: @pytest.fixture(autouse=True) @@ -127,23 +129,21 @@ def to_fetch_message(body: str, content_type: Optional[str]): return to_fetch_response(to_message(body, content_type)) -def to_fetch_response(message: message): +def to_fetch_response(message: Message): return (None, ((None, message.as_bytes()),)) -def to_message(body: str, content_type: Optional[str]) -> message: +def to_message(body: str, content_type: Optional[str]) -> Message: message = email.message_from_string(body) if content_type: message.set_type(content_type) return message -def to_value_set(artifact_or_dict: ListArtifact | dict[str, ListArtifact]) -> set[str]: - if isinstance(artifact_or_dict, ListArtifact): - return {value.value for value in artifact_or_dict.value} - elif isinstance(artifact_or_dict, dict): - return { - text_artifact.value for list_artifact in artifact_or_dict.values() for text_artifact in list_artifact.value - } +def 
to_value_set(artifacts: ListArtifact | dict[str, ListArtifact]) -> set[str]: + if isinstance(artifacts, dict): + return set( + {text_artifact.value for list_artifact in artifacts.values() for text_artifact in list_artifact.value} + ) else: - raise Exception + return {artifact.value for artifact in artifacts.value} diff --git a/tests/unit/loaders/test_image_loader.py b/tests/unit/loaders/test_image_loader.py index 7093894b0..9c491fb88 100644 --- a/tests/unit/loaders/test_image_loader.py +++ b/tests/unit/loaders/test_image_loader.py @@ -13,9 +13,9 @@ def loader(self): def png_loader(self): return ImageLoader(format="png") - @pytest.fixture() - def create_source(self, bytes_from_resource_path): - return bytes_from_resource_path + @pytest.fixture(params=["path_from_resource_path"]) + def create_source(self, request): + return request.getfixturevalue(request.param) @pytest.mark.parametrize( ("resource_path", "mime_type"), diff --git a/tests/unit/loaders/test_pdf_loader.py b/tests/unit/loaders/test_pdf_loader.py index 3f4f7848e..45027b95c 100644 --- a/tests/unit/loaders/test_pdf_loader.py +++ b/tests/unit/loaders/test_pdf_loader.py @@ -1,29 +1,25 @@ import pytest from griptape.loaders import PdfLoader -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver - -MAX_TOKENS = 50 class TestPdfLoader: @pytest.fixture() def loader(self): - return PdfLoader(max_tokens=MAX_TOKENS, embedding_driver=MockEmbeddingDriver()) + return PdfLoader() - @pytest.fixture() - def create_source(self, bytes_from_resource_path): - return bytes_from_resource_path + @pytest.fixture(params=["path_from_resource_path"]) + def create_source(self, request): + return request.getfixturevalue(request.param) def test_load(self, loader, create_source): source = create_source("bitcoin.pdf") - artifacts = loader.load(source) + artifact = loader.load(source) - assert len(artifacts) == 151 - assert artifacts[0].value.startswith("Bitcoin: A Peer-to-Peer") - assert artifacts[-1].value.endswith('its 
applications," 1957.\n9') - assert artifacts[0].embedding == [0, 1] + assert len(artifact) == 9 + assert artifact[0].value.startswith("Bitcoin: A Peer-to-Peer") + assert artifact[-1].value.endswith('its applications," 1957.\n9') def test_load_collection(self, loader, create_source): resource_paths = ["bitcoin.pdf", "bitcoin-2.pdf"] @@ -37,7 +33,6 @@ def test_load_collection(self, loader, create_source): for key in keys: artifact = collection[key] - assert len(artifact) == 151 + assert len(artifact) == 9 assert artifact[0].value.startswith("Bitcoin: A Peer-to-Peer") assert artifact[-1].value.endswith('its applications," 1957.\n9') - assert artifact[0].embedding == [0, 1] diff --git a/tests/unit/loaders/test_sql_loader.py b/tests/unit/loaders/test_sql_loader.py index 2ff6c7faf..4d33b634a 100644 --- a/tests/unit/loaders/test_sql_loader.py +++ b/tests/unit/loaders/test_sql_loader.py @@ -3,7 +3,6 @@ from griptape.drivers import SqlDriver from griptape.loaders import SqlLoader -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver MAX_TOKENS = 50 @@ -16,7 +15,6 @@ def loader(self): engine_url="sqlite:///:memory:", create_engine_params={"connect_args": {"check_same_thread": False}, "poolclass": StaticPool}, ), - embedding_driver=MockEmbeddingDriver(), ) sql_loader.sql_driver.execute_query( @@ -35,14 +33,12 @@ def loader(self): return sql_loader def test_load(self, loader): - artifacts = loader.load("SELECT * FROM test_table;") + artifact = loader.load("SELECT * FROM test_table;") - assert len(artifacts) == 3 - assert artifacts[0].value == "id: 1\nname: Alice\nage: 25\ncity: New York" - assert artifacts[1].value == "id: 2\nname: Bob\nage: 30\ncity: Los Angeles" - assert artifacts[2].value == "id: 3\nname: Charlie\nage: 22\ncity: Chicago" - - assert artifacts[0].embedding == [0, 1] + assert len(artifact) == 3 + assert artifact[0].value == "id: 1\nname: Alice\nage: 25\ncity: New York" + assert artifact[1].value == "id: 2\nname: Bob\nage: 30\ncity: Los Angeles" + 
assert artifact[2].value == "id: 3\nname: Charlie\nage: 22\ncity: Chicago" def test_load_collection(self, loader): sources = ["SELECT * FROM test_table LIMIT 1;", "SELECT * FROM test_table LIMIT 2;"] @@ -55,4 +51,3 @@ def test_load_collection(self, loader): assert artifacts[loader.to_key(sources[0])][0].value == "id: 1\nname: Alice\nage: 25\ncity: New York" assert artifacts[loader.to_key(sources[1])][0].value == "id: 1\nname: Alice\nage: 25\ncity: New York" - assert list(artifacts.values())[0][0].embedding == [0, 1] diff --git a/tests/unit/loaders/test_text_loader.py b/tests/unit/loaders/test_text_loader.py index 07527f9e6..a435610ef 100644 --- a/tests/unit/loaders/test_text_loader.py +++ b/tests/unit/loaders/test_text_loader.py @@ -1,9 +1,6 @@ import pytest from griptape.loaders.text_loader import TextLoader -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver - -MAX_TOKENS = 50 class TestTextLoader: @@ -11,23 +8,21 @@ class TestTextLoader: def loader(self, request): encoding = request.param if encoding is None: - return TextLoader(max_tokens=MAX_TOKENS, embedding_driver=MockEmbeddingDriver()) + return TextLoader() else: - return TextLoader(max_tokens=MAX_TOKENS, embedding_driver=MockEmbeddingDriver(), encoding=encoding) + return TextLoader(encoding=encoding) - @pytest.fixture(params=["bytes_from_resource_path", "str_from_resource_path"]) + @pytest.fixture(params=["path_from_resource_path"]) def create_source(self, request): return request.getfixturevalue(request.param) def test_load(self, loader, create_source): source = create_source("test.txt") - artifacts = loader.load(source) + artifact = loader.load(source) - assert len(artifacts) == 39 - assert artifacts[0].value.startswith("foobar foobar foobar") - assert artifacts[0].encoding == loader.encoding - assert artifacts[0].embedding == [0, 1] + assert artifact.value.startswith("foobar foobar foobar") + assert artifact.encoding == loader.encoding def test_load_collection(self, loader, 
create_source): resource_paths = ["test.txt"] @@ -39,9 +34,10 @@ def test_load_collection(self, loader, create_source): assert collection.keys() == keys key = next(iter(keys)) - artifacts = collection[key] - assert len(artifacts) == 39 + artifact = collection[key] - artifact = artifacts[0] - assert artifact.embedding == [0, 1] assert artifact.encoding == loader.encoding + + def test_load_deprecated_bytes(self, loader): + with pytest.warns(DeprecationWarning): + loader.load(b"test.txt") diff --git a/tests/unit/loaders/test_web_loader.py b/tests/unit/loaders/test_web_loader.py index f7cccb666..d6e958042 100644 --- a/tests/unit/loaders/test_web_loader.py +++ b/tests/unit/loaders/test_web_loader.py @@ -1,7 +1,6 @@ import pytest from griptape.loaders import WebLoader -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver MAX_TOKENS = 50 @@ -13,15 +12,12 @@ def _mock_trafilatura_fetch_url(self, mocker): @pytest.fixture() def loader(self): - return WebLoader(max_tokens=MAX_TOKENS, embedding_driver=MockEmbeddingDriver()) + return WebLoader() def test_load(self, loader): - artifacts = loader.load("https://github.com/griptape-ai/griptape") + artifact = loader.load("https://github.com/griptape-ai/griptape") - assert len(artifacts) == 1 - assert "foobar" in artifacts[0].value.lower() - - assert artifacts[0].embedding == [0, 1] + assert "foobar" in artifact.value.lower() def test_load_exception(self, mocker, loader): mocker.patch("trafilatura.fetch_url", side_effect=Exception("error")) @@ -38,9 +34,7 @@ def test_load_collection(self, loader): loader.to_key("https://github.com/griptape-ai/griptape"), loader.to_key("https://github.com/griptape-ai/griptape-docs"), ] - assert "foobar" in [a.value for artifact_list in artifacts.values() for a in artifact_list][0].lower() - - assert list(artifacts.values())[0][0].embedding == [0, 1] + assert "foobar" in [a.value for a in artifacts.values()] def test_empty_page_string_response(self, loader, mocker): 
mocker.patch("trafilatura.extract", return_value="") diff --git a/tests/unit/mixins/test_activity_mixin.py b/tests/unit/mixins/test_activity_mixin.py index 1d684e2a5..07c173889 100644 --- a/tests/unit/mixins/test_activity_mixin.py +++ b/tests/unit/mixins/test_activity_mixin.py @@ -32,7 +32,7 @@ def test_find_activity(self): assert tool.find_activity("test_str_output") is None def test_activities(self, tool): - assert len(tool.activities()) == 7 + assert len(tool.activities()) == 8 assert tool.activities()[0] == tool.test def test_allowlist_and_denylist_validation(self): @@ -47,7 +47,7 @@ def test_allowlist(self): def test_denylist(self): tool = MockTool(test_field="hello", test_int=5, denylist=["test"]) - assert len(tool.activities()) == 6 + assert len(tool.activities()) == 7 def test_invalid_allowlist(self): with pytest.raises(ValueError): @@ -101,3 +101,24 @@ def test_extra_schema_properties(self): "additionalProperties": False, "type": "object", } + + def test_callable_schema(self): + tool = MockTool(custom_schema={"test": str}) + schema = tool.activity_schema(tool.test_callable_schema).json_schema("InputSchema") + + assert schema == { + "$id": "InputSchema", + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "values": { + "description": "Test input", + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "additionalProperties": False, + "type": "object", + } + }, + "required": ["values"], + "additionalProperties": False, + "type": "object", + } diff --git a/tests/unit/mixins/test_rule_mixin.py b/tests/unit/mixins/test_rule_mixin.py index 393d721a3..27cabc33b 100644 --- a/tests/unit/mixins/test_rule_mixin.py +++ b/tests/unit/mixins/test_rule_mixin.py @@ -1,5 +1,3 @@ -import pytest - from griptape.mixins.rule_mixin import RuleMixin from griptape.rules import Rule, Ruleset from griptape.structures import Agent @@ -23,8 +21,13 @@ def test_rulesets(self): assert mixin.rulesets == [ruleset] def test_rules_and_rulesets(self): 
- with pytest.raises(ValueError): - RuleMixin(rules=[Rule("foo")], rulesets=[Ruleset("bar", [Rule("baz")])]) + mixin = RuleMixin(rules=[Rule("foo")], rulesets=[Ruleset("bar", [Rule("baz")])]) + + assert mixin.rules == [Rule("foo")] + assert mixin.rulesets[0].name == "bar" + assert mixin.rulesets[0].rules == [Rule("baz")] + assert mixin.rulesets[1].name == "Default Ruleset" + assert mixin.rulesets[1].rules == [Rule("foo")] def test_inherits_structure_rulesets(self): # Tests that a task using the mixin inherits rulesets from its structure. @@ -35,4 +38,4 @@ def test_inherits_structure_rulesets(self): task = PromptTask(rulesets=[ruleset2]) agent.add_task(task) - assert task.all_rulesets == [ruleset1, ruleset2] + assert task.rulesets == [ruleset1, ruleset2] diff --git a/tests/unit/rules/test_ruleset.py b/tests/unit/rules/test_ruleset.py index 07c2f087e..ce0c61c4c 100644 --- a/tests/unit/rules/test_ruleset.py +++ b/tests/unit/rules/test_ruleset.py @@ -1,9 +1,33 @@ +from pathlib import Path + +import pytest + +from griptape.drivers import LocalRulesetDriver from griptape.rules import Rule, Ruleset +TEST_RULESET_DIR = str(Path("tests/resources/")) + class TestRuleset: + @pytest.fixture() + def driver(self): + return LocalRulesetDriver(persist_dir=TEST_RULESET_DIR) + def test_init(self): ruleset = Ruleset("foobar", rules=[Rule("rule1"), Rule("rule2")]) assert ruleset.name == "foobar" assert len(ruleset.rules) == 2 + + def test_from_driver(self, driver): + ruleset = Ruleset( + name="test_ruleset.json", + rules=[Rule("rule1"), Rule("rule2")], + meta={"key": "value"}, + ruleset_driver=driver, + ) + + assert len(ruleset.rules) == 4 + assert ruleset.meta == {"foo": "bar", "key": "value"} + assert ruleset.rules[0].value == "value1" + assert ruleset.rules[3].value == "rule2" diff --git a/tests/unit/structures/test_agent.py b/tests/unit/structures/test_agent.py index 235363bbe..d522a43a2 100644 --- a/tests/unit/structures/test_agent.py +++ b/tests/unit/structures/test_agent.py @@ 
-29,9 +29,9 @@ def test_rulesets(self): agent.add_task(PromptTask(rulesets=[Ruleset("Bar", [Rule("bar test")])])) assert isinstance(agent.task, PromptTask) - assert len(agent.task.all_rulesets) == 2 - assert agent.task.all_rulesets[0].name == "Foo" - assert agent.task.all_rulesets[1].name == "Bar" + assert len(agent.task.rulesets) == 2 + assert agent.task.rulesets[0].name == "Foo" + assert agent.task.rulesets[1].name == "Bar" def test_rules(self): agent = Agent(rules=[Rule("foo test")]) @@ -39,17 +39,22 @@ def test_rules(self): agent.add_task(PromptTask(rules=[Rule("bar test")])) assert isinstance(agent.task, PromptTask) - assert len(agent.task.all_rulesets) == 2 - assert agent.task.all_rulesets[0].name == "Default Ruleset" - assert agent.task.all_rulesets[1].name == "Additional Ruleset" + assert len(agent.task.rulesets) == 1 + assert agent.task.rulesets[0].name == "Default Ruleset" + assert len(agent.task.rulesets[0].rules) == 2 + assert agent.task.rulesets[0].rules[0].value == "foo test" + assert agent.task.rulesets[0].rules[1].value == "bar test" def test_rules_and_rulesets(self): - with pytest.raises(ValueError): - Agent(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])]) + agent = Agent(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])]) + assert len(agent.rulesets) == 2 + assert len(agent.rules) == 1 agent = Agent() - with pytest.raises(ValueError): - agent.add_task(PromptTask(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])])) + agent.add_task(PromptTask(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])])) + assert isinstance(agent.task, PromptTask) + assert len(agent.task.rulesets) == 2 + assert len(agent.task.rules) == 1 def test_with_task_memory(self): agent = Agent(tools=[MockTool(off_prompt=True)]) @@ -133,17 +138,14 @@ def test_add_tasks(self): agent = Agent(prompt_driver=MockPromptDriver()) - try: + with pytest.raises(ValueError): agent.add_tasks(first_task, 
second_task) - raise AssertionError() - except ValueError: - assert True - try: + with pytest.raises(ValueError): agent + [first_task, second_task] - raise AssertionError() - except ValueError: - assert True + + with pytest.raises(ValueError): + agent.add_tasks([first_task, second_task]) def test_prompt_stack_without_memory(self): agent = Agent(prompt_driver=MockPromptDriver(), conversation_memory=None, rules=[Rule("test")]) diff --git a/tests/unit/structures/test_pipeline.py b/tests/unit/structures/test_pipeline.py index a7f7f40c1..e6fa6d770 100644 --- a/tests/unit/structures/test_pipeline.py +++ b/tests/unit/structures/test_pipeline.py @@ -45,14 +45,14 @@ def test_rulesets(self): ) assert isinstance(pipeline.tasks[0], PromptTask) - assert len(pipeline.tasks[0].all_rulesets) == 2 - assert pipeline.tasks[0].all_rulesets[0].name == "Foo" - assert pipeline.tasks[0].all_rulesets[1].name == "Bar" + assert len(pipeline.tasks[0].rulesets) == 2 + assert pipeline.tasks[0].rulesets[0].name == "Foo" + assert pipeline.tasks[0].rulesets[1].name == "Bar" assert isinstance(pipeline.tasks[1], PromptTask) - assert len(pipeline.tasks[1].all_rulesets) == 2 - assert pipeline.tasks[1].all_rulesets[0].name == "Foo" - assert pipeline.tasks[1].all_rulesets[1].name == "Baz" + assert len(pipeline.tasks[1].rulesets) == 2 + assert pipeline.tasks[1].rulesets[0].name == "Foo" + assert pipeline.tasks[1].rulesets[1].name == "Baz" def test_rules(self): pipeline = Pipeline(rules=[Rule("foo test")]) @@ -60,21 +60,24 @@ def test_rules(self): pipeline.add_tasks(PromptTask(rules=[Rule("bar test")]), PromptTask(rules=[Rule("baz test")])) assert isinstance(pipeline.tasks[0], PromptTask) - assert len(pipeline.tasks[0].all_rulesets) == 2 - assert pipeline.tasks[0].all_rulesets[0].name == "Default Ruleset" - assert pipeline.tasks[0].all_rulesets[1].name == "Additional Ruleset" + assert len(pipeline.tasks[0].rulesets) == 1 + assert pipeline.tasks[0].rulesets[0].name == "Default Ruleset" + assert 
len(pipeline.tasks[0].rulesets[0].rules) == 2 assert isinstance(pipeline.tasks[1], PromptTask) - assert pipeline.tasks[1].all_rulesets[0].name == "Default Ruleset" - assert pipeline.tasks[1].all_rulesets[1].name == "Additional Ruleset" + assert pipeline.tasks[1].rulesets[0].name == "Default Ruleset" + assert len(pipeline.tasks[1].rulesets[0].rules) == 2 def test_rules_and_rulesets(self): - with pytest.raises(ValueError): - Pipeline(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])]) + pipeline = Pipeline(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])]) + assert len(pipeline.rulesets) == 2 + assert len(pipeline.rules) == 1 pipeline = Pipeline() - with pytest.raises(ValueError): - pipeline.add_task(PromptTask(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])])) + pipeline.add_task(PromptTask(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])])) + assert isinstance(pipeline.tasks[0], PromptTask) + assert len(pipeline.tasks[0].rulesets) == 2 + assert len(pipeline.tasks[0].rules) == 1 def test_with_no_task_memory(self): pipeline = Pipeline() @@ -203,6 +206,22 @@ def test_add_tasks(self): assert len(second_task.parents) == 1 assert len(second_task.children) == 0 + def test_nested_tasks(self): + pipeline = Pipeline( + tasks=[ + [ + PromptTask("parent", id=f"parent_{i}"), + PromptTask("child", id=f"child_{i}", parent_ids=[f"parent_{i}"]), + PromptTask("grandchild", id=f"grandchild_{i}", parent_ids=[f"child_{i}"]), + ] + for i in range(3) + ] + ) + + pipeline.run() + assert pipeline.output_task.id == "grandchild_2" + assert len(pipeline.tasks) == 9 + def test_insert_task_in_middle(self): first_task = PromptTask("test1", id="test1") second_task = PromptTask("test2", id="test2") @@ -351,7 +370,8 @@ def test_run_with_error_artifact(self, error_artifact_task, waiting_task): pipeline = Pipeline(tasks=[waiting_task, error_artifact_task, end_task]) pipeline.run() - assert pipeline.output is None 
+ with pytest.raises(ValueError, match="Structure's output Task has no output. Run"): + assert pipeline.output def test_run_with_error_artifact_no_fail_fast(self, error_artifact_task, waiting_task): end_task = PromptTask("end") @@ -374,7 +394,7 @@ def test_add_duplicate_task_directly(self): pipeline = Pipeline() pipeline + task - pipeline.tasks.append(task) + pipeline._tasks.append(task) with pytest.raises(ValueError, match=f"Duplicate task with id {task.id} found."): pipeline.run() diff --git a/tests/unit/structures/test_workflow.py b/tests/unit/structures/test_workflow.py index 45610bf20..e3bd8e886 100644 --- a/tests/unit/structures/test_workflow.py +++ b/tests/unit/structures/test_workflow.py @@ -42,14 +42,14 @@ def test_rulesets(self): ) assert isinstance(workflow.tasks[0], PromptTask) - assert len(workflow.tasks[0].all_rulesets) == 2 - assert workflow.tasks[0].all_rulesets[0].name == "Foo" - assert workflow.tasks[0].all_rulesets[1].name == "Bar" + assert len(workflow.tasks[0].rulesets) == 2 + assert workflow.tasks[0].rulesets[0].name == "Foo" + assert workflow.tasks[0].rulesets[1].name == "Bar" assert isinstance(workflow.tasks[1], PromptTask) - assert len(workflow.tasks[1].all_rulesets) == 2 - assert workflow.tasks[1].all_rulesets[0].name == "Foo" - assert workflow.tasks[1].all_rulesets[1].name == "Baz" + assert len(workflow.tasks[1].rulesets) == 2 + assert workflow.tasks[1].rulesets[0].name == "Foo" + assert workflow.tasks[1].rulesets[1].name == "Baz" def test_rules(self): workflow = Workflow(rules=[Rule("foo test")]) @@ -57,22 +57,25 @@ def test_rules(self): workflow.add_tasks(PromptTask(rules=[Rule("bar test")]), PromptTask(rules=[Rule("baz test")])) assert isinstance(workflow.tasks[0], PromptTask) - assert len(workflow.tasks[0].all_rulesets) == 2 - assert workflow.tasks[0].all_rulesets[0].name == "Default Ruleset" - assert workflow.tasks[0].all_rulesets[1].name == "Additional Ruleset" + assert len(workflow.tasks[0].rulesets) == 1 + assert 
workflow.tasks[0].rulesets[0].name == "Default Ruleset" + assert len(workflow.tasks[0].rulesets[0].rules) == 2 assert isinstance(workflow.tasks[1], PromptTask) - assert len(workflow.tasks[1].all_rulesets) == 2 - assert workflow.tasks[1].all_rulesets[0].name == "Default Ruleset" - assert workflow.tasks[1].all_rulesets[1].name == "Additional Ruleset" + assert len(workflow.tasks[1].rulesets) == 1 + assert workflow.tasks[1].rulesets[0].name == "Default Ruleset" + assert len(workflow.tasks[1].rulesets[0].rules) == 2 def test_rules_and_rulesets(self): - with pytest.raises(ValueError): - Workflow(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])]) + workflow = Workflow(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])]) + assert len(workflow.rulesets) == 2 + assert len(workflow.rules) == 1 workflow = Workflow() - with pytest.raises(ValueError): - workflow.add_task(PromptTask(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])])) + workflow.add_task(PromptTask(rules=[Rule("foo test")], rulesets=[Ruleset("Bar", [Rule("bar test")])])) + assert isinstance(workflow.tasks[0], PromptTask) + assert len(workflow.tasks[0].rulesets) == 2 + assert len(workflow.tasks[0].rules) == 1 def test_with_no_task_memory(self): workflow = Workflow() @@ -752,7 +755,8 @@ def test_run_with_error_artifact(self, error_artifact_task, waiting_task): workflow = Workflow(tasks=[waiting_task, error_artifact_task, end_task]) workflow.run() - assert workflow.output is None + with pytest.raises(ValueError, match="Structure's output Task has no output. 
Run"): + assert workflow.output def test_run_with_error_artifact_no_fail_fast(self, error_artifact_task, waiting_task): end_task = PromptTask("end") @@ -762,6 +766,78 @@ def test_run_with_error_artifact_no_fail_fast(self, error_artifact_task, waiting assert workflow.output is not None + def test_nested_tasks(self): + workflow = Workflow( + tasks=[ + [ + PromptTask("parent", id=f"parent_{i}"), + PromptTask("child", id=f"child_{i}", parent_ids=[f"parent_{i}"]), + PromptTask("grandchild", id=f"grandchild_{i}", parent_ids=[f"child_{i}"]), + ] + for i in range(3) + ], + ) + + workflow.run() + + output_ids = [task.id for task in workflow.output_tasks] + assert output_ids == ["grandchild_0", "grandchild_1", "grandchild_2"] + assert len(workflow.tasks) == 9 + + def test_nested_tasks_property(self): + workflow = Workflow() + workflow._tasks = [ + [ + PromptTask("parent", id=f"parent_{i}"), + PromptTask("child", id=f"child_{i}", parent_ids=[f"parent_{i}"]), + PromptTask("grandchild", id=f"grandchild_{i}", parent_ids=[f"child_{i}"]), + ] + for i in range(3) + ] + + assert len(workflow.tasks) == 9 + + def test_output_tasks(self): + parent = PromptTask("parent") + child = PromptTask("child") + grandchild = PromptTask("grandchild") + workflow = Workflow( + tasks=[ + [parent, child, grandchild], + ] + ) + + workflow + parent + parent.add_child(child) + child.add_child(grandchild) + + assert workflow.output_tasks == [grandchild] + + def test_input_tasks(self): + parent = PromptTask("parent") + child = PromptTask("child") + grandchild = PromptTask("grandchild") + workflow = Workflow( + tasks=[ + [parent, child, grandchild], + ] + ) + + workflow + parent + parent.add_child(child) + child.add_child(grandchild) + + assert workflow.input_tasks == [parent] + + def test_outputs(self): + workflow = Workflow(tasks=[PromptTask("parent") for _ in range(3)]) + + assert workflow.outputs == [] + + workflow.run() + + assert [output.value for output in workflow.outputs] == ["mock output"] * 3 + 
@staticmethod def _validate_topology_1(workflow) -> None: assert len(workflow.tasks) == 4 diff --git a/tests/unit/tasks/test_base_multi_text_input_task.py b/tests/unit/tasks/test_base_multi_text_input_task.py deleted file mode 100644 index 8eaa832ae..000000000 --- a/tests/unit/tasks/test_base_multi_text_input_task.py +++ /dev/null @@ -1,56 +0,0 @@ -from griptape.artifacts import TextArtifact -from griptape.structures import Pipeline -from tests.mocks.mock_multi_text_input_task import MockMultiTextInputTask - - -class TestBaseMultiTextInputTask: - def test_string_input(self): - assert MockMultiTextInputTask(("foobar", "bazbar")).input[0].value == "foobar" - assert MockMultiTextInputTask(("foobar", "bazbar")).input[1].value == "bazbar" - - task = MockMultiTextInputTask() - task.input = ("foobar", "bazbar") - assert task.input[0].value == "foobar" - assert task.input[1].value == "bazbar" - - def test_artifact_input(self): - assert MockMultiTextInputTask((TextArtifact("foobar"), TextArtifact("bazbar"))).input[0].value == "foobar" - assert MockMultiTextInputTask((TextArtifact("foobar"), TextArtifact("bazbar"))).input[1].value == "bazbar" - - task = MockMultiTextInputTask() - task.input = (TextArtifact("foobar"), TextArtifact("bazbar")) - assert task.input[0].value == "foobar" - assert task.input[1].value == "bazbar" - - def test_callable_input(self): - assert ( - MockMultiTextInputTask((lambda _: TextArtifact("foobar"), lambda _: TextArtifact("bazbar"))).input[0].value - == "foobar" - ) - assert ( - MockMultiTextInputTask((lambda _: TextArtifact("foobar"), lambda _: TextArtifact("bazbar"))).input[1].value - == "bazbar" - ) - - task = MockMultiTextInputTask() - task.input = (lambda _: TextArtifact("foobar"), lambda _: TextArtifact("bazbar")) - assert task.input[0].value == "foobar" - assert task.input[1].value == "bazbar" - - def test_full_context(self): - parent = MockMultiTextInputTask(("parent1", "parent2")) - subtask = MockMultiTextInputTask(("test1", "test2"), 
context={"foo": "bar"}) - child = MockMultiTextInputTask(("child2", "child2")) - pipeline = Pipeline() - - pipeline.add_tasks(parent, subtask, child) - - pipeline.run() - - context = subtask.full_context - - assert context["foo"] == "bar" - assert context["parent_output"] == parent.output.to_text() - assert context["structure"] == pipeline - assert context["parent"] == parent - assert context["child"] == child diff --git a/tests/unit/tasks/test_base_text_input_task.py b/tests/unit/tasks/test_base_text_input_task.py index ff6afe42b..0adab72ff 100644 --- a/tests/unit/tasks/test_base_text_input_task.py +++ b/tests/unit/tasks/test_base_text_input_task.py @@ -30,6 +30,11 @@ def test_full_context(self): parent = MockTextInputTask("parent") subtask = MockTextInputTask("test", context={"foo": "bar"}) child = MockTextInputTask("child") + + assert parent.full_context == {} + assert subtask.full_context == {"foo": "bar"} + assert child.full_context == {} + pipeline = Pipeline() pipeline.add_tasks(parent, subtask, child) @@ -49,11 +54,11 @@ def test_rulesets(self): rulesets=[Ruleset("Foo", [Rule("foo test")]), Ruleset("Bar", [Rule("bar test")])] ) - assert len(prompt_task.all_rulesets) == 2 - assert prompt_task.all_rulesets[0].name == "Foo" - assert prompt_task.all_rulesets[1].name == "Bar" + assert len(prompt_task.rulesets) == 2 + assert prompt_task.rulesets[0].name == "Foo" + assert prompt_task.rulesets[1].name == "Bar" def test_rules(self): prompt_task = MockTextInputTask(rules=[Rule("foo test"), Rule("bar test")]) - assert prompt_task.all_rulesets[0].name == "Default Ruleset" + assert prompt_task.rulesets[0].name == "Default Ruleset" diff --git a/tests/unit/tasks/test_structure_run_task.py b/tests/unit/tasks/test_structure_run_task.py index 6ea9f5985..2973c4a05 100644 --- a/tests/unit/tasks/test_structure_run_task.py +++ b/tests/unit/tasks/test_structure_run_task.py @@ -5,7 +5,7 @@ class TestStructureRunTask: - def test_run(self, mock_config): + def 
test_run_single_input(self, mock_config): mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output="agent mock output") agent = Agent() mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output="pipeline mock output") @@ -17,3 +17,16 @@ def test_run(self, mock_config): pipeline.add_task(task) assert task.run().to_text() == "agent mock output" + + def test_run_multiple_inputs(self, mock_config): + mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output="agent mock output") + agent = Agent() + mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output="pipeline mock output") + pipeline = Pipeline() + driver = LocalStructureRunDriver(structure_factory_fn=lambda: agent) + + task = StructureRunTask(input=["foo", "bar", "baz"], driver=driver) + + pipeline.add_task(task) + + assert task.run().to_text() == "agent mock output" diff --git a/tests/unit/tasks/test_tool_task.py b/tests/unit/tasks/test_tool_task.py index dbb76a943..f92f6a887 100644 --- a/tests/unit/tasks/test_tool_task.py +++ b/tests/unit/tasks/test_tool_task.py @@ -40,6 +40,30 @@ class TestToolTask: "required": ["name", "path", "input", "tag"], "additionalProperties": False, }, + { + "type": "object", + "properties": { + "name": {"const": "MockTool"}, + "path": {"description": "test description", "const": "test_callable_schema"}, + "input": { + "type": "object", + "properties": { + "values": { + "description": "Test input", + "type": "object", + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "additionalProperties": False, + } + }, + "required": ["values"], + "additionalProperties": False, + }, + "tag": {"description": "Unique tag name for action execution.", "type": "string"}, + }, + "required": ["name", "path", "input", "tag"], + "additionalProperties": False, + }, { "type": "object", "properties": { diff --git a/tests/unit/tasks/test_toolkit_task.py b/tests/unit/tasks/test_toolkit_task.py index 6b238c399..24c715423 100644 --- 
a/tests/unit/tasks/test_toolkit_task.py +++ b/tests/unit/tasks/test_toolkit_task.py @@ -36,6 +36,30 @@ class TestToolkitSubtask: "required": ["name", "path", "input", "tag"], "additionalProperties": False, }, + { + "type": "object", + "properties": { + "name": {"const": "MockTool"}, + "path": {"description": "test description", "const": "test_callable_schema"}, + "input": { + "type": "object", + "properties": { + "values": { + "description": "Test input", + "type": "object", + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "additionalProperties": False, + } + }, + "required": ["values"], + "additionalProperties": False, + }, + "tag": {"description": "Unique tag name for action execution.", "type": "string"}, + }, + "required": ["name", "path", "input", "tag"], + "additionalProperties": False, + }, { "type": "object", "properties": { diff --git a/tests/unit/tools/test_base_tool.py b/tests/unit/tools/test_base_tool.py index 9c1764837..60c9f6825 100644 --- a/tests/unit/tools/test_base_tool.py +++ b/tests/unit/tools/test_base_tool.py @@ -2,7 +2,6 @@ import os import pytest -import yaml from schema import Or, Schema, SchemaMissingKeyError from griptape.common import ToolAction @@ -38,6 +37,29 @@ class TestBaseTool: "required": ["name", "path", "input"], "additionalProperties": False, }, + { + "type": "object", + "properties": { + "name": {"const": "MockTool"}, + "path": {"description": "test description", "const": "test_callable_schema"}, + "input": { + "type": "object", + "properties": { + "values": { + "description": "Test input", + "type": "object", + "properties": {"test": {"type": "string"}}, + "required": ["test"], + "additionalProperties": False, + } + }, + "required": ["values"], + "additionalProperties": False, + }, + }, + "required": ["name", "path", "input"], + "additionalProperties": False, + }, { "type": "object", "properties": { @@ -172,16 +194,9 @@ def test_off_prompt(self, tool): .output_memory ) - def test_manifest_path(self, 
tool): - assert tool.manifest_path == os.path.join(tool.abs_dir_path, tool.MANIFEST_FILE) - def test_requirements_path(self, tool): assert tool.requirements_path == os.path.join(tool.abs_dir_path, tool.REQUIREMENTS_FILE) - def test_manifest(self, tool): - with open(tool.manifest_path) as yaml_file: - assert tool.manifest == yaml.safe_load(yaml_file) - def test_abs_file_path(self, tool): assert tool.abs_file_path == os.path.abspath(inspect.getfile(tool.__class__)) diff --git a/tests/unit/tools/test_extraction_tool.py b/tests/unit/tools/test_extraction_tool.py index 3f783e8a4..7e66aa24d 100644 --- a/tests/unit/tools/test_extraction_tool.py +++ b/tests/unit/tools/test_extraction_tool.py @@ -40,15 +40,15 @@ def test_json_extract_artifacts(self, json_tool): ) assert len(result.value) == 2 - assert result.value[0].value == '{"test_key_1": "test_value_1"}' - assert result.value[1].value == '{"test_key_2": "test_value_2"}' + assert result.value[0].value == {"test_key_1": "test_value_1"} + assert result.value[1].value == {"test_key_2": "test_value_2"} def test_json_extract_content(self, json_tool): result = json_tool.extract({"values": {"data": "foo"}}) assert len(result.value) == 2 - assert result.value[0].value == '{"test_key_1": "test_value_1"}' - assert result.value[1].value == '{"test_key_2": "test_value_2"}' + assert result.value[0].value == {"test_key_1": "test_value_1"} + assert result.value[1].value == {"test_key_2": "test_value_2"} def test_csv_extract_artifacts(self, csv_tool): csv_tool.input_memory[0].store_artifact("foo", TextArtifact("foo,bar\nbaz,maz")) diff --git a/tests/unit/tools/test_file_manager.py b/tests/unit/tools/test_file_manager.py index 469918a02..4e035bdee 100644 --- a/tests/unit/tools/test_file_manager.py +++ b/tests/unit/tools/test_file_manager.py @@ -7,7 +7,6 @@ from griptape.artifacts import ListArtifact, TextArtifact from griptape.drivers.file_manager.local_file_manager_driver import LocalFileManagerDriver -from griptape.loaders.text_loader 
import TextLoader from griptape.tools import FileManagerTool from tests.utils import defaults @@ -36,7 +35,7 @@ def test_load_files_from_disk(self, file_manager): result = file_manager.load_files_from_disk({"values": {"paths": ["../../resources/bitcoin.pdf"]}}) assert isinstance(result, ListArtifact) - assert len(result.value) == 4 + assert len(result.value) == 9 def test_load_files_from_disk_with_encoding(self, file_manager): result = file_manager.load_files_from_disk({"values": {"paths": ["../../resources/test.txt"]}}) @@ -48,8 +47,7 @@ def test_load_files_from_disk_with_encoding(self, file_manager): def test_load_files_from_disk_with_encoding_failure(self): file_manager = FileManagerTool( file_manager_driver=LocalFileManagerDriver( - default_loader=TextLoader(encoding="utf-8"), - loaders={}, + encoding="utf-8", workdir=os.path.abspath(os.path.dirname(__file__)), ) ) @@ -116,9 +114,7 @@ def test_save_content_to_file(self, temp_dir): assert result.value == "Successfully saved file" def test_save_content_to_file_with_encoding(self, temp_dir): - file_manager = FileManagerTool( - file_manager_driver=LocalFileManagerDriver(default_loader=TextLoader(encoding="utf-8"), workdir=temp_dir) - ) + file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(encoding="utf-8", workdir=temp_dir)) result = file_manager.save_content_to_file( {"values": {"path": os.path.join("test", "foobar.txt"), "content": "foobar"}} ) @@ -127,9 +123,7 @@ def test_save_content_to_file_with_encoding(self, temp_dir): assert result.value == "Successfully saved file" def test_save_and_load_content_to_file_with_encoding(self, temp_dir): - file_manager = FileManagerTool( - file_manager_driver=LocalFileManagerDriver(loaders={"txt": TextLoader(encoding="ascii")}, workdir=temp_dir) - ) + file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(encoding="ascii", workdir=temp_dir)) result = file_manager.save_content_to_file( {"values": {"path": os.path.join("test", 
"foobar.txt"), "content": "foobar"}} ) @@ -137,11 +131,7 @@ def test_save_and_load_content_to_file_with_encoding(self, temp_dir): assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar" assert result.value == "Successfully saved file" - file_manager = FileManagerTool( - file_manager_driver=LocalFileManagerDriver( - default_loader=TextLoader(encoding="ascii"), loaders={}, workdir=temp_dir - ) - ) + file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(encoding="ascii", workdir=temp_dir)) result = file_manager.load_files_from_disk({"values": {"paths": [os.path.join("test", "foobar.txt")]}}) assert isinstance(result, ListArtifact) diff --git a/tests/unit/utils/test_file_utils.py b/tests/unit/utils/test_file_utils.py deleted file mode 100644 index 00df6958d..000000000 --- a/tests/unit/utils/test_file_utils.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -from concurrent import futures - -from griptape import utils -from griptape.loaders import TextLoader - -MAX_TOKENS = 50 - - -class TestFileUtils: - def test_load_file(self): - dirname = os.path.dirname(__file__) - file = utils.load_file(os.path.join(dirname, "../../resources/foobar-many.txt")) - - assert file.decode("utf-8").startswith("foobar foobar foobar") - - def test_load_files(self): - dirname = os.path.dirname(__file__) - sources = ["resources/foobar-many.txt", "resources/foobar-many.txt", "resources/small.png"] - sources = [os.path.join(dirname, "../../", source) for source in sources] - files = utils.load_files(sources, futures_executor=futures.ThreadPoolExecutor(max_workers=1)) - assert len(files) == 2 - - test_file = files[utils.str_to_hash(sources[0])] - assert test_file.decode("utf-8").startswith("foobar foobar foobar") - - small_file = files[utils.str_to_hash(sources[2])] - assert len(small_file) == 97 - assert small_file[:8] == b"\x89PNG\r\n\x1a\n" - - def test_load_file_with_loader(self): - dirname = os.path.dirname(__file__) - file = 
utils.load_file(os.path.join(dirname, "../../", "resources/foobar-many.txt")) - artifacts = TextLoader(max_tokens=MAX_TOKENS).load(file) - - assert len(artifacts) == 39 - assert isinstance(artifacts, list) - assert artifacts[0].value.startswith("foobar foobar foobar") - - def test_load_files_with_loader(self): - dirname = os.path.dirname(__file__) - sources = ["resources/foobar-many.txt"] - sources = [os.path.join(dirname, "../../", source) for source in sources] - files = utils.load_files(sources) - loader = TextLoader(max_tokens=MAX_TOKENS) - collection = loader.load_collection(list(files.values())) - - test_file_artifacts = collection[loader.to_key(files[utils.str_to_hash(sources[0])])] - assert len(test_file_artifacts) == 39 - assert isinstance(test_file_artifacts, list) - assert test_file_artifacts[0].value.startswith("foobar foobar foobar") diff --git a/tests/utils/structure_tester.py b/tests/utils/structure_tester.py index 9fadf4e36..c943525b6 100644 --- a/tests/utils/structure_tester.py +++ b/tests/utils/structure_tester.py @@ -235,7 +235,7 @@ def verify_structure_output(self, structure) -> dict: model="gpt-4o", azure_deployment=os.environ["AZURE_OPENAI_4_DEPLOYMENT_ID"], azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_1"], - response_format="json_object", + response_format={"type": "json_object"}, ) output_schema = Schema( { @@ -248,7 +248,7 @@ def verify_structure_output(self, structure) -> dict: task_names = [task.__class__.__name__ for task in structure.tasks] prompt = structure.input_task.input.to_text() actual = structure.output.to_text() - rules = [rule.value for ruleset in structure.input_task.all_rulesets for rule in ruleset.rules] + rules = [rule.value for ruleset in structure.input_task.rulesets for rule in ruleset.rules] agent = Agent( rulesets=[