Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev - 1.4.0 #28

Merged
merged 36 commits into the base branch from the feature branch
Feb 13, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
0e82bc2
Add streaming support
zaldivards Nov 26, 2023
b8c836e
Remove unused prompts
zaldivards Nov 27, 2023
44fa10b
Update state handlers
zaldivards Nov 27, 2023
98a317e
Add `AsyncCallback` for the agent
zaldivards Nov 27, 2023
3c3984a
Update chatbox
zaldivards Nov 27, 2023
8509578
Remove deprecated code
zaldivards Nov 27, 2023
c8a3f61
Add `general` module
zaldivards Nov 27, 2023
1e8b2bb
Add streaming support for `QA`
zaldivards Nov 27, 2023
a0c1bfe
Rename `general` module to `streaming`
zaldivards Nov 27, 2023
b78a31b
Update the `ask` function
zaldivards Nov 27, 2023
66db0c4
Update text utils
zaldivards Nov 28, 2023
acb1684
Update the `stream` function
zaldivards Nov 28, 2023
703bccb
Update the `ask` function
zaldivards Nov 28, 2023
30aac42
Remove `clean` function
zaldivards Nov 28, 2023
6f58500
Merge pull request #24 from zaldivards/feature/streaming
zaldivards Dec 1, 2023
2529bc9
Update `build_sources` function
zaldivards Dec 18, 2023
9305abf
Update how the sources are streamed
zaldivards Dec 18, 2023
3f1b2d7
Add the `SourcesBox` component
zaldivards Dec 18, 2023
f78fc7a
Add rendering of sources
zaldivards Dec 18, 2023
b53dba8
Update `build_sources` function
zaldivards Feb 10, 2024
440312c
Add state of the latest sources
zaldivards Feb 10, 2024
e36af74
Update sources layout
zaldivards Feb 10, 2024
d00799a
Merge pull request #26 from zaldivards/feature/sourceRendering
zaldivards Feb 12, 2024
3c7b528
Update file uploader
zaldivards Feb 12, 2024
29c5786
Add the `BatchProcessor` class
zaldivards Feb 12, 2024
6339245
Update the `/ingest/` endpoint
zaldivards Feb 12, 2024
8b9b8a4
Fix bug when working with multiple threads
zaldivards Feb 12, 2024
2621722
Add `/check-sources` endpoint
zaldivards Feb 12, 2024
3299493
Update mounted function to check the sources availability
zaldivards Feb 12, 2024
e5f7e9c
Update QA session warnings
zaldivards Feb 12, 2024
2ff4f89
Fix error related to the connection pool
zaldivards Feb 12, 2024
e9a368c
Update the `/ingest/` endpoint
zaldivards Feb 12, 2024
f7b7d28
Update QA messages
zaldivards Feb 12, 2024
933c16a
Update section names
zaldivards Feb 12, 2024
b21a044
Fix bug regarding the `latestSources` state
zaldivards Feb 12, 2024
d1aa05a
Merge pull request #27 from zaldivards/feature/multiIngestion
zaldivards Feb 13, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Add /check-sources endpoint
This endpoint is useful for indicating whether there are sources available
  • Loading branch information
zaldivards committed Feb 12, 2024
commit 2621722cb03bec7877234155440a83d206d829b6
39 changes: 26 additions & 13 deletions api/contextqa/models/schemas.py
Original file line number Diff line number Diff line change
@@ -1,49 +1,62 @@
# pylint: disable=E0611
from enum import Enum
from typing import Annotated
from typing import Annotated, Literal

from pydantic import BaseModel, Field


class SimilarityProcessor(str, Enum):
    """Enum representing the supported vector stores

    Note that the LOCAL identifier refers to ChromaDB
    """

    LOCAL = "local"  # local ChromaDB vector store
    PINECONE = "pinecone"  # hosted Pinecone vector store


class SourceFormat(str, Enum):
    """Enum representing the supported file formats for ingested sources"""

    PDF = "pdf"  # PDF documents
    TXT = "txt"  # plain-text files
    CSV = "csv"  # comma-separated values files


class Source(BaseModel):
    """Source returned as metadata in QA sessions"""

    # Display name of the source document
    title: str
    # File format; serialized/parsed under the alias "format" because
    # "format" shadows the builtin str method name
    format_: Annotated[SourceFormat, Field(alias="format")]
    # Relevant excerpt(s) from the source; a single string or a list of chunks
    content: str | list


class LLMResult(BaseModel):
response: str
class SourceStatus(BaseModel):
    """Response model describing whether any data sources are available.

    ``status`` is ``"ready"`` when at least one source exists and
    ``"empty"`` otherwise.
    """

    status: Literal["ready", "empty"]

    @classmethod
    def from_count_status(cls, status_flag: bool) -> "SourceStatus":
        """Build an instance from a boolean availability flag"""
        return cls(status="ready" if status_flag else "empty")


class LLMRequestBodyBase(BaseModel):
    """Base request body carrying text-splitting parameters"""

    # Separator used when splitting the input text into chunks
    separator: str = Field(description="Separator to use for the text splitting", default=".")
    # Target size (in characters) of each chunk produced by the splitter
    chunk_size: int = Field(description="size of each splitted chunk", default=100)
    # Number of characters shared between consecutive chunks
    chunk_overlap: int = 50
class LLMResult(BaseModel):
    """LLM chat response object"""

    # Text produced by the LLM for the request
    response: str


class LLMContextQueryRequest(BaseModel):
    """QA session request object"""

    # Natural-language question to answer against the ingested sources
    question: str


class LLMQueryRequest(BaseModel):
    """Chat request object"""

    # User message sent to the chat endpoint
    message: str
    # Whether the agent may use internet tools; disabled by default
    internet_access: bool = False


class LLMQueryRequestBody(LLMRequestBodyBase):
    """Request body combining splitting parameters with the user query"""

    # Query forwarded to the LLM; minimum length guards against empty/trivial input
    query: str = Field(description="The query we want the llm to respond", min_length=10)
18 changes: 13 additions & 5 deletions api/contextqa/routes/qa.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,7 @@
from sqlalchemy.orm import Session

from contextqa import context, get_logger
from contextqa.models.schemas import (
LLMResult,
SimilarityProcessor,
LLMContextQueryRequest,
)
from contextqa.models.schemas import LLMResult, SimilarityProcessor, SourceStatus, LLMContextQueryRequest
from contextqa.routes.dependencies import get_db
from contextqa.utils.exceptions import VectorDBConnectionError, DuplicatedSourceError

Expand Down Expand Up @@ -70,3 +66,15 @@ async def qa(params: LLMContextQueryRequest):
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"message": "ContextQA server did not process the request successfully", "cause": str(ex)},
) from ex


@router.get("/check-sources")
async def check_sources(session: Annotated[Session, Depends(get_db)]):
    """Report whether any ingested sources are available.

    Returns a ``SourceStatus`` whose ``status`` field is ``"ready"`` or
    ``"empty"``; maps any DB failure to a 500 response.
    """
    try:
        available = context.sources_exists(session)
    except Exception as ex:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail={"message": "ContextQA could not get the results from the DB", "cause": str(ex)},
        ) from ex
    return SourceStatus.from_count_status(available)
16 changes: 16 additions & 0 deletions api/contextqa/services/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@

from contextqa import get_logger, settings
from contextqa.models.schemas import LLMResult, SimilarityProcessor, SourceFormat
from contextqa.models.orm import Source
from contextqa.utils import memory, prompts
from contextqa.utils.exceptions import VectorDBConnectionError
from contextqa.utils.sources import check_digest, get_not_seen_chunks
Expand Down Expand Up @@ -242,3 +243,18 @@ def get_setter(processor: SimilarityProcessor | None = None) -> LLMContextManage
return LocalManager()
case SimilarityProcessor.PINECONE:
return PineconeManager()


def sources_exists(session: Session) -> bool:
    """Check if there is at least one source available

    Parameters
    ----------
    session : Session
        sqlalchemy session

    Returns
    -------
    bool
        True if at least one ``Source`` row exists, False otherwise
    """
    # ``first()`` emits a plain ``SELECT ... LIMIT 1``; the previous
    # ``limit(1).count()`` form made SQLAlchemy wrap the query in an extra
    # ``SELECT count(*) FROM (...)`` subquery for the same boolean answer.
    return session.query(Source.id).first() is not None