diff --git a/multiprofile-chat-with-auth/.env.example b/multiprofile-chat-with-auth/.env.example new file mode 100644 index 000000000..6e530464a --- /dev/null +++ b/multiprofile-chat-with-auth/.env.example @@ -0,0 +1,7 @@ +OPENAI_API_KEY= +ANTHROPIC_API_KEY= +GOOGLE_API_KEY= +DEFAULT_USERNAME= +DEFAULT_USER_PASSWORD= +CHAINLIT_AUTH_SECRET= +LITERAL_API_KEY= diff --git a/multiprofile-chat-with-auth/.gitignore b/multiprofile-chat-with-auth/.gitignore new file mode 100644 index 000000000..5a1517561 --- /dev/null +++ b/multiprofile-chat-with-auth/.gitignore @@ -0,0 +1,164 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+.idea/ + + +# Misc +data/ \ No newline at end of file diff --git a/multiprofile-chat-with-auth/Dockerfile b/multiprofile-chat-with-auth/Dockerfile new file mode 100644 index 000000000..ad0132ce2 --- /dev/null +++ b/multiprofile-chat-with-auth/Dockerfile @@ -0,0 +1,28 @@ +FROM python:3.12-slim + +# Set the working directory in the container +WORKDIR /app + +# Copy the current directory contents into the container at /app +COPY requirements.txt . +COPY src/ ./src +COPY app.py setup.py ./ + +# Install any needed dependencies specified in requirements.txt +RUN pip install -r requirements.txt + +COPY public/ ./public +COPY .chainlit/ ./.chainlit +COPY README.md chainlit.md ./ + +# Set environment variables +ENV PYTHONUNBUFFERED 1 +ENV PORT=5500 + +# Command to run the app +CMD python -m chainlit run app.py -h --host 0.0.0.0 --port ${PORT} + +# Alternatively: Use entrypoint file +# COPY entrypoint.sh . +# RUN chmod +x ./entrypoint.sh +# ENTRYPOINT ["./entrypoint.sh"] \ No newline at end of file diff --git a/multiprofile-chat-with-auth/README.md b/multiprofile-chat-with-auth/README.md new file mode 100644 index 000000000..4813cecb3 --- /dev/null +++ b/multiprofile-chat-with-auth/README.md @@ -0,0 +1,105 @@ +# Home AI + +* This demo illustrates how to use [Chainlit](https://github.com/Chainlit/chainlit) to build chatbots with LLMs from the big + three AI providers: OpenAI, Anthropic, and Gemini. +* [Live Demo](https://homeai.chainlit.com) + +## Features + +- Multiple user profiles +- Integration with [OpenAI](https://openai.com/), [Anthropic](https://www.anthropic.com/) + and [Gemini](https://gemini.google.com/) chat providers +- Chat settings +- Authentication +- Custom logo and favicon +- Custom theme + +## Installation + +1. Clone the repository: + + ```bash + git clone https://github.com/mungana-ai/homeai.git + cd homeai + ``` + +2. 
Create a virtual environment: + + ```bash + python -m venv venv # We assume you are using at least Python 3.10 + source venv/bin/activate # For Unix-based systems + venv\Scripts\activate.bat # For Windows + ``` + +3. Install the package and its dependencies: + + ```bash + pip install -r requirements.txt + ``` + +## Configuration + +1. Rename the provided `.env.example` file to `.env` in the project root directory. + +2. Update the necessary configuration variables in the `.env` file. The following variables are required: + + ```bash + DEFAULT_USER_PASSWORD=your_default_user_password + CHAINLIT_AUTH_SECRET=your_64_char_chainlit_auth_secret_for_signing_tokens + LITERAL_API_KEY=your_literal_api_key_for_storing_chat_history + + # Optional: At least one of the following chat providers is required + OPENAI_API_KEY=your_openai_api_key + ANTHROPIC_API_KEY=your_anthropic_api_key + GOOGLE_API_KEY=your_google_api_key + + # Optional + DEFAULT_USERNAME=your_default_username # Default: "admin" + ``` + + > **Hints:** You can generate a 64-character secret key using the following command: `chainlit create-secret`. To + > obtain an API key for [Literal](https://literal.chainlit.com), sign up for an account and create a new project. + +## Usage + +To run the Chainlit app, use the following command: + +```bash +chainlit run app.py --host 0.0.0.0 --port 5500 +``` + +* Your app should now be accessible at `http://localhost:5500` + +## Project Structure + +The project structure is organized as follows: + +- `src/`: Contains the main application code. +- `.chainlit/`: Contains the Chainlit configuration files. +- `public/`: Contains the static files for custom logos and favicons. +- `app.py`: The main application entry point. +- `.env.example`: Stores the environment variables template. +- `requirements.txt`: Lists the project dependencies. +- `chainlit.md`: Provides documentation and instructions for the project. 
+ +## Issues + +If you have any questions or inquiries, please contact [N Nemakhavhani](mailto:endeesa@yahoo.com). Bugs and issues can +be reported on the [GitHub Issues]() page. + +## License + +This project is licensed under the MIT License. You are free to use, modify, and distribute the code as you see fit. + +## Contributions + +Contributions are welcome! If you would like to contribute to the project, please fork the repository and submit a pull +request. + + +## Links + +* [OpenAI API](https://platform.openai.com/docs/quickstart) +* [Anthropic API](https://docs.anthropic.com/en/api/getting-started) +* [Gemini API](https://ai.google.dev/gemini-api/docs/api-key) +* [Literal API](https://cloud.getliteral.ai/) \ No newline at end of file diff --git a/multiprofile-chat-with-auth/app.py b/multiprofile-chat-with-auth/app.py new file mode 100644 index 000000000..5e0e22e05 --- /dev/null +++ b/multiprofile-chat-with-auth/app.py @@ -0,0 +1,98 @@ +import os +from typing import Any + +import chainlit as cl + + +@cl.password_auth_callback +def auth_callback(username: str, password: str): + # TODO: Fetch the user matching username from your database + # and compare the hashed password with the value stored in the database + if (username, password) == (os.getenv("DEFAULT_USERNAME", "admin"), os.getenv("DEFAULT_USER_PASSWORD")): + return cl.User( + identifier=os.getenv("DEFAULT_USERNAME", "admin"), + metadata={"role": "admin", + "provider": "credentials"} + ) + else: + return None + + +@cl.set_chat_profiles +async def load_chat_profiles(): + return [ + cl.ChatProfile( + name="ChatGPT", + markdown_description="ChatGPT by OpenAI", + icon="https://github.com/ndamulelonemakh/remote-assets/blob/7ed514dbd99ab86536daf3942127822bd979936c/images/openai-logomark.png?raw=true", + ), + cl.ChatProfile( + name="Claude", + markdown_description="Claude by Anthropic", + icon="https://www.anthropic.com/images/icons/apple-touch-icon.png", + ), + cl.ChatProfile( + name="Gemini", + 
markdown_description="Gemini Pro by Google and DeepMind", + icon="https://github.com/ndamulelonemakh/remote-assets/blob/main/images/Google-Bard-Logo-758x473.jpg?raw=true", + ) + ] + + +@cl.on_settings_update +async def setup_agent(settings: dict[str, Any]): + cl.logger.debug(f"user settings updated: {settings}") + existing_settings: dict = cl.user_session.get("chat_settings", {}) + existing_settings.update(settings) + if "max_tokens" in existing_settings: + existing_settings["max_tokens"] = int(existing_settings["max_tokens"]) + if "max_tokens_to_sample" in existing_settings: + existing_settings["max_tokens_to_sample"] = int(existing_settings["max_tokens_to_sample"]) + cl.user_session.set("chat_settings", existing_settings) + + +@cl.on_chat_start
async def start_chat(): + active_chat_profile = cl.user_session.get("chat_profile") + if active_chat_profile == "ChatGPT": + from src.providers.chatgpt import AVATAR, chat_settings, call_chatgpt, user_setttings + + cl.user_session.set("prompt_history", []) + cl.user_session.set("call_llm", call_chatgpt) + cl.user_session.set("chat_settings", chat_settings) + s = cl.ChatSettings(user_setttings) + await s.send() + + await AVATAR.send() + + elif active_chat_profile == "Claude": + from src.providers.claude import AVATAR, chat_settings, call_claude, user_setttings + + cl.user_session.set("prompt_history", "") + cl.user_session.set("call_llm", call_claude) + cl.user_session.set("chat_settings", chat_settings) + s = cl.ChatSettings(user_setttings) + await s.send() + + await AVATAR.send() + elif active_chat_profile == "Gemini": + from src.providers.gemini import AVATAR, chat_settings, call_gemini, user_setttings + cl.user_session.set("prompt_history", []) + cl.user_session.set("call_llm", call_gemini) + cl.user_session.set("chat_settings", chat_settings) + s = cl.ChatSettings(user_setttings) + await s.send() + + await AVATAR.send() + else: + await cl.ErrorMessage(f"Unsupported profile: {active_chat_profile}").send() + return + + 
await cl.Message(f"Welcome back, ##TODO-USERNAME. {active_chat_profile} is ready to fulfill your requests!").send() + + +@cl.on_message +async def chat(message: cl.Message): + chat_callback = cl.user_session.get("call_llm") + chat_settings = cl.user_session.get("chat_settings") + await chat_callback(message.content, chat_settings) diff --git a/multiprofile-chat-with-auth/chainlit.md b/multiprofile-chat-with-auth/chainlit.md new file mode 100644 index 000000000..24562edb5 --- /dev/null +++ b/multiprofile-chat-with-auth/chainlit.md @@ -0,0 +1,104 @@ +# Home AI + +* This demo illustrates how to use [Chainlit](https://github.com/Chainlit/chainlit) to build chatbots with LLMs the big + three AI providers: OpenAI, Anthropic, and Gemini. +* [Live Demo](https://homeai.chainlit.com) + +## Features + +- Multiple user profiles +- Integration with [OpenAI](https://openai.com/), [Anthropic](https://www.anthropic.com/) + and [Gemini](https://www.gemini.com/) chat providers +- Chat settings +- Authentication +- Custom logo and favicon +- Custom theme + +## Installation + +1. Clone the repository: + + ```bash + git clone https://github.com/mungana-ai/homeai.git + cd homeai + ``` + +2. Create a virtual environment: + + ```bash + python -m venv venv # We assume you are using at least Python 3.10 + source venv/bin/activate # For Unix-based systems + venv\Scripts\activate.bat # For Windows + ``` + +3. Install the package and its dependencies: + + ```bash + pip install -r requirements.txt + ``` + +## Configuration + +1. Rename the provided `.env.example` file into `.env` in the project root directory. + +2. Update the necessary configuration variables to the `.env` file. 
The following variables are required: + + ```bash + DEFAULT_USER_PASSWORD=your_default_user_password + CHAINLIT_AUTH_SECRET=your_64_char_chainlit_auth_secret_for_signing_tokens + LITERAL_API_KEY=your_literal_api_key_for_storing_chat_history + + # Optional: At least one of the following chat providers is required + OPENAI_API_KEY=your_openai_api_key + ANTHROPIC_API_KEY=your_anthropic_api_key + GOOGLE_API_KEY=your_google_api_key + + DEFAULT_USERNAME=your_default_username # Default: "admin" + ``` + + > **Hints:** You can generate a 64-character secret key using the following command: `chainlit create-secret`. To + > obtain an API key for [Literal](https://literal.chainlit.com), sign up for an account and create a new project. + +## Usage + +To run the Chainlit app, use the following command: + +```bash +chainlit run app.py --host 0.0.0.0 --port 5500 +``` + +* You app should now be accessible at `http://localhost:5500` + +## Project Structure + +The project structure is organized as follows: + +- `src/`: Contains the main application code. +- `.chainlit/`: Contains the Chainlit configuration files. +- `public/`: Contains the static files for custom logos and favicons. +- `app.py`: The main application entry point. +- `.env.example`: Stores the environment variables template. +- `requirements.txt`: Lists the project dependencies. +- `chainlit.md`: Provides documentation and instructions for the project. + +## Issues + +If you have any questions or inquiries, please contact [N Nemakhavhani](mailto://endeesa@yahoo.com). Bugs and issues can +be reported on the [GitHub Issues]() page. + +## License + +This project is licensed under the MIT License. You are free to use, modify, and distribute the code as you see fit. + +## Contributions + +Contributions are welcome! If you would like to contribute to the project, please fork the repository and submit a pull +request. 
+ + +## Links + +* [OpenAI API](https://platform.openai.com/docs/quickstart) +* [Anthropic API](https://docs.anthropic.com/en/api/getting-started) +* [Gemini API](https://ai.google.dev/gemini-api/docs/api-key) +* [Literal API](https://cloud.getliteral.ai/) \ No newline at end of file diff --git a/multiprofile-chat-with-auth/entrypoint.sh b/multiprofile-chat-with-auth/entrypoint.sh new file mode 100644 index 000000000..818280eb9 --- /dev/null +++ b/multiprofile-chat-with-auth/entrypoint.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +echo "Starting Chainlit app from entrypoint.sh..." +PYTHONPATH=$PYTHONPATH:$(pwd)/src python -m chainlit run app.py --host 0.0.0.0 --port ${PORT} diff --git a/multiprofile-chat-with-auth/public/favicon.ico b/multiprofile-chat-with-auth/public/favicon.ico new file mode 100644 index 000000000..cd4eaa5e4 Binary files /dev/null and b/multiprofile-chat-with-auth/public/favicon.ico differ diff --git a/multiprofile-chat-with-auth/public/logo_dark.png b/multiprofile-chat-with-auth/public/logo_dark.png new file mode 100644 index 000000000..941f513fc Binary files /dev/null and b/multiprofile-chat-with-auth/public/logo_dark.png differ diff --git a/multiprofile-chat-with-auth/public/logo_light.png b/multiprofile-chat-with-auth/public/logo_light.png new file mode 100644 index 000000000..62171eedf Binary files /dev/null and b/multiprofile-chat-with-auth/public/logo_light.png differ diff --git a/multiprofile-chat-with-auth/requirements.txt b/multiprofile-chat-with-auth/requirements.txt new file mode 100644 index 000000000..e94ca96b2 --- /dev/null +++ b/multiprofile-chat-with-auth/requirements.txt @@ -0,0 +1,5 @@ +chainlit==1.0.506 +anthropic==0.25.7 +openai==1.25.1 +google-generativeai==0.5.2 +-e . 
\ No newline at end of file diff --git a/multiprofile-chat-with-auth/setup.py b/multiprofile-chat-with-auth/setup.py new file mode 100644 index 000000000..48b4d4209 --- /dev/null +++ b/multiprofile-chat-with-auth/setup.py @@ -0,0 +1,42 @@ +from setuptools import setup, find_packages + +setup( + name="homeai-chainlit-app", + version="0.0.1b", + description="A Chainlit chat app that supports multiple profiles and chat providers", + author="Mungana AI", + author_email="info@mungana.com", + maintainer="N Nemakhavhani", + maintainer_email="endeesa@yahoo.com", + packages=find_packages(), + install_requires=[ + "chainlit", + "python-dotenv", + "openai", + "anthropic", + "google-generativeai" + ], + extras_require={ + "dev": [ + "pytest", + "pytest-cov", + "flake8", + "black", + # Add other development dependencies here + ] + }, + entry_points={ + "console_scripts": [ + ], + }, + keywords=["chatbot", "ai", "openai", "anthropic", "gemini", "chainlit"], + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + ], +) diff --git a/multiprofile-chat-with-auth/src/__init__.py b/multiprofile-chat-with-auth/src/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/multiprofile-chat-with-auth/src/providers/__init__.py b/multiprofile-chat-with-auth/src/providers/__init__.py new file mode 100644 index 000000000..8d5b0a1ce --- /dev/null +++ b/multiprofile-chat-with-auth/src/providers/__init__.py @@ -0,0 +1,5 @@ +from . import chatgpt +from . import claude +from . 
import gemini + +__all__ = [chatgpt, claude, gemini] diff --git a/multiprofile-chat-with-auth/src/providers/chatgpt.py b/multiprofile-chat-with-auth/src/providers/chatgpt.py new file mode 100644 index 000000000..a65b6a8c4 --- /dev/null +++ b/multiprofile-chat-with-auth/src/providers/chatgpt.py @@ -0,0 +1,198 @@ +import json +import ast +import os +from typing import Any +from openai import AsyncOpenAI + +from chainlit.playground.providers import ChatOpenAI +from chainlit.playground.providers.openai import stringify_function_call +import chainlit as cl +from chainlit.input_widget import Select, Slider + + +open_ai_client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"]) +AVATAR = cl.Avatar( + name="ChatGPT", + url="https://github.com/ndamulelonemakh/remote-assets/blob/7ed514dbd99ab86536daf3942127822bd979936c/images/openai-logomark.png?raw=true", +) +tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + }, + } +] + +chat_settings = { + "model": "gpt-4", + "tools": tools, + "tool_choice": "auto", + "max_tokens": 1000, + "temperature": 0.2 +} +user_setttings = [ + Select( + id="model", + label="Model", + values=["gpt-4-turbo", "gpt-3.5-turbo-0125"], + initial_index=0, + ), + Slider( + id="temperature", + label="Temperature", + initial=0.2, + min=0, + max=1, + step=0.1, + ), + Slider( + id="max_tokens", + label="Maxiumum Completions Tokens", + initial=1000, + min=100, + max=32000, + step=10, + description="The maximum allowable tokens in the response", + ), + +] +MAX_ITER = 5 + + + +# Example dummy function hard coded to return the same weather +# In production, this could be your backend API or an external API +def get_current_weather(location, unit): + """Get the current weather in a given location""" + unit = unit or "Farenheit" + weather_info = { + "location": location, + "temperature": "72", + "unit": unit, + "forecast": ["sunny", "windy"], + } + return json.dumps(weather_info) + + +@cl.step(type="tool") +async def call_tool(tool_call, message_history): + function_name = tool_call.function.name + arguments = ast.literal_eval(tool_call.function.arguments) + + current_step = cl.context.current_step + current_step.name = function_name + + current_step.input = arguments + + function_response = get_current_weather( + location=arguments.get("location"), + unit=arguments.get("unit"), + ) + + current_step.output = function_response + current_step.language = "json" + + message_history.append( + { + "role": "function", + "name": function_name, + "content": function_response, + "tool_call_id": tool_call.id, + } + ) + + +@cl.step(name="ChatGPT-Completions", type="llm") +async def _get_chat_completions(message_history: list[dict], settings: dict[str, Any] = None): + settings = settings or chat_settings + if "max_tokens" in settings: + 
settings["max_tokens"] = int(settings["max_tokens"]) + + response = await open_ai_client.chat.completions.create( + messages=message_history, **settings + ) + + message = response.choices[0].message + for tool_call in message.tool_calls or []: + if tool_call.type == "function": + await call_tool(tool_call, message_history) + + if message.content: + cl.context.current_step.output = message.content + + elif message.tool_calls: + completion = stringify_function_call(message.tool_calls[0].function) + + cl.context.current_step.language = "json" + cl.context.current_step.output = completion + + return message + + +@cl.step(name="ChatGPT", + type="llm", + root=True) +async def call_chatgpt_with_tools(query: str, settings: dict[str, Any] = None): + message_history = cl.user_session.get("prompt_history") + message_history.append({"name": "user", "role": "user", "content": query}) + + cur_iter = 0 + + while cur_iter < MAX_ITER: + response_message = await _get_chat_completions(message_history, settings=settings) + if not response_message.tool_calls: + await cl.Message(content=response_message.content, author="Answer").send() + break + + cur_iter += 1 + + + +@cl.step(name="ChatGPT", + type="llm", + root=True) +async def call_chatgpt(query: str, settings: dict[str, Any] = chat_settings): + message_history = cl.user_session.get("prompt_history") + message_history.append({"name": "User", "role": "user", "content": query}) + + + if "max_tokens" in settings: + settings["max_tokens"] = int(settings["max_tokens"]) + + stream = await open_ai_client.chat.completions.create( + messages=message_history, + stream=True, + **settings + ) + + async for part in stream: + token = part.choices[0].delta.content + if token: + await cl.context.current_step.stream_token(token) + + + cl.context.current_step.generation = cl.CompletionGeneration( + formatted=query, + completion=cl.context.current_step.output, + settings=settings, + provider=ChatOpenAI.id, + ) + + message_history.append({"name": 
"ChatGPT", + "role": "assistant", + "content": cl.context.current_step.output}) + cl.user_session.set("prompt_history", message_history) + diff --git a/multiprofile-chat-with-auth/src/providers/claude.py b/multiprofile-chat-with-auth/src/providers/claude.py new file mode 100644 index 000000000..65e311b14 --- /dev/null +++ b/multiprofile-chat-with-auth/src/providers/claude.py @@ -0,0 +1,76 @@ +import os + +from typing import Any + +import anthropic +import chainlit as cl +from chainlit.playground.providers import Anthropic +from chainlit.input_widget import Select, Slider + +anthropic_client = anthropic.AsyncAnthropic(api_key=os.environ["ANTHROPIC_API_KEY"]) +AVATAR = cl.Avatar( + name="Claude", + url="https://www.anthropic.com/images/icons/apple-touch-icon.png", +) +chat_settings = settings = { + "stop_sequences": [anthropic.HUMAN_PROMPT], + "max_tokens_to_sample": 1000, + "model": "claude-2.0", +} +user_setttings = [ + Select( + id="model", + label="Model", + # https://docs.anthropic.com/claude/docs/models-overview#claude-3-a-new-generation-of-ai + values=["claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307"], + initial_index=0, + ), + Slider( + id="temperature", + label="Temperature", + initial=0.2, + min=0, + max=1, + step=0.1, + ), + Slider( + id="max_tokens_to_sample", + label="Maxiumum Completions Tokens", + initial=1000, + min=100, + max=32000, + step=10, + description="The maximum allowable tokens in the response", + ), + +] + + +@cl.step(name="Claude", + type="llm", + root=True) +async def call_claude(query: str, settings: dict[str, Any] = None): + prompt_history = cl.user_session.get("prompt_history") + prompt = f"{prompt_history}{anthropic.HUMAN_PROMPT}{query}{anthropic.AI_PROMPT}" + + settings = settings or chat_settings + if "max_tokens_to_sample" in settings: + settings["max_tokens_to_sample"] = int(settings["max_tokens_to_sample"]) + stream = await anthropic_client.completions.create( + prompt=prompt, + 
stream=True, + **settings, + ) + + async for data in stream: + token = data.completion + await cl.context.current_step.stream_token(token) + + cl.context.current_step.generation = cl.CompletionGeneration( + formatted=prompt, + completion=cl.context.current_step.output, + settings=settings, + provider=Anthropic.id, + ) + + cl.user_session.set("prompt_history", prompt + cl.context.current_step.output) diff --git a/multiprofile-chat-with-auth/src/providers/gemini.py b/multiprofile-chat-with-auth/src/providers/gemini.py new file mode 100644 index 000000000..d95412b39 --- /dev/null +++ b/multiprofile-chat-with-auth/src/providers/gemini.py @@ -0,0 +1,95 @@ + +import os +from typing import Any + +import chainlit as cl +import google.generativeai as genai +from chainlit.playground.providers import Gemini +from chainlit.input_widget import Select, Slider, NumberInput + +genai.configure(api_key=os.environ["GOOGLE_API_KEY"]) +AVATAR = cl.Avatar( + name="Gemini", + url="https://github.com/ndamulelonemakh/remote-assets/blob/main/images/Google-Bard-Logo-758x473.jpg?raw=true", +) +chat_settings = settings = { + "max_output_tokens": 2000, + "model": "gemini-1.0-pro-latest", +} +user_setttings = [ + Select( + id="model", + label="Model", + # https://ai.google.dev/gemini-api/docs/models/gemini#model-variations + values=["gemini-1.0-pro-latest", "gemini-pro-vision", "gemini-pro"], + initial_index=0, + ), + Slider( + id="temperature", + label="Temperature", + initial=0.2, + min=0, + max=1, + step=0.1, + description="The temperature of the model. 
Higher values mean the model will generate more creative answers.", + ), + Slider( + id="max_output_tokens", + label="Maximum Completion Tokens", + initial=2000, + min=100, + max=32000, + step=10, + description="The maximum allowable tokens in the response", + ), + NumberInput( + id="candidate_count", + label="Number of Answers", + initial=1, + placeholder="Enter a number between 1 and 3" + ), + Select( + id="response_mime_type", + label="Response Type", + values=["text/plain", "application/json"], + initial_index=0, + ) +] + + + +@cl.step(name="Gemini", + type="llm", + root=True) +async def call_gemini(query: str, + settings: dict[str, Any] = chat_settings): + settings = dict(settings)  # copy: the .pop("model") below must not mutate the shared chat_settings default + prompt_history = cl.user_session.get("prompt_history") or [] + if "max_output_tokens" in settings: + settings["max_output_tokens"] = int(settings["max_output_tokens"]) + if "candidate_count" in settings: + settings["candidate_count"] = int(settings["candidate_count"]) + + model = genai.GenerativeModel(settings.pop("model", "gemini-1.0-pro-latest"), + generation_config=genai.GenerationConfig( + **settings + ), + tools=None, + tool_config=None + ) + chat = model.start_chat(history=prompt_history) + async for chunk in await chat.send_message_async(query, + stream=True): + await cl.context.current_step.stream_token(chunk.text) + + cl.context.current_step.generation = cl.CompletionGeneration( + formatted=query, + completion=cl.context.current_step.output, + settings=settings, + provider=Gemini.id, + ) + + updated_history = prompt_history + chat.history + # TODO: need to limit these to prevent exceeding model context + cl.user_session.set("prompt_history", updated_history) +