From ed7ca2048bf495f118a6672fb1e197143acef339 Mon Sep 17 00:00:00 2001 From: ndamulelo Date: Wed, 15 May 2024 20:49:27 +0200 Subject: [PATCH] add multiprofile demo --- multiprofile-chat-with-auth/.env.example | 7 + multiprofile-chat-with-auth/.gitignore | 164 +++++++++++++++ multiprofile-chat-with-auth/Dockerfile | 28 +++ multiprofile-chat-with-auth/README.md | 105 ++++++++++ multiprofile-chat-with-auth/app.py | 98 +++++++++ multiprofile-chat-with-auth/chainlit.md | 104 +++++++++ multiprofile-chat-with-auth/entrypoint.sh | 4 + .../public/favicon.ico | Bin 0 -> 15406 bytes .../public/logo_dark.png | Bin 0 -> 5332 bytes .../public/logo_light.png | Bin 0 -> 5257 bytes multiprofile-chat-with-auth/requirements.txt | 5 + multiprofile-chat-with-auth/setup.py | 42 ++++ multiprofile-chat-with-auth/src/__init__.py | 0 .../src/providers/__init__.py | 5 + .../src/providers/chatgpt.py | 198 ++++++++++++++++++ .../src/providers/claude.py | 76 +++++++ .../src/providers/gemini.py | 95 +++++++++ 17 files changed, 931 insertions(+) create mode 100644 multiprofile-chat-with-auth/.env.example create mode 100644 multiprofile-chat-with-auth/.gitignore create mode 100644 multiprofile-chat-with-auth/Dockerfile create mode 100644 multiprofile-chat-with-auth/README.md create mode 100644 multiprofile-chat-with-auth/app.py create mode 100644 multiprofile-chat-with-auth/chainlit.md create mode 100644 multiprofile-chat-with-auth/entrypoint.sh create mode 100644 multiprofile-chat-with-auth/public/favicon.ico create mode 100644 multiprofile-chat-with-auth/public/logo_dark.png create mode 100644 multiprofile-chat-with-auth/public/logo_light.png create mode 100644 multiprofile-chat-with-auth/requirements.txt create mode 100644 multiprofile-chat-with-auth/setup.py create mode 100644 multiprofile-chat-with-auth/src/__init__.py create mode 100644 multiprofile-chat-with-auth/src/providers/__init__.py create mode 100644 multiprofile-chat-with-auth/src/providers/chatgpt.py create mode 100644 
multiprofile-chat-with-auth/src/providers/claude.py create mode 100644 multiprofile-chat-with-auth/src/providers/gemini.py diff --git a/multiprofile-chat-with-auth/.env.example b/multiprofile-chat-with-auth/.env.example new file mode 100644 index 000000000..6e530464a --- /dev/null +++ b/multiprofile-chat-with-auth/.env.example @@ -0,0 +1,7 @@ +OPENAI_API_KEY= +ANTHROPIC_API_KEY= +GOOGLE_API_KEY= +DEFAULT_USERNAME= +DEFAULT_USER_PASSWORD= +CHAINLIT_AUTH_SECRET= +LITERAL_API_KEY= diff --git a/multiprofile-chat-with-auth/.gitignore b/multiprofile-chat-with-auth/.gitignore new file mode 100644 index 000000000..5a1517561 --- /dev/null +++ b/multiprofile-chat-with-auth/.gitignore @@ -0,0 +1,164 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +.idea/ + + +# Misc +data/ \ No newline at end of file diff --git a/multiprofile-chat-with-auth/Dockerfile b/multiprofile-chat-with-auth/Dockerfile new file mode 100644 index 000000000..ad0132ce2 --- /dev/null +++ b/multiprofile-chat-with-auth/Dockerfile @@ -0,0 +1,28 @@ +FROM python:3.12-slim + +# Set the working directory in the container +WORKDIR /app + +# Copy the current directory contents into the container at /app +COPY requirements.txt . +COPY src/ ./src +COPY app.py setup.py . + +# Install any needed dependencies specified in requirements.txt +RUN pip install -r requirements.txt + +COPY public/ ./public +COPY .chainlit/ ./.chainlit +COPY README.md chainlit.md . + +# Set environment variables +ENV PYTHONUNBUFFERED 1 +ARG PORT=5500 + +# Command to run the app +CMD python -m chainlit run app.py -h --host 0.0.0.0 --port ${PORT} + +# Alternatively: Use entrypoint file +# COPY entrypoint.sh . 
+# RUN chmod +x ./entrypoint.sh +# ENTRYPOINT ["./entrypoint.sh"] \ No newline at end of file diff --git a/multiprofile-chat-with-auth/README.md b/multiprofile-chat-with-auth/README.md new file mode 100644 index 000000000..4813cecb3 --- /dev/null +++ b/multiprofile-chat-with-auth/README.md @@ -0,0 +1,105 @@ +# Home AI + +* This demo illustrates how to use [Chainlit](https://github.com/Chainlit/chainlit) to build chatbots with LLMs the big + three AI providers: OpenAI, Anthropic, and Gemini. +* [Live Demo](https://homeai.chainlit.com) + +## Features + +- Multiple user profiles +- Integration with [OpenAI](https://openai.com/), [Anthropic](https://www.anthropic.com/) + and [Gemini](https://www.gemini.com/) chat providers +- Chat settings +- Authentication +- Custom logo and favicon +- Custom theme + +## Installation + +1. Clone the repository: + + ```bash + git clone https://github.com/mungana-ai/homeai.git + cd homeai + ``` + +2. Create a virtual environment: + + ```bash + python -m venv venv # We assume you are using at least Python 3.10 + source venv/bin/activate # For Unix-based systems + venv\Scripts\activate.bat # For Windows + ``` + +3. Install the package and its dependencies: + + ```bash + pip install -r requirements.txt + ``` + +## Configuration + +1. Rename the provided `.env.example` file into `.env` in the project root directory. + +2. Update the necessary configuration variables to the `.env` file. 
The following variables are required: + + ```bash + DEFAULT_USER_PASSWORD=your_default_user_password + CHAINLIT_AUTH_SECRET=your_64_char_chainlit_auth_secret_for_signing_tokens + LITERAL_API_KEY=your_literal_api_key_for_storing_chat_history + + # Optional: At least one of the following chat providers is required + OPENAI_API_KEY=your_openai_api_key + ANTHROPIC_API_KEY=your_anthropic_api_key + GOOGLE_API_KEY=your_google_api_key + + # Optional + DEFAULT_USERNAME=your_default_username # Default: "admin" + ``` + + > **Hints:** You can generate a 64-character secret key using the following command: `chainlit create-secret`. To + > obtain an API key for [Literal](https://literal.chainlit.com), sign up for an account and create a new project. + +## Usage + +To run the Chainlit app, use the following command: + +```bash +chainlit run app.py --host 0.0.0.0 --port 5500 +``` + +* You app should now be accessible at `http://localhost:5500` + +## Project Structure + +The project structure is organized as follows: + +- `src/`: Contains the main application code. +- `.chainlit/`: Contains the Chainlit configuration files. +- `public/`: Contains the static files for custom logos and favicons. +- `app.py`: The main application entry point. +- `.env.example`: Stores the environment variables template. +- `requirements.txt`: Lists the project dependencies. +- `chainlit.md`: Provides documentation and instructions for the project. + +## Issues + +If you have any questions or inquiries, please contact [N Nemakhavhani](mailto://endeesa@yahoo.com). Bugs and issues can +be reported on the [GitHub Issues]() page. + +## License + +This project is licensed under the MIT License. You are free to use, modify, and distribute the code as you see fit. + +## Contributions + +Contributions are welcome! If you would like to contribute to the project, please fork the repository and submit a pull +request. 
+ + +## Links + +* [OpenAI API](https://platform.openai.com/docs/quickstart) +* [Anthropic API](https://docs.anthropic.com/en/api/getting-started) +* [Gemini API](https://ai.google.dev/gemini-api/docs/api-key) +* [Literal API](https://cloud.getliteral.ai/) \ No newline at end of file diff --git a/multiprofile-chat-with-auth/app.py b/multiprofile-chat-with-auth/app.py new file mode 100644 index 000000000..5e0e22e05 --- /dev/null +++ b/multiprofile-chat-with-auth/app.py @@ -0,0 +1,98 @@ +import os +from typing import Any + +import chainlit as cl + + +@cl.password_auth_callback +def auth_callback(username: str, password: str): + # TODO: Fetch the user matching username from your database + # and compare the hashed password with the value stored in the database + if (username, password) == (os.getenv("DEFAULT_USERNAME", "admin"), os.getenv("DEFAULT_USER_PASSWORD")): + return cl.User( + identifier=os.getenv("DEFAULT_USERNAME"), + metadata={"role": "admin", + "provider": "credentials"} + ) + else: + return None + + +@cl.set_chat_profiles +async def load_chat_profiles(): + return [ + cl.ChatProfile( + name="ChatGPT", + markdown_description="ChatGPT by OpenAI", + icon="https://github.com/ndamulelonemakh/remote-assets/blob/7ed514dbd99ab86536daf3942127822bd979936c/images/openai-logomark.png?raw=true", + ), + cl.ChatProfile( + name="Claude", + markdown_description="Claude by Anthropic", + icon="https://www.anthropic.com/images/icons/apple-touch-icon.png", + ), + cl.ChatProfile( + name="Gemini", + markdown_description="Germini Pro by Google and DeepMind", + icon="https://github.com/ndamulelonemakh/remote-assets/blob/main/images/Google-Bard-Logo-758x473.jpg?raw=true", + ) + ] + + +@cl.on_settings_update +async def setup_agent(settings: dict[str, Any]): + cl.logger.debug(f"user settings updated: {settings}") + existing_settings: dict = cl.user_session.get("chat_settings", {}) + existing_settings.update(settings) + if "max_tokens" in existing_settings: + 
existing_settings["max_tokens"] = int(existing_settings["max_tokens"]) + if "max_tokens_to_sample" in existing_settings: + existing_settings["max_tokens_to_sample"] = int(existing_settings["max_tokens_to_sample"]) + cl.user_session.set("chat_settings", existing_settings) + + +@cl.on_chat_start +async def start_chat(): + active_chat_profile = cl.user_session.get("chat_profile") + if active_chat_profile == "ChatGPT": + from src.providers.chatgpt import AVATAR, chat_settings, call_chatgpt, user_setttings + + cl.user_session.set("prompt_history", []) + cl.user_session.set("call_llm", call_chatgpt) + cl.user_session.set("chat_settings", chat_settings) + s = cl.ChatSettings(user_setttings) + await s.send() + + await AVATAR.send() + + elif active_chat_profile == "Claude": + from src.providers.claude import AVATAR, chat_settings, call_claude, user_setttings + + cl.user_session.set("prompt_history", "") + cl.user_session.set("call_llm", call_claude) + cl.user_session.set("chat_settings", chat_settings) + s = cl.ChatSettings(user_setttings) + await s.send() + + await AVATAR.send() + elif active_chat_profile == "Gemini": + from src.providers.gemini import AVATAR, chat_settings, call_gemini, user_setttings + cl.user_session.set("prompt_history", []) + cl.user_session.set("call_llm", call_gemini) + cl.user_session.set("chat_settings", chat_settings) + s = cl.ChatSettings(user_setttings) + await s.send() + + await AVATAR.send() + else: + await cl.ErrorMessage(f"Unsupported profile: {active_chat_profile}").send() + return + + await cl.Message(f"Welcome back, ##TODO-USERNAME. 
{active_chat_profile} is ready to fulfill your requests!").send() + + +@cl.on_message +async def chat(message: cl.Message): + chat_callback = cl.user_session.get("call_llm") + chat_settings = cl.user_session.get("chat_settings") + await chat_callback(message.content, chat_settings) diff --git a/multiprofile-chat-with-auth/chainlit.md b/multiprofile-chat-with-auth/chainlit.md new file mode 100644 index 000000000..24562edb5 --- /dev/null +++ b/multiprofile-chat-with-auth/chainlit.md @@ -0,0 +1,104 @@ +# Home AI + +* This demo illustrates how to use [Chainlit](https://github.com/Chainlit/chainlit) to build chatbots with LLMs the big + three AI providers: OpenAI, Anthropic, and Gemini. +* [Live Demo](https://homeai.chainlit.com) + +## Features + +- Multiple user profiles +- Integration with [OpenAI](https://openai.com/), [Anthropic](https://www.anthropic.com/) + and [Gemini](https://www.gemini.com/) chat providers +- Chat settings +- Authentication +- Custom logo and favicon +- Custom theme + +## Installation + +1. Clone the repository: + + ```bash + git clone https://github.com/mungana-ai/homeai.git + cd homeai + ``` + +2. Create a virtual environment: + + ```bash + python -m venv venv # We assume you are using at least Python 3.10 + source venv/bin/activate # For Unix-based systems + venv\Scripts\activate.bat # For Windows + ``` + +3. Install the package and its dependencies: + + ```bash + pip install -r requirements.txt + ``` + +## Configuration + +1. Rename the provided `.env.example` file into `.env` in the project root directory. + +2. Update the necessary configuration variables to the `.env` file. 
The following variables are required: + + ```bash + DEFAULT_USER_PASSWORD=your_default_user_password + CHAINLIT_AUTH_SECRET=your_64_char_chainlit_auth_secret_for_signing_tokens + LITERAL_API_KEY=your_literal_api_key_for_storing_chat_history + + # Optional: At least one of the following chat providers is required + OPENAI_API_KEY=your_openai_api_key + ANTHROPIC_API_KEY=your_anthropic_api_key + GOOGLE_API_KEY=your_google_api_key + + DEFAULT_USERNAME=your_default_username # Default: "admin" + ``` + + > **Hints:** You can generate a 64-character secret key using the following command: `chainlit create-secret`. To + > obtain an API key for [Literal](https://literal.chainlit.com), sign up for an account and create a new project. + +## Usage + +To run the Chainlit app, use the following command: + +```bash +chainlit run app.py --host 0.0.0.0 --port 5500 +``` + +* You app should now be accessible at `http://localhost:5500` + +## Project Structure + +The project structure is organized as follows: + +- `src/`: Contains the main application code. +- `.chainlit/`: Contains the Chainlit configuration files. +- `public/`: Contains the static files for custom logos and favicons. +- `app.py`: The main application entry point. +- `.env.example`: Stores the environment variables template. +- `requirements.txt`: Lists the project dependencies. +- `chainlit.md`: Provides documentation and instructions for the project. + +## Issues + +If you have any questions or inquiries, please contact [N Nemakhavhani](mailto://endeesa@yahoo.com). Bugs and issues can +be reported on the [GitHub Issues]() page. + +## License + +This project is licensed under the MIT License. You are free to use, modify, and distribute the code as you see fit. + +## Contributions + +Contributions are welcome! If you would like to contribute to the project, please fork the repository and submit a pull +request. 
+ + +## Links + +* [OpenAI API](https://platform.openai.com/docs/quickstart) +* [Anthropic API](https://docs.anthropic.com/en/api/getting-started) +* [Gemini API](https://ai.google.dev/gemini-api/docs/api-key) +* [Literal API](https://cloud.getliteral.ai/) \ No newline at end of file diff --git a/multiprofile-chat-with-auth/entrypoint.sh b/multiprofile-chat-with-auth/entrypoint.sh new file mode 100644 index 000000000..818280eb9 --- /dev/null +++ b/multiprofile-chat-with-auth/entrypoint.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +echo "Starting Chainlit app from entrypoint.sh..." +PYTHONPATH=$PYTHONPATH:$(pwd)/src python -m chainlit run app.py --host 0.0.0.0 --port ${PORT} diff --git a/multiprofile-chat-with-auth/public/favicon.ico b/multiprofile-chat-with-auth/public/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..cd4eaa5e4290161361f8925d0dfac305588bde53 GIT binary patch literal 15406 zcmeHOcW4z&5Wf_ANutJHVj;#JTf_!YL9B?1Vu>0H8Y@;T#QdR%6+}%?RDvjWv7n+@ zKrBfVv7lgYs0sF-VjJ(bx{tkmdv~wp>HEG6Z0_CewB6g;-I>{FT96i^g@Dt zt5vH;ty{OI;NW16!=B;c;Z(YGX+xb#-xVuX5MbK0X%5_;J$urRA3tdI>eY@q6)oyk zty;zC!H46_n>UZuZQi`uQODcl=fsH<8SbG&ha9-$$B$=qM~)nE)X8*s4-E~axVSim zo1LBQ!0pnd3#+?z>z1QVMc=JkH~RSTBg4fX$UA)aFkQHCfz^He`jv_mD`v@~PMtb* z?%X+sfA{WP+P{B4?cKYVUcGw7>Y6lZVo49ZMvWT9a8I5*>A+1(OJj9Ih757k$$gkP zb0*cOQG>$5!syGFFO-*;MR{2Rharv$BodyYl-`KxJIC$`&lJENU>qRu$v}uzP&s9AlA|j}G@#56C zZ(jpne2<`@ASd2arc5#5S=p*9I^4N)hn-L5?++n!=FDN|sj*|n(xgd~*!#9^+YIt? zHgiRe8BVKKt?1daX9k|`zErMU*()8e{!O1goo?Q|>8@yok8_(@|J-@d(*{Har? 
z63%`J2?;c8*f7HSi*uw1z;Q>887};uoSYnyHQSG5{C@rV8E`gk+~^beki*aFT!6~H z%9k%s@FzHAW@dUMzjp1~)Ustu18v8S9XVU}eDdUpp`DM#>C>kjc%MFfQrarVHG9IJ zz+Re^l*Ifm;KAnMYl%>|Ze1rGV2>iGaLnZLdhRcR5Bw9@c1lVL!FLh?bC1*TXJlj~ z^LIF&*?z(CRZpyYSZD3v(xppI^1)ABi)PH2p~O+imFqB{j~zQk`T6-uRp7segoHTp z$Jv9Qg;uUy>C`UEv3p01p>yZXY^`0sd^x43r_-K2duY+3MW(#r(|-H*jn=MRYl>qh z*V}fiTd*5%`TU3J|E3R=t&b~wz`ylI zLifA?)<2MS545>br0*X)$9WYwn=4nYuzU~VG4A@~$g_0n)QS1H0|pGBiWMvRWqfg_ zm*YNg03JMeK+T&sx77jk{rmSf_{@lju3NXxG=AvI^#$X$VZ#O|p9Og+F$ZZTvr(f) z^zh+BCwll*Wy_Ye?Y|jcSL@4^DMMSfY;lqge+#j2J3hmQ4=4EEGVI*BlMvIfqxZHQ zW3+YaR#`lIU%Ys+ZA<_$Cozt}86m!dn6S6{+0mCORf>>TRzYfNs_h(}J$p9uJ!Sgi z$B+BPhLN*|->rh(yLS`fL{?(}8-cCJfSh%eDpf3Lt@ygrhP<a#dz&6+j+qW_>lgGkPaKq17<>|#=gdGlPk*a^fS>({UE7yXe(d;a{nD4zZP{{1_( zZQIsThj#7S8PVWvCYW)Wj#Kf%R z`mzo8RaAhxA69Y@KfHSNs*2R5{lkj4RU6j*sHiA*R|I%i$5rt9^=oC^P~?rhN(I}u zZ@28b$iv;=6DCYx_gipQfsS%cPt*r@U(i<+*fVma$W0>mjPnEHjw-lu<3^$B5BcZM zpI3=>szZM2(W6JKU`>LpqtCdjgtZ@IpaSHeUAgn+?$`q#F@EH{j~+d$6791(Ik(}7 zu2wY2k>kGA#ful|#fuj#kLWYybw;B{k1n)1B=!s6xlf-y^!V{(rTXvQy<_Vn{DS-U z?=ySB{WRohkTb-*Qo))vYy6@=`ht6F`}XZqsX;q->|pi^8^*jt!P<|4J7KVK>~}JF zn_Cvg*zPxMV(#3z%JU@dy;ZGR)pRV;4}1?9Q0FgS68(#hk0(_g9p|GKEn1lBZ}#nw z7cXoZ7QYMjHTixGA5$xT5_#i=3l}=gA=r(t&k^vsaZi`u%@p;=@8slUuCn2I;=~El z@dq7j;LV#i1{&-SzRIn0J+X)4j_9&w%an5ywyE-Ian8p);e6yL>_c@9gntA-Nyhi| z9q}IQnebV#K6L2N!RfxVnNQrKM-NjyVE?LE#;#qvZ2dTQ`qH#%Q-jZhew;aThTUz! 
zU8=^78?$>Bs`U{0dgy5;5B`8mhjWUZA7{o-Rgbs;&Wy4gK5;Q;akfDWMTX0lFO!vD z2s_4Dh=8*b_6?OTp44IdU<1gvy9?+S?!Z{d#XSW0UmS3bi;0P`q_N^>)`otJ88e1( zHv#8KQ6`ryKVdgmC(U@GzEY)1Y;44HDe~)9G~Tq~{#p0#-Pzs?yG88d>C>mmyAYUL zxMyKy184(3(WmD3;Jwk)juvvUf5Fx;zhLLEXUrGem60GYAA?J$(vp z1_JGo0PZg_G62^K>p(Q{!{mO-ng{~1HST`*#2sf71cCPLIGbCMtW1odNIX^!fx6BCX9V_SXkSKwXBK!43Wr1^#JC zV@M=-CDJG$8Vd5Jg2WAOR-&;7AB>Fpek*So+lz98E+LoZU&z zc%0a-X@n!*lcXgr{X^s$yUQcatXv)D?n2?Pfy$mrVSr1=U3n ze{y^`lJjVf-zY#Xb{Xyn0ugQQueV0jg= zyao*N8|z06VSh1BdXrA0v>h+2Tt*>P5MXtrqcT`o@vZfQ0ZljxgZwV>fBE}2B>g4>fYVQX4fz3X+DYu}6cC8r?=)Q3-22}Au>U#po^ShU zYL$(P8L*>b2X!^181_JC7{`^35Eqc0;fe1scP6;APf8wfITY!_Y8H&@#c&-na*a_y zN5z=1?Kybpd??&mMl7%HIWu#8(dsI7wd?>(%SiE1_0{W}KCNG=_H7!gn9s>q+q72g z_Jf2N#o>C)*_tO9#kt`(IGRm4IQ0%2>cSbLh3GlO;e`K~crU$M_>fC$l1ieAD=)sH zzFyC+%DdyEm9H;UNJvPww`+F!)R{9}-G+*#6_u6a!(O92-3DG>YDLAxCJ|^9iq@Nx zpHJ;bH>6LojS!N5c?GkzGKeqqTPKVV78VxNeHJsC3wL$`WVquYi@v_T@5ja@vLaD# zEbQ!5(ZDV7EY@m!b|)vNudA!Zd3aS1p*1A*ik^)K5Ow`d)2Y11UhzLeT z#sXbDBMY;H;)k`3je!hdW{J%-F$u1Sr$MjU+uNn)IRd!hsQSX5xbUZY@6gf#)*qKH zJun$a6Dzu};I$c$%gDs^VrzYgQ%LAQU|^uSrsjBCiY5k&ZJKCFFtxT0?d`n~hAl5I zm-jykxcaucTbbsw__3KB7S@n!UHpD_HVlZwuG*Jpsx3v|$7h{9Uk>DCfhtO!dsqBn zYHI%@v(S~5D|XLGOit~oO>xI9`@ijFSVq4HqEFNyN^Na#fB5u?2@uN5le4Q05WaKg zPF7yt%bpy|r#M^Lu1u5S)0(Bs%Jq>!7!E!@zFdb|5kXbA6INDLNDoq-y&M;A^*4XJ%$(R}9n6y|0)`ZE(%6zI8H;_XVKs=o2x1&eAeO zKZNZ=al5v@rDa0Xq9c+r_(46wtCip6!XP_q>yCq)n=D`41-=|}dLdq^JS;-a*VPfp%?_)wzQp$HI*jf)dkRJ>bJot9~md@Dx!?AUUj zgQ%yVUY7Z>ucaH^`QglT#1aQjeFlR(!JY zrFu2hVj|Ayi1yAJa&DlcWrn!EwsvWAWnPLpHa6z|@fBxsF4Xc^+^d$B2ClO(@p7;$ zJp;pHUS8iP0E_Wc*s!2i0VrWOyUv#U%9qi0%>&3sX4&VgtjItDZ7d*%5AQ=D5Et%4 zBqZv`N;Io9Zi zz5lXB&P*X}%lR2@80uPigt5ilD5qy^pu|=F(19v6wc#5z^hBxdE;m!0VxLd_JX{$a z6?JWUb5+To$>JDKYT%Af&PG>XU&#DmnWU5ygKZhEWTquxa|L_&@L@4|c@BVp&g0|f z4+WZkvLXrqueHrhNf{YNG#YK}$t!2a2q4R(Hd7M;-PhZDtENVKngmULW_Tk8%b~UY zF~)d;KqN-ry}P%>=1yZHJgdhg)3u@UHYQL(YH zDR62R>F(~9?_mLe`<@0d@pWlQ$%i;g`|Aejxr+F&Bb|)#@-rPLa!XN5Qd(iB3 
z=6KeAr9%hQ2_MPLh4%w@DA{>{N|zMrrD|nm<$i4-AjS6Lef0DSamDoUC&@NXc|nO% zxsMWSW70h;i+o$+!B)kW?=&m)Ryrs-wSBB^a#j9XNS>U$oas9JK7| zEEPn02*MQ<6w>otTPLpI@%c^d@dZ1oDttoz{{F_Et*j8MO(5V`us|XJmI&(Sk?I!n zzSJ_Bbx_YCxGdtVC45D^C5=)ob=I11Kby#+Rm+ePF4roxOu#OFB2lB%=5Z4t_2~Ib zTD%g2>6tSucM2}|MeDqfQ>=>R0pbQ%x0jDGgqcPE!UFHU0O#_>sB{qEr3KnU~*mPcyIYK zmq{YdNM&Y_w!$nCZ_yJRd@85s;p4|4t8&Lotitd^45mn=Bvez=Bw_*k#mM-b>#K~u zS;H+#SQyL0^z=ga4|>KFMf%U5KTprh=v}?4dFuD?} z1_uXyg2@*w!y+fis5kbO@YT=5O6#zzD{khNmXgPhw>UEHsmnMW6BQSCGbu^n#;sd6 zvMq!O#jk~I@l@D!9%b$2JP#*lTaws78D13Fcnm#LU$jr1G&D?q_N;p*<%qt%zNv-9 z&GK?}b93`VaKk~k{{0qY1<@d;XDnUpu7Yb&R&}-T^73*}8y^P;sKiEVW*$2{JnS*n z$n+*z4d1`yBO)SVJkOSz%g`^vk*OiE(DByz_UR9f1QR$0Gy?_L1A+jO_ju@BC^DX58I&TTG*qwdegg5_oHAEHr5F!U4 zfmV&*HP6A3Wq3^GnO0%n##>ZLIf@_7%F6l%-<+uFmDkiR z>Fn&x`Rr(omb;MJoQi=Y|Hjl6PtT@sekJeO<&~9(X=#Xly4QNFxc25|CSa|pVgqH8 za(NSE@uQ=aUhJNpo@6rlJvcc!`Z~~I%BrdoFqjZ9Vi-@U2kun8#odk=S2wq?cyURd z*0zU!|9(jg4SwMKe@=$`)$7+mIXTC_XzXmvLcRiz%F1H2wzgKaj_m5}40`zR;q~^e z$7h|A8a8Hf^4(YF-XW+7VB1@d?*|MxUbMLg1~SVDij9qxr#Ho5WOlY!9RTe2zIs&& zuSA&XKALusTbSyBadp+lV!>TGk9F4G)9nsOFJ4^tA#aRF>g)vpBVg3syFtK?s_V~i zEjE_>>({Rb{4S@Q+S%Fs(wgd!ZNA75-EV0}kZwui2MKBU#;&hCH_N^e8Ocs$^z`x? 
zZw%$qd+x;dQYR3B5GM))?>KTMo4Z|>>!un)*vGrh>XhWg=nJX4gF@IvRW(!xsG8wc z0J8QS&-!Ro8dw>)vo2k;`B~F+U}tNoowDO`ru7iq!Ed3dAVe3NmYeI=&*8Z=*{XhJ zu0Lx)6Bs{$tw$MHDh19NVvat^`#BTbf*C<0zOzpegRRUxLqp-Kqk*HD*xOBq;J|JH zz?&-8Hoq@9YCb!cd*752`a3L9|;Gh(&XyqI%qVk6S6l8^g_RDXvi+P&2@gLf(yWV!G)>&ev3?cdU{bwNw-<} z6oP>LOlN=a$8z3{k8TvCz_7=~y|OZw*K+>S^j zCl3$3j*iZan>WR^v{F`IhoU3XUqCJh8Krmh`%S17M$7?8F*Cx_+uh1|N0~FL6!inX2&^XK-ZRO-+cMt#w9b<{)5DxOeZ~ zgTC30o*w!;2EiK}zFQl!`D-6SMBP7i9vLCtPJ8g++N#m#-lw*_fvFUVb4+VH(AD@C z*jLeX*9y1E>f)&S^fdne=4AO_9!!5%Uk#iqH0{uSwQaon-tqLwKj3+KmxBHUb|E_G literal 0 HcmV?d00001 diff --git a/multiprofile-chat-with-auth/public/logo_light.png b/multiprofile-chat-with-auth/public/logo_light.png new file mode 100644 index 0000000000000000000000000000000000000000..62171eedfaa6384a044039894a58bdde56dc7f4a GIT binary patch literal 5257 zcmcIoc|4SB`yVG;vYt{#VsNC?7{gd*8Z;VPi8x-2wb`s=hM6I<6tZ+;BncrQ zvSb-$JBcJ)vaeB03+a7y&grer@Av!T_rC9ZKF`cE*L~gBecjjhy1w`IJh3OtjYWi{ zg+L&Xh^YzI3IyU&0Par;@&n&hxG*yCv(?80#{hvun>H_=%LhbvgFw6+9@chDyW?gk zBF$SBPofdXssY|UfHnw(4nP67-ee{o65vgtGEf1!%3m~4!2PBermQCf`SXLOlUz|& zSff8xfq%Nn?o6f+3I;oM>Xhm!4OJT54F*RdkuWuNn7TR?(10=msZ4wTl*&*9EPZbZ zOJ)%19zIMD8Wpl>8c(44F?E%dziR#A5{dYA86Q77<;x-@B8*HSdy}b51`MtW|ISMi z5ykXiQpi6zj-fDrkWf4}cY>nescyQ;0ZndB388kmSk^F4}{AutAos4IaNqXvP z>IkTsCR7cHhW#+~6+`r&1BL-iQ<9#xnhsn?okWDfk-$6znWPRy64XdgErd2%o9wEi zK~~3qwe`o8zgaRO0ERTxHPy8=k#GcD8>y+I_I1ve!M{zhruh-wH@Bsy{#El|vVW=n zFED=}uD`bWeOUiPK>K`CRGTP&sn$(=fdK$tjqmUUWGIXu)15}wGbiD_{U~G#4Ps9A z#(TNr{oV0Y5A^1=zf^wM=D(km0CRkokzaic!2A3y7Xf9|m%O3Tt!Xrh-Vr?29}n4Q zMW&L-bTWNE8up{c|K;yrl=OoRKu+J}HS8<7>9w8pehLDK!A-Fk>wt`fp)j+aRXKsB zsVN;DN`hbvFN;T$sXTL4+VAAio2Fh|FITDK+QiXtLT8!*e|!6#@_Re3LUA32ggT~z zy@;h{&qOhsuw%cFya4auetp&P#WjHu#ZZ~SIflNT+>-Of3%x@j!>_vzjb+v9zwTQ5 zv?1tP)-T*5m6T|($gkESB_KgG^OwP%hxhx6!&^ukOp{d7p8u>cFS(khF@)RiwogT+ zG;pw>xc1yX3T#9IVNj?@-RF(!j?!xXg>a*slIjbT;I;82i>1{*mqXT^I9M&Kr>Ey^ zpZ=$n?e}OS)Uv@VPzP~2x!1o}4LzycmR(?5oLy5hIO%&VMRm{eaC6))-^q4xI+-?f 
zpKLquyy*lEXZmrEblb|z(-_`F^E|~~WTkM<*f9Ee;2sQFRIq zM(?%A5(Z6APuHIs35hv(P6&ZO{8nAPJ4xBXh{;4`1x|w9%k2yNR#sMCzHErl@t>C} za;lE*>@>e=t->oHKwKQG3ivpaCm|`RG}~oR+&4DDcK?=Ys!$ET)pYHNol zmw}lkW@b$Tw7LtasZs@;+b(sowndI5xIS3jdU$`C+Zcf3!@1(RcrKSK!AZVyg}{m5 zV^`@TFSKo&1Q?85!WkO!T}n#w9cvLOSluNliEoS+u`40)`7aK}=Kq=(-=77*CJ%#& z35dyJEi5i}aJL*bF%h(~vPwuy9mB542otX{~_O3ys{xHPtHC{ z2i$8+vd}|%3gS{bT<1sXE0L&3XE3fmaX(>W zW28D|mb_$g^r!(A8`&}TzS|5Xg9X)o-Uw>qaK>Jgd--41PBUxm>M|w}6omOBocyL& zPuFl?c&VkCvBw=b@pYaYpa6XZD!^d3|#w0n3E>7v*t-qF&VD zn%Jf$tc;FN{UU09_1#F=0NSX~W?)|8r%w%!*`r(xbGB#yFG@-c?g*4)-|`qI5wKcY ztM`Pg|9PEpx4b-7g)#A1@}T|Q)}m^xfT&D;6=RCu`Q7U3Xl{!0!?D&wF_ZH8mf{rl2NN=j^ENN}*v z;vmx6)^@D8ZbPBVl+IK_Uw%YV5Rf3>xyoU&$L?XUd&FWdUgX^K9uSw5jEdT}i_n#& z%cg5;YGyA!>_~-gkqcXg|FmVxb(?~cz_+gSeq5QWyt1+Yz!Jm5!>x}V$)^++7juA# zCY&2LZV-lR0zqf$>km@~D-*4YfJp5x%k=i}@W9Y$#T{cm{q&Ow5u2Hr`TN{zr@M4n zgLlMqckVIvSoAwH*`Rs&n+dU1nZuur93$M606)?lxm8nH5eCa6pC2O`lP9~`rtH$f z!ou--$C`lUgg&4W*mVAghRMNwZojQWTdPP02M3$zDtKnL`)>Dl^}}EqSci@$4}qaK zU20#xd?{ZnDa02Z9&YkBYIb(Eb)jShaS%Z0<;;xf$K4y(V`QbD7?s-}muAsuG-cY3 zT|0Mbjr#$`oiFoySWNfG$hF`a-F-xqXSsAZB;hpnaG1CyjFa|hFGak!uWulPQ&3Tn zg4P=!9fiNrwXm?T!og2#P1?&70$9!W@I?bSHE9BvhTowMj%&C*xJT$x&CI?^!8}Dbz&VIWwJuJ zxb!5U{JDosqBiMfdu5d`)klhC+d2pGrE7Uhzsa&GP|~Ge{bYPgHZdioFQL!13_KFL z1}!Zuy-m%sad3!x@xneiIT>=`z&7B3HKI~gZyZl|Ef4jyV~Hr7fpxL#Q(UR$w;LAiLNWEpshR+ zX%x|LMtdaq>F8-sPtQ9|jJuKT#wI=HWWb#nph6xWW9j+^=UzMh%N<{9FWSX8E{p`l@RSEH^%AJBqm zPtAh6c5>0Y-8+vRmbze+(nD*A=q=)4#tpXP$!klo(M1Zb$|fpp2Ty03Pjx@I1{~0> zhfb@nRg%R`S0|EY(eX;vGxIGn&4Tsnb-5$O=P(e!?F58Ph zSX)<*Lj)hP@)+k`-v>)SPBJ^Qkrx&j6(x^GJ5t`ZuzNjuOrAQ7$|ibMk;Qo+`nh*Y zOX;n1yS1qauFL0;F&VrV$UB?p&p3cz60KELRD{AI5IEeDxz<9+w>Y;oMh0;49tIJ% z5!~&&#$9NKyDKy9>W(xpq`Kzkqug0cYS;w}z(t;UqLk6jDf@U85iFsX+wy7!lo z1Aya$BI|~bE(!iX2O_oEYlVb_us;%s#77p9my;lZcBQ1PGjnP2z&_)r7?4NZ5LRS%J+wP7y=k?3> zX73;@K&b&s4FZoiBefyvG6tYKr&m3Lt|}oh;^Gn#m_a&dL`}D?Vq(TtR+o;=`fIg0<^knX z&EB|s_pbcYEkL21i;G)s+HS!rI+kmN2S`6)Ha{=A#-b`m(?;BKQTpgI+y2z#Jq$0% 
z>)GvwaK3FB_IVJ&ef##Ms;%|eg-?H&y2cs1sK3~AUr|vp@yZpgSD((D0j)1JinVrj z%BF53wGCD=EG;eJci4@aVVRV`1s$y5d&e4|dWHe0`s}M$1gwz}s<6|0;2!eRV!{F3 zCc_3VH}7IAJDdO_ei8^pqE&UCVY#zc!c$SQN|dgzV-+7a;T>MC$(x_fyw4nGLu z2>Z`^`*{UP;B*cIr+pgKW1ZOm-=CP6c)#-0V!3{c7)xbtW+wLX0ozw^Cfe*OsF`v0 z)2PNEdFc!MDKM872igOrk44vti-DI+>PtX7{6ca*D=SM-Sh(q~$J5rS8~lm(g|e#} zvwJ_b`?U-$tO1V!-EX#e)lGZ#6*U7Vw)d5Ctub7_wStloA0RR$la)f(mpNxS0|2g@ zDGKxlKzk#&b?aH9oK4gcQd4y%M*wEYXE#`Tr3j>@rLo)Mfu{_K*AJFkcDvJP6}V^i z#m#+vyI$9Q)>GHe*zDJmQ&Mtn-4YTJ5mA#8o?cjps;JOj|MW>hTKe4dv^&r^3Ld+1 zFfkZ+%*3QM!Qn#=*vn^qG)8W3F9seA^EOgUb*iGI!CM61cb|vvJtV5ok~*)$k|LIl kK?vs`#YeFI>%uj;)Tt2ghZZBv&6kCyhUVBJgOlO^1eCNbjsO4v literal 0 HcmV?d00001 diff --git a/multiprofile-chat-with-auth/requirements.txt b/multiprofile-chat-with-auth/requirements.txt new file mode 100644 index 000000000..e94ca96b2 --- /dev/null +++ b/multiprofile-chat-with-auth/requirements.txt @@ -0,0 +1,5 @@ +chainlit==1.0.506 +anthropic==0.25.7 +openai==1.25.1 +google-generativeai==0.5.2 +-e . \ No newline at end of file diff --git a/multiprofile-chat-with-auth/setup.py b/multiprofile-chat-with-auth/setup.py new file mode 100644 index 000000000..48b4d4209 --- /dev/null +++ b/multiprofile-chat-with-auth/setup.py @@ -0,0 +1,42 @@ +from setuptools import setup, find_packages + +setup( + name="homeai-chainlit-app", + version="0.0.1b", + description="A Chainlit chat app that supports multiple profiles and chat providers", + author="Mungana AI", + author_email="info@mungana.com", + maintainer="N Nemakhavhani", + maintainer_email="endeesa@yahoo.com", + packages=find_packages(), + install_requires=[ + "chainlit", + "python-dotenv", + "openai", + "anthropic", + "google-generativeai" + ], + extras_require={ + "dev": [ + "pytest", + "pytest-cov", + "flake8", + "black", + # Add other development dependencies here + ] + }, + entry_points={ + "console_scripts": [ + ], + }, + keywords=["chatbot", "ai", "openai", "anthropic", "gemini", "chainlit"], + classifiers=[ + "Development Status :: 3 - 
Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + ], +) diff --git a/multiprofile-chat-with-auth/src/__init__.py b/multiprofile-chat-with-auth/src/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/multiprofile-chat-with-auth/src/providers/__init__.py b/multiprofile-chat-with-auth/src/providers/__init__.py new file mode 100644 index 000000000..8d5b0a1ce --- /dev/null +++ b/multiprofile-chat-with-auth/src/providers/__init__.py @@ -0,0 +1,5 @@ +from . import chatgpt +from . import claude +from . import gemini + +__all__ = [chatgpt, claude, gemini] diff --git a/multiprofile-chat-with-auth/src/providers/chatgpt.py b/multiprofile-chat-with-auth/src/providers/chatgpt.py new file mode 100644 index 000000000..a65b6a8c4 --- /dev/null +++ b/multiprofile-chat-with-auth/src/providers/chatgpt.py @@ -0,0 +1,198 @@ +import json +import ast +import os +from typing import Any +from openai import AsyncOpenAI + +from chainlit.playground.providers import ChatOpenAI +from chainlit.playground.providers.openai import stringify_function_call +import chainlit as cl +from chainlit.input_widget import Select, Slider + + +open_ai_client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"]) +AVATAR = cl.Avatar( + name="ChatGPT", + url="https://github.com/ndamulelonemakh/remote-assets/blob/7ed514dbd99ab86536daf3942127822bd979936c/images/openai-logomark.png?raw=true", +) +tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + }, + } +] + +chat_settings = { + "model": "gpt-4", + "tools": tools, + "tool_choice": "auto", + "max_tokens": 1000, + "temperature": 0.2 +} +user_setttings = [ + Select( + id="model", + label="Model", + values=["gpt-4-turbo", "gpt-3.5-turbo-0125"], + initial_index=0, + ), + Slider( + id="temperature", + label="Temperature", + initial=0.2, + min=0, + max=1, + step=0.1, + ), + Slider( + id="max_tokens", + label="Maxiumum Completions Tokens", + initial=1000, + min=100, + max=32000, + step=10, + description="The maximum allowable tokens in the response", + ), + +] +MAX_ITER = 5 + + + +# Example dummy function hard coded to return the same weather +# In production, this could be your backend API or an external API +def get_current_weather(location, unit): + """Get the current weather in a given location""" + unit = unit or "Farenheit" + weather_info = { + "location": location, + "temperature": "72", + "unit": unit, + "forecast": ["sunny", "windy"], + } + return json.dumps(weather_info) + + +@cl.step(type="tool") +async def call_tool(tool_call, message_history): + function_name = tool_call.function.name + arguments = ast.literal_eval(tool_call.function.arguments) + + current_step = cl.context.current_step + current_step.name = function_name + + current_step.input = arguments + + function_response = get_current_weather( + location=arguments.get("location"), + unit=arguments.get("unit"), + ) + + current_step.output = function_response + current_step.language = "json" + + message_history.append( + { + "role": "function", + "name": function_name, + "content": function_response, + "tool_call_id": tool_call.id, + } + ) + + +@cl.step(name="ChatGPT-Completions", type="llm") +async def _get_chat_completions(message_history: list[dict], settings: dict[str, Any] = None): + settings = settings or chat_settings + if "max_tokens" in settings: + 
settings["max_tokens"] = int(settings["max_tokens"]) + + response = await open_ai_client.chat.completions.create( + messages=message_history, **settings + ) + + message = response.choices[0].message + for tool_call in message.tool_calls or []: + if tool_call.type == "function": + await call_tool(tool_call, message_history) + + if message.content: + cl.context.current_step.output = message.content + + elif message.tool_calls: + completion = stringify_function_call(message.tool_calls[0].function) + + cl.context.current_step.language = "json" + cl.context.current_step.output = completion + + return message + + +@cl.step(name="ChatGPT", + type="llm", + root=True) +async def call_chatgpt_with_tools(query: str, settings: dict[str, Any] = None): + message_history = cl.user_session.get("prompt_history") + message_history.append({"name": "user", "role": "user", "content": query}) + + cur_iter = 0 + + while cur_iter < MAX_ITER: + response_message = await _get_chat_completions(message_history, settings=settings) + if not response_message.tool_calls: + await cl.Message(content=response_message.content, author="Answer").send() + break + + cur_iter += 1 + + + +@cl.step(name="ChatGPT", + type="llm", + root=True) +async def call_chatgpt(query: str, settings: dict[str, Any] = chat_settings): + message_history = cl.user_session.get("prompt_history") + message_history.append({"name": "User", "role": "user", "content": query}) + + + if "max_tokens" in settings: + settings["max_tokens"] = int(settings["max_tokens"]) + + stream = await open_ai_client.chat.completions.create( + messages=message_history, + stream=True, + **settings + ) + + async for part in stream: + token = part.choices[0].delta.content + if token: + await cl.context.current_step.stream_token(token) + + + cl.context.current_step.generation = cl.CompletionGeneration( + formatted=query, + completion=cl.context.current_step.output, + settings=settings, + provider=ChatOpenAI.id, + ) + + message_history.append({"name": 
"ChatGPT", + "role": "assistant", + "content": cl.context.current_step.output}) + cl.user_session.set("prompt_history", message_history) + diff --git a/multiprofile-chat-with-auth/src/providers/claude.py b/multiprofile-chat-with-auth/src/providers/claude.py new file mode 100644 index 000000000..65e311b14 --- /dev/null +++ b/multiprofile-chat-with-auth/src/providers/claude.py @@ -0,0 +1,76 @@ +import os + +from typing import Any + +import anthropic +import chainlit as cl +from chainlit.playground.providers import Anthropic +from chainlit.input_widget import Select, Slider + +anthropic_client = anthropic.AsyncAnthropic(api_key=os.environ["ANTHROPIC_API_KEY"]) +AVATAR = cl.Avatar( + name="Claude", + url="https://www.anthropic.com/images/icons/apple-touch-icon.png", +) +chat_settings = settings = { + "stop_sequences": [anthropic.HUMAN_PROMPT], + "max_tokens_to_sample": 1000, + "model": "claude-2.0", +} +user_setttings = [ + Select( + id="model", + label="Model", + # https://docs.anthropic.com/claude/docs/models-overview#claude-3-a-new-generation-of-ai + values=["claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307"], + initial_index=0, + ), + Slider( + id="temperature", + label="Temperature", + initial=0.2, + min=0, + max=1, + step=0.1, + ), + Slider( + id="max_tokens_to_sample", + label="Maxiumum Completions Tokens", + initial=1000, + min=100, + max=32000, + step=10, + description="The maximum allowable tokens in the response", + ), + +] + + +@cl.step(name="Claude", + type="llm", + root=True) +async def call_claude(query: str, settings: dict[str, Any] = None): + prompt_history = cl.user_session.get("prompt_history") + prompt = f"{prompt_history}{anthropic.HUMAN_PROMPT}{query}{anthropic.AI_PROMPT}" + + settings = settings or chat_settings + if "max_tokens_to_sample" in settings: + settings["max_tokens_to_sample"] = int(settings["max_tokens_to_sample"]) + stream = await anthropic_client.completions.create( + prompt=prompt, + 
stream=True, + **settings, + ) + + async for data in stream: + token = data.completion + await cl.context.current_step.stream_token(token) + + cl.context.current_step.generation = cl.CompletionGeneration( + formatted=prompt, + completion=cl.context.current_step.output, + settings=settings, + provider=Anthropic.id, + ) + + cl.user_session.set("prompt_history", prompt + cl.context.current_step.output) diff --git a/multiprofile-chat-with-auth/src/providers/gemini.py b/multiprofile-chat-with-auth/src/providers/gemini.py new file mode 100644 index 000000000..d95412b39 --- /dev/null +++ b/multiprofile-chat-with-auth/src/providers/gemini.py @@ -0,0 +1,95 @@ + +import os +from typing import Any + +import chainlit as cl +import google.generativeai as genai +from chainlit.playground.providers import Gemini +from chainlit.input_widget import Select, Slider, NumberInput + +genai.configure(api_key=os.environ["GOOGLE_API_KEY"]) +AVATAR = cl.Avatar( + name="Gemini", + url="https://github.com/ndamulelonemakh/remote-assets/blob/main/images/Google-Bard-Logo-758x473.jpg?raw=true", +) +chat_settings = settings = { + "max_output_tokens": 2000, + "model": "gemini-1.0-pro-latest", +} +user_setttings = [ + Select( + id="model", + label="Model", + # https://ai.google.dev/gemini-api/docs/models/gemini#model-variations + values=["gemini-1.0-pro-latest", "gemini-pro-vision", "gemini-pro"], + initial_index=0, + ), + Slider( + id="temperature", + label="Temperature", + initial=0.2, + min=0, + max=1, + step=0.1, + description="The temperature of the model. 
Higher values mean the model will generate more creative answers.", + ), + Slider( + id="max_output_tokens", + label="Maximum Completions Tokens", + initial=2000, + min=100, + max=32000, + step=10, + description="The maximum allowable tokens in the response", + ), + NumberInput( + id="candidate_count", + label="Number of Answers", + initial=1, + placeholder="Enter a number between 1 and 3" + ), + Select( + id="response_mime_type", + label="Response Type", + values=["text/plain", "application/json"], + initial_index=0, + ) +] + + + +@cl.step(name="Gemini", + type="llm", + root=True) +async def call_gemini(query: str, + settings: dict[str, Any] = chat_settings): + + prompt_history = cl.user_session.get("prompt_history") or [] + if "max_output_tokens" in settings: + settings["max_output_tokens"] = int(settings["max_output_tokens"]) + if "candidate_count" in settings: + settings["candidate_count"] = int(settings["candidate_count"]) + + model = genai.GenerativeModel(settings.pop("model", "gemini-1.0-pro-latest"), + generation_config=genai.GenerationConfig( + **settings + ), + tools=None, + tool_config=None + ) + chat = model.start_chat(history=prompt_history) + async for chunk in await chat.send_message_async(query, + stream=True): + await cl.context.current_step.stream_token(chunk.text) + + cl.context.current_step.generation = cl.CompletionGeneration( + formatted=query, + completion=cl.context.current_step.output, + settings=settings, + provider=Gemini.id, + ) + + updated_history = prompt_history + chat.history + # TODO: need to limit these to prevent exceeding model context + cl.user_session.set("prompt_history", updated_history) +