File size: 5,895 Bytes
c1fe1f5
 
 
 
81877a5
80cf692
81877a5
 
 
9c11e3d
80cf692
81877a5
 
 
c1fe1f5
8ce06a3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4961fa3
9c11e3d
 
 
 
 
 
 
 
 
 
 
 
 
4961fa3
 
 
 
 
 
3668c61
07d943d
4961fa3
 
 
 
 
 
 
 
 
 
8ce06a3
4961fa3
 
 
 
8ce06a3
9ec8758
 
 
8ce06a3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4961fa3
 
 
c1fe1f5
 
97885b6
81877a5
c1fe1f5
 
81877a5
 
 
 
 
 
5e4c255
a1cf67d
 
1fa5c27
5e4c255
81877a5
 
 
 
 
 
 
 
 
 
 
 
80cf692
81877a5
91cf9c0
5e4c255
91cf9c0
81877a5
 
80cf692
1fa5c27
81877a5
1fa5c27
 
 
 
 
 
 
 
 
 
81877a5
4961fa3
8ce06a3
4961fa3
c1fe1f5
 
 
8ce06a3
c1fe1f5
8ce06a3
82793b3
8ce06a3
c1fe1f5
80cf692
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fc87289
 
c1fe1f5
8ce06a3
 
80cf692
 
 
 
ba2642f
 
9375232
fc87289
ba2642f
c1fe1f5
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
# main.py
import chainlit as cl
from agent import make_graph

from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import AIMessageChunk, HumanMessage

from chainlit.input_widget import Select, Slider

from typing import Optional
import os, uuid, base64
from dotenv import load_dotenv

_ : bool = load_dotenv()

# Function to Encode Images
async def process_image(image: "cl.Image") -> dict:
    """Read an image file and convert it to a base64 data-URL content part.

    Args:
        image: A Chainlit image element; only ``path``, ``mime`` and ``name``
            are accessed.

    Returns:
        A LangChain-style content part: an ``image_url`` dict on success, or a
        ``text`` dict describing the failure when the file cannot be read.
    """
    try:
        # Keep the try body minimal: only the file read can raise OSError here.
        with open(image.path, "rb") as image_file:
            image_data = image_file.read()
    except OSError as e:
        # Best-effort: report the failure as a text part instead of crashing
        # the whole message handler.
        print(f"Error reading image file: {e}")
        return {"type": "text", "text": f"Error processing image {image.name}."}

    base64_image = base64.b64encode(image_data).decode("utf-8")
    return {
        "type": "image_url",
        "image_url": {
            # e.g. mime "image/png" -> "data:image/png;base64,...."
            "url": f"data:image/{image.mime.split('/')[-1]};base64,{base64_image}"
        }
    }

#################################
# User Authentication
#################################
@cl.oauth_callback
def oauth_callback(
    provider_id: str,
    token: str,
    raw_user_data: dict[str, str],
    default_user: cl.User,
) -> Optional[cl.User]:
    """Accept every successful OAuth login.

    Chainlit invokes this after the OAuth handshake; returning the default
    user object grants access without any extra filtering on provider or
    profile data.
    """
    return default_user

#################################
# Quick Starter Questions
#################################
@cl.set_starters
async def set_starters():
    """Build the quick-start suggestions shown on a fresh chat screen."""
    starter_specs = [
        (
            "LangGraph Agent Creation",
            "Create an Agent in LangGraph which can search the web using Tavily.",
            "/public/msg_icons/chatbot.png",
        ),
        (
            "Explain MCP",
            "Explain Model Context Protocol (MCP) to a non-tech person.",
            "/public/msg_icons/usb.png",
        ),
        (
            "Composio Tools Integration",
            "How can I connect Composio tools to my agent?",
            "/public/msg_icons/tools.png",
        ),
    ]
    return [
        cl.Starter(label=label, message=message, icon=icon)
        for label, message, icon in starter_specs
    ]

#################################
# Response modes for Axiom
#################################
@cl.set_chat_profiles
async def chat_profile():
    """Expose the two response modes the user can pick between."""
    profile_descriptions = {
        "Agent Mode": (
            "Ideal for complex tasks like brainstorming, code generation, "
            "and web apps creation."
        ),
        "Chat Mode": (
            "Suited for quick information retrieval and answering questions "
            "from the provided documentations."
        ),
    }
    return [
        cl.ChatProfile(name=name, markdown_description=description)
        for name, description in profile_descriptions.items()
    ]

#################################
# Chat Settings
#################################
@cl.on_chat_start
async def on_chat_start():
    """Initialise a new chat session.

    Creates a per-session thread id, asks the user for model settings, then
    stores the configured Gemini model and temperature in the user session.
    """
    # One conversation thread per session keeps LangGraph state isolated.
    cl.user_session.set("thread_id", f"thread-{uuid.uuid4()}")

    # Settings panel widgets: model picker and sampling temperature.
    model_select = Select(
        id="model",
        label="Gemini - Model",
        values=[
            "gemini-2.5-flash",
            "gemini-2.5-pro",
            "gemini-2.5-flash-lite",
        ],
        initial_index=0,
    )
    temperature_slider = Slider(
        id="temperature",
        label="Temperature",
        initial=1,
        min=0,
        max=2,
        step=0.1,
    )
    settings = await cl.ChatSettings([model_select, temperature_slider]).send()

    # Instantiate the chat model with the user's choices.
    model = ChatGoogleGenerativeAI(
        model=settings["model"],
        api_key=os.getenv("GOOGLE_API_KEY"),
        temperature=settings["temperature"],
    )

    # Persist for later retrieval in on_message.
    cl.user_session.set("model", model)
    cl.user_session.set("temperature", settings["temperature"])

#################################
# Settings Update
#################################
@cl.on_settings_update
async def on_settings_update(settings: dict):
    """Rebuild the chat model when the user changes the settings panel.

    Bug fix: this previously stored the raw model *name* string under the
    "model" session key, while on_chat_start stores a ChatGoogleGenerativeAI
    instance there and on_message passes that value straight to make_graph.
    Re-creating the model instance here keeps the session consistent, so a
    settings change no longer hands a plain string to the agent.

    Args:
        settings: The ChatSettings payload; expected keys are "model" and
            "temperature" as declared in on_chat_start.
    """
    model = ChatGoogleGenerativeAI(
        model=settings.get("model"),
        api_key=os.getenv("GOOGLE_API_KEY"),
        temperature=settings.get("temperature"),
    )
    cl.user_session.set("model", model)
    cl.user_session.set("temperature", settings.get("temperature"))

#################################
# Processing User Messages
#################################
@cl.on_message
async def on_message(message: cl.Message):
    """Handle one user turn.

    Assembles a multimodal content list (text plus any uploaded images),
    streams the agent's reply token-by-token, and reports any invocation
    failure back into the chat instead of crashing the handler.
    """
    # Per-session thread id routes LangGraph checkpointing to this conversation.
    thread_id = cl.user_session.get("thread_id")
    config = {"configurable": {"thread_id": thread_id}}

    # Model instance and response mode selected during chat start.
    model = cl.user_session.get("model")
    answer_mode = cl.user_session.get("chat_profile", "Agent Mode")

    # Build the content list for the current message.
    content = []
    if message.content:
        content.append({"type": "text", "text": message.content})

    # Attach uploaded images as base64 data URLs. Guard against elements
    # whose mime is unset (None) so the membership test cannot raise.
    image_elements = [
        element for element in message.elements
        if element.mime and "image" in element.mime
    ]
    for image in image_elements:
        if image.path:
            content.append(await process_image(image))
        else:
            print(f"Image {image.name} has no content and no path.")
            content.append({"type": "text", "text": f"Image {image.name} could not be processed."})

    msg = cl.Message(content="")  # Empty message used as the streaming target.

    try:
        async with make_graph(model, answer_mode) as agent:
            async for stream, _ in agent.astream(
                {"messages": HumanMessage(content=content)},
                config=config,
                stream_mode="messages",
            ):
                # NOTE(review): assumes chunk content is a str; some Gemini
                # responses can emit list-of-parts chunks — confirm before
                # relying on .replace here.
                if isinstance(stream, AIMessageChunk) and stream.content:
                    # Force fenced code blocks onto their own line for rendering.
                    await msg.stream_token(stream.content.replace("```", "\n```"))
            await msg.send()

    except Exception as e:
        # Top-level boundary: surface the failure to the user in-chat.
        await cl.Message(content=f"Error during agent invocation: {e}").send()