Arif committed on
Commit
9581ef6
Β·
1 Parent(s): 18bd5e3

Updating app.py removing llama.cpp

Browse files
Files changed (1) hide show
  1. app.py +65 -108
app.py CHANGED
@@ -1,10 +1,6 @@
1
  import streamlit as st
2
  import pandas as pd
3
- import numpy as np
4
- from llama_cpp import Llama
5
- from huggingface_hub import hf_hub_download
6
- import io
7
- import os
8
 
9
  # Page configuration
10
  st.set_page_config(
@@ -14,58 +10,25 @@ st.set_page_config(
14
  initial_sidebar_state="expanded"
15
  )
16
 
17
- # Custom CSS for better UI
18
- st.markdown("""
19
- <style>
20
- .main {
21
- padding: 0rem 1rem;
22
- }
23
- .stTabs [data-baseweb="tab-list"] {
24
- gap: 2px;
25
- }
26
- </style>
27
- """, unsafe_allow_html=True)
28
-
29
- # Title and description
30
  st.title("πŸ“Š LLM Data Analyzer")
31
- st.markdown("""
32
- Analyze your CSV/Excel files and chat with an AI assistant powered by Llama 2.
33
- This app runs on **free Hugging Face CPU** - response time ~5-10 seconds per query.
34
- """)
35
 
36
- # Cache model loading to avoid reloading
37
  @st.cache_resource
38
- def load_llm_model():
39
- """Load Llama 2 model from Hugging Face Hub"""
40
- st.info("πŸ“₯ Downloading model (first time only, ~4GB)... This may take 2-3 minutes.")
41
-
42
  try:
43
- model_path = hf_hub_download(
44
- repo_id="TheBloke/Llama-2-7B-Chat-GGUF",
45
- filename="llama-2-7b-chat.Q4_K_M.gguf"
46
- )
47
-
48
- llm = Llama(
49
- model_path=model_path,
50
- n_ctx=2048,
51
- n_threads=4,
52
- n_gpu_layers=0, # CPU only (free tier)
53
- verbose=False
54
- )
55
- return llm
56
  except Exception as e:
57
- st.error(f"Error loading model: {e}")
58
  return None
59
 
60
- # Load model
61
- llm = load_llm_model()
62
 
63
- if llm is None:
64
- st.error("Failed to load model. Please refresh the page.")
65
  st.stop()
66
 
67
- st.success("βœ… Model loaded successfully!")
68
-
69
  # Create tabs
70
  tab1, tab2, tab3 = st.tabs(["πŸ“€ Upload & Analyze", "πŸ’¬ Chat", "πŸ“Š About"])
71
 
@@ -84,7 +47,6 @@ with tab1:
84
  if uploaded_file is not None:
85
  st.success(f"βœ… File uploaded: {uploaded_file.name}")
86
 
87
- # Read the file
88
  try:
89
  if uploaded_file.name.endswith('.csv'):
90
  df = pd.read_csv(uploaded_file)
@@ -118,9 +80,9 @@ with tab1:
118
 
119
  if question:
120
  with st.spinner("πŸ€” AI is analyzing your data..."):
121
- # Create prompt
122
- data_summary = df.describe().to_string()
123
- prompt = f"""You are a data analyst expert. You have the following data summary:
124
 
125
  {data_summary}
126
 
@@ -128,20 +90,19 @@ Column names: {', '.join(df.columns.tolist())}
128
 
129
  User's question: {question}
130
 
131
- Please provide a clear, concise analysis based on the data summary. Focus on actionable insights."""
132
-
133
- # Generate response
134
- response = llm(
135
- prompt,
136
- max_tokens=300,
137
- stop=["\n\nUser:", "Question:"],
138
- echo=False,
139
- temperature=0.7
140
- )
141
-
142
- answer = response['choices'][0]['text'].strip()
143
- st.success("βœ… Analysis Complete")
144
- st.write(answer)
145
 
146
  except Exception as e:
147
  st.error(f"Error reading file: {e}")
@@ -151,7 +112,7 @@ Please provide a clear, concise analysis based on the data summary. Focus on act
151
  # ============================================================================
152
  with tab2:
153
  st.header("πŸ’¬ Chat with AI Assistant")
154
- st.write("Have a conversation with Llama 2. Ask anything!")
155
 
156
  # Initialize session state for chat history
157
  if "messages" not in st.session_state:
@@ -176,26 +137,25 @@ with tab2:
176
  # Generate AI response
177
  with st.chat_message("assistant"):
178
  with st.spinner("⏳ Generating response..."):
179
- prompt = f"""You are a helpful AI assistant. The user asks: {user_input}
180
-
181
- Provide a clear, helpful, and concise response."""
182
-
183
- response = llm(
184
- prompt,
185
- max_tokens=300,
186
- stop=["\n\nUser:", "User:"],
187
- echo=False,
188
- temperature=0.7
189
- )
190
-
191
- assistant_message = response['choices'][0]['text'].strip()
192
- st.markdown(assistant_message)
193
-
194
- # Add assistant message to history
195
- st.session_state.messages.append({
196
- "role": "assistant",
197
- "content": assistant_message
198
- })
199
 
200
  # ============================================================================
201
  # TAB 3: About
@@ -210,42 +170,39 @@ with tab3:
210
 
211
  ### πŸ”§ Technology Stack
212
 
213
- - **Model:** Llama 2 7B (quantized to 4-bit)
214
- - **Framework:** Llama.cpp (CPU inference)
215
- - **Frontend:** Streamlit
216
  - **Hosting:** Hugging Face Spaces (Free Tier)
 
217
 
218
- ### ⚑ Performance
219
 
220
- - **Speed:** ~5-10 tokens per second (free CPU)
221
- - **Context:** 2048 tokens max
222
- - **Model Size:** 4GB (quantized)
223
- - **Hardware:** Free tier CPU
224
 
225
- ### πŸ’‘ Use Cases
226
 
227
- 1. **Data Analysis**: Upload CSV/Excel and ask questions
228
- 2. **Chat**: General conversation with AI
229
- 3. **Learning**: Understand your data better
 
230
 
231
- ### πŸš€ Faster Version Available
232
 
233
- For **GPU acceleration** (70+ tokens/sec):
234
- - Run locally on Apple Silicon Mac using MLX
235
- - Upgrade to Hugging Face PRO tier
236
- - Deploy on GPU-enabled cloud servers
237
 
238
- ### πŸ“ Tips
239
 
240
  - Keep questions focused and specific for best results
241
- - First request takes longer (model loading)
242
  - Data is processed locally, not stored on server
243
 
244
  ### πŸ”— Links
245
 
246
- - [GitHub Repository](#) - Source code
247
- - [Hugging Face Hub](#) - Model info
248
- - [Llama.cpp](#) - Inference engine
249
 
250
  ---
251
 
 
1
  import streamlit as st
2
  import pandas as pd
3
+ from huggingface_hub import InferenceClient
 
 
 
 
4
 
5
  # Page configuration
6
  st.set_page_config(
 
10
  initial_sidebar_state="expanded"
11
  )
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  st.title("πŸ“Š LLM Data Analyzer")
14
+ st.write("*Analyze data and chat with AI powered by Hugging Face Inference API*")
 
 
 
15
 
16
+ # Initialize HF Inference Client
17
  @st.cache_resource
18
+ def get_hf_client():
19
+ """Get Hugging Face Inference Client"""
 
 
20
  try:
21
+ return InferenceClient()
 
 
 
 
 
 
 
 
 
 
 
 
22
  except Exception as e:
23
+ st.error(f"Error initializing HF client: {e}")
24
  return None
25
 
26
+ client = get_hf_client()
 
27
 
28
+ if client is None:
29
+ st.error("Failed to initialize Hugging Face client")
30
  st.stop()
31
 
 
 
32
  # Create tabs
33
  tab1, tab2, tab3 = st.tabs(["πŸ“€ Upload & Analyze", "πŸ’¬ Chat", "πŸ“Š About"])
34
 
 
47
  if uploaded_file is not None:
48
  st.success(f"βœ… File uploaded: {uploaded_file.name}")
49
 
 
50
  try:
51
  if uploaded_file.name.endswith('.csv'):
52
  df = pd.read_csv(uploaded_file)
 
80
 
81
  if question:
82
  with st.spinner("πŸ€” AI is analyzing your data..."):
83
+ try:
84
+ data_summary = df.describe().to_string()
85
+ prompt = f"""You are a data analyst expert. You have the following data summary:
86
 
87
  {data_summary}
88
 
 
90
 
91
  User's question: {question}
92
 
93
+ Please provide a clear, concise analysis based on the data summary."""
94
+
95
+ # Use Hugging Face Inference API
96
+ response = client.text_generation(
97
+ prompt,
98
+ max_new_tokens=300,
99
+ temperature=0.7,
100
+ )
101
+
102
+ st.success("βœ… Analysis Complete")
103
+ st.write(response)
104
+ except Exception as e:
105
+ st.error(f"Error analyzing data: {e}")
 
106
 
107
  except Exception as e:
108
  st.error(f"Error reading file: {e}")
 
112
  # ============================================================================
113
  with tab2:
114
  st.header("πŸ’¬ Chat with AI Assistant")
115
+ st.write("Have a conversation with an AI assistant powered by Hugging Face.")
116
 
117
  # Initialize session state for chat history
118
  if "messages" not in st.session_state:
 
137
  # Generate AI response
138
  with st.chat_message("assistant"):
139
  with st.spinner("⏳ Generating response..."):
140
+ try:
141
+ prompt = f"User: {user_input}\n\nAssistant:"
142
+
143
+ response = client.text_generation(
144
+ prompt,
145
+ max_new_tokens=300,
146
+ temperature=0.7,
147
+ )
148
+
149
+ assistant_message = response.strip()
150
+ st.markdown(assistant_message)
151
+
152
+ # Add assistant message to history
153
+ st.session_state.messages.append({
154
+ "role": "assistant",
155
+ "content": assistant_message
156
+ })
157
+ except Exception as e:
158
+ st.error(f"Error generating response: {e}")
 
159
 
160
  # ============================================================================
161
  # TAB 3: About
 
170
 
171
  ### πŸ”§ Technology Stack
172
 
173
+ - **Framework:** Streamlit
174
+ - **AI Engine:** Hugging Face Inference API
 
175
  - **Hosting:** Hugging Face Spaces (Free Tier)
176
+ - **Language:** Python
177
 
178
+ ### ⚑ Features
179
 
180
+ 1. **Data Analysis**: Upload CSV/Excel and ask questions about your data
181
+ 2. **Chat**: Have conversations with an AI assistant
182
+ 3. **Statistics**: View data summaries and insights
 
183
 
184
+ ### πŸ“ How to Use
185
 
186
+ 1. **Upload Data** - Start by uploading a CSV or Excel file
187
+ 2. **Preview** - Review your data and statistics
188
+ 3. **Ask Questions** - Get AI-powered analysis
189
+ 4. **Chat** - Have follow-up conversations
190
 
191
+ ### 🌐 Powered By
192
 
193
+ - [Hugging Face](https://huggingface.co/) - AI models and hosting
194
+ - [Streamlit](https://streamlit.io/) - Web framework
 
 
195
 
196
+ ### πŸ“– Quick Tips
197
 
198
  - Keep questions focused and specific for best results
199
+ - Responses may take a few seconds
200
  - Data is processed locally, not stored on server
201
 
202
  ### πŸ”— Links
203
 
204
+ - [GitHub Repository](https://github.com/Arif-Badhon/LLM-Data-Analyzer)
205
+ - [Hugging Face Hub](https://huggingface.co/)
 
206
 
207
  ---
208