Really-amin committed (verified)
Commit 628c421 · 1 Parent(s): d6186b7

Upload 12 files

app.py CHANGED
@@ -21,6 +21,7 @@ import os
 from urllib.parse import urljoin
 
 from database import Database
+from database import compat as db_compat  # noqa: F401 (monkey-patch DatabaseManager)
 from config import config as global_config
 
 class SentimentRequest(BaseModel):
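
The new import is there purely for its side effect: loading database.compat attaches the legacy health-logging methods to DatabaseManager, so app.py can keep calling one interface regardless of which backend the database package resolves to. A minimal sketch of that call pattern follows; the constructor call and the provider values are illustrative assumptions, not taken from this commit:

    from database import Database
    from database import compat as db_compat  # noqa: F401  (side effect: patch DatabaseManager)

    db = Database()  # hypothetical: the real constructor arguments are not shown in this diff

    # Works whether Database is the legacy SQLite class or the patched DatabaseManager.
    db.log_provider_status(
        provider_name="coingecko",      # illustrative provider
        category="market_data",
        status="online",
        response_time=142.0,
        status_code=200,
        endpoint_tested="/api/v3/ping",
    )
    uptime = db.get_uptime_percentage("coingecko", hours=24)
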
database.py CHANGED
@@ -107,81 +107,81 @@ class Database:
                )
            """)

            # Configuration table
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS configuration (
                    key TEXT PRIMARY KEY,
                    value TEXT NOT NULL,
                    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                )
            """)

            # Pools table
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS pools (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    name TEXT NOT NULL,
                    category TEXT NOT NULL,
                    rotation_strategy TEXT NOT NULL,
                    description TEXT,
                    enabled INTEGER DEFAULT 1,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                )
            """)

            # Pool members table
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS pool_members (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    pool_id INTEGER NOT NULL,
                    provider_id TEXT NOT NULL,
                    provider_name TEXT NOT NULL,
                    priority INTEGER DEFAULT 1,
                    weight INTEGER DEFAULT 1,
                    use_count INTEGER DEFAULT 0,
                    success_rate REAL DEFAULT 0,
                    rate_limit_usage INTEGER DEFAULT 0,
                    rate_limit_limit INTEGER DEFAULT 0,
                    rate_limit_percentage REAL DEFAULT 0,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (pool_id) REFERENCES pools(id) ON DELETE CASCADE
                )
            """)

            # Pool rotation history
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS pool_rotations (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    pool_id INTEGER NOT NULL,
                    provider_id TEXT NOT NULL,
                    provider_name TEXT NOT NULL,
                    reason TEXT NOT NULL,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (pool_id) REFERENCES pools(id) ON DELETE CASCADE
                )
            """)

            # Create indexes
            cursor.execute("""
                CREATE INDEX IF NOT EXISTS idx_status_log_provider
                ON status_log(provider_name, timestamp)
            """)
            cursor.execute("""
                CREATE INDEX IF NOT EXISTS idx_status_log_timestamp
                ON status_log(timestamp)
            """)
            cursor.execute("""
                CREATE INDEX IF NOT EXISTS idx_incidents_provider
                ON incidents(provider_name, start_time)
            """)
            cursor.execute("""
                CREATE INDEX IF NOT EXISTS idx_pool_members_pool
                ON pool_members(pool_id, provider_id)
            """)
            cursor.execute("""
                CREATE INDEX IF NOT EXISTS idx_pool_rotations_pool
                ON pool_rotations(pool_id, created_at)
            """)

        logger.info("Database initialized successfully")

@@ -533,244 +533,244 @@ class Database:

        logger.info(f"Exported {len(rows)} rows to {output_path}")

    # ------------------------------------------------------------------
    # Pool management helpers
    # ------------------------------------------------------------------

    def create_pool(
        self,
        name: str,
        category: str,
        rotation_strategy: str,
        description: Optional[str] = None,
        enabled: bool = True
    ) -> int:
        """Create a new pool and return its ID"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                INSERT INTO pools (name, category, rotation_strategy, description, enabled)
                VALUES (?, ?, ?, ?, ?)
            """, (name, category, rotation_strategy, description, int(enabled)))
            return cursor.lastrowid

    def update_pool_usage(self, pool_id: int, enabled: Optional[bool] = None):
        """Update pool properties"""
        if enabled is None:
            return
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                UPDATE pools
                SET enabled = ?, created_at = created_at
                WHERE id = ?
            """, (int(enabled), pool_id))

    def delete_pool(self, pool_id: int):
        """Delete pool and cascade members/history"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("DELETE FROM pools WHERE id = ?", (pool_id,))

    def add_pool_member(
        self,
        pool_id: int,
        provider_id: str,
        provider_name: str,
        priority: int = 1,
        weight: int = 1,
        success_rate: float = 0.0,
        rate_limit_usage: int = 0,
        rate_limit_limit: int = 0,
        rate_limit_percentage: float = 0.0
    ) -> int:
        """Add a provider to a pool"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                INSERT INTO pool_members
                (pool_id, provider_id, provider_name, priority, weight,
                 success_rate, rate_limit_usage, rate_limit_limit, rate_limit_percentage)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            """, (
                pool_id,
                provider_id,
                provider_name,
                priority,
                weight,
                success_rate,
                rate_limit_usage,
                rate_limit_limit,
                rate_limit_percentage
            ))
            return cursor.lastrowid

    def remove_pool_member(self, pool_id: int, provider_id: str):
        """Remove provider from pool"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                DELETE FROM pool_members
                WHERE pool_id = ? AND provider_id = ?
            """, (pool_id, provider_id))

    def increment_member_use(self, pool_id: int, provider_id: str):
        """Increment use count for pool member"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                UPDATE pool_members
                SET use_count = use_count + 1
                WHERE pool_id = ? AND provider_id = ?
            """, (pool_id, provider_id))

    def update_member_stats(
        self,
        pool_id: int,
        provider_id: str,
        success_rate: Optional[float] = None,
        rate_limit_usage: Optional[int] = None,
        rate_limit_limit: Optional[int] = None,
        rate_limit_percentage: Optional[float] = None
    ):
        """Update success/rate limit stats"""
        updates = []
        params = []

        if success_rate is not None:
            updates.append("success_rate = ?")
            params.append(success_rate)
        if rate_limit_usage is not None:
            updates.append("rate_limit_usage = ?")
            params.append(rate_limit_usage)
        if rate_limit_limit is not None:
            updates.append("rate_limit_limit = ?")
            params.append(rate_limit_limit)
        if rate_limit_percentage is not None:
            updates.append("rate_limit_percentage = ?")
            params.append(rate_limit_percentage)

        if not updates:
            return

        params.extend([pool_id, provider_id])

        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(f"""
                UPDATE pool_members
                SET {', '.join(updates)}
                WHERE pool_id = ? AND provider_id = ?
            """, params)

    def log_pool_rotation(
        self,
        pool_id: int,
        provider_id: str,
        provider_name: str,
        reason: str
    ):
        """Log rotation event"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                INSERT INTO pool_rotations
                (pool_id, provider_id, provider_name, reason)
                VALUES (?, ?, ?, ?)
            """, (pool_id, provider_id, provider_name, reason))

    def get_pools(self) -> List[Dict]:
        """Get all pools with members and stats"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                SELECT p.*,
                       COALESCE((SELECT COUNT(*) FROM pool_rotations pr WHERE pr.pool_id = p.id), 0) as rotation_count
                FROM pools p
                ORDER BY p.created_at DESC
            """)
            pools = [dict(row) for row in cursor.fetchall()]

            for pool in pools:
                cursor.execute("""
                    SELECT * FROM pool_members
                    WHERE pool_id = ?
                    ORDER BY priority DESC, weight DESC, provider_name
                """, (pool['id'],))
                pool['members'] = [dict(row) for row in cursor.fetchall()]

            return pools

    def get_pool(self, pool_id: int) -> Optional[Dict]:
        """Get single pool"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                SELECT p.*,
                       COALESCE((SELECT COUNT(*) FROM pool_rotations pr WHERE pr.pool_id = p.id), 0) as rotation_count
                FROM pools p
                WHERE p.id = ?
            """, (pool_id,))
            row = cursor.fetchone()
            if not row:
                return None
            pool = dict(row)
            cursor.execute("""
                SELECT * FROM pool_members
                WHERE pool_id = ?
                ORDER BY priority DESC, weight DESC, provider_name
            """, (pool_id,))
            pool['members'] = [dict(r) for r in cursor.fetchall()]
            return pool

    def get_pool_rotation_history(self, pool_id: Optional[int] = None, limit: int = 50) -> List[Dict]:
        """Get rotation history (optionally filtered by pool)"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            if pool_id is not None:
                cursor.execute("""
                    SELECT * FROM pool_rotations
                    WHERE pool_id = ?
                    ORDER BY created_at DESC
                    LIMIT ?
                """, (pool_id, limit))
            else:
                cursor.execute("""
                    SELECT * FROM pool_rotations
                    ORDER BY created_at DESC
                    LIMIT ?
                """, (limit,))
            return [dict(row) for row in cursor.fetchall()]

    # ------------------------------------------------------------------
    # Provider health logging
    # ------------------------------------------------------------------

    def log_provider_status(
        self,
        provider_name: str,
        category: str,
        status: str,
        response_time: Optional[float] = None,
        status_code: Optional[int] = None,
        endpoint_tested: Optional[str] = None,
        error_message: Optional[str] = None
    ):
        """Log provider status in status_log table"""
        with self.get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("""
                INSERT INTO status_log
                (provider_name, category, status, response_time, status_code,
                 error_message, endpoint_tested, timestamp)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            """, (
                provider_name,
                category,
                status,
                response_time,
                status_code,
                error_message,
                endpoint_tested,
                time.time()
            ))
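
Taken together, the helpers above give application code a small pool API backed by the pools, pool_members and pool_rotations tables. A short usage sketch, assuming db is an already-constructed Database instance; the pool name, rotation strategy string and provider values are illustrative only and do not come from this commit:

    pool_id = db.create_pool(
        name="market-data",               # illustrative
        category="market_data",
        rotation_strategy="round_robin",  # illustrative strategy value
        description="Primary market data providers",
    )
    db.add_pool_member(pool_id, provider_id="coingecko", provider_name="CoinGecko",
                       priority=2, weight=3)
    db.add_pool_member(pool_id, provider_id="coincap", provider_name="CoinCap")

    # Record that a provider was picked, then refresh its stats.
    db.increment_member_use(pool_id, "coingecko")
    db.log_pool_rotation(pool_id, "coingecko", "CoinGecko", reason="scheduled rotation")
    db.update_member_stats(pool_id, "coingecko", success_rate=99.2, rate_limit_percentage=40.0)

    members = db.get_pool(pool_id)["members"]  # sorted by priority DESC, weight DESC
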
database/__init__.py CHANGED
@@ -1,8 +1,44 @@
-"""Database module for crypto API monitoring"""
-
-from database.db_manager import DatabaseManager
-
-# For backward compatibility
-Database = DatabaseManager
-
-__all__ = ['DatabaseManager', 'Database']
+"""Database package exports.
+
+This package exposes both the new SQLAlchemy-based ``DatabaseManager`` and the
+legacy SQLite-backed ``Database`` class that the existing application modules
+still import via ``from database import Database``. During the transition phase
+we dynamically load the legacy implementation from the root ``database.py``
+module (renamed here as ``legacy_database`` when importing) and fall back to the
+new manager if that module is unavailable.
+"""
+
+from importlib import util as _importlib_util
+from pathlib import Path as _Path
+from typing import Optional as _Optional
+
+from .db_manager import DatabaseManager
+
+def _load_legacy_database() -> _Optional[type]:
+    """Load the legacy Database class from the root-level ``database.py`` if it exists."""
+    legacy_path = _Path(__file__).resolve().parent.parent / "database.py"
+    if not legacy_path.exists():
+        return None
+
+    spec = _importlib_util.spec_from_file_location("legacy_database", legacy_path)
+    if spec is None or spec.loader is None:
+        return None
+
+    module = _importlib_util.module_from_spec(spec)
+    try:
+        spec.loader.exec_module(module)
+    except Exception:
+        # If loading the legacy module fails we silently fall back to DatabaseManager
+        return None
+
+    return getattr(module, "Database", None)
+
+
+_LegacyDatabase = _load_legacy_database()
+
+if _LegacyDatabase is not None:
+    Database = _LegacyDatabase
+else:
+    Database = DatabaseManager
+
+__all__ = ["DatabaseManager", "Database"]
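
Because a database package and a root-level database.py coexist here, the package wins at import time and the loader above decides which class backs the Database name. A quick diagnostic sketch for checking which implementation a given environment actually picked (not part of the commit):

    import database

    print(database.Database.__module__)
    # "legacy_database"      -> the root database.py was found and loaded dynamically
    # "database.db_manager"  -> fallback to the SQLAlchemy DatabaseManager
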
database/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/database/__pycache__/__init__.cpython-313.pyc and b/database/__pycache__/__init__.cpython-313.pyc differ
 
database/__pycache__/data_access.cpython-313.pyc CHANGED
Binary files a/database/__pycache__/data_access.cpython-313.pyc and b/database/__pycache__/data_access.cpython-313.pyc differ
 
database/__pycache__/db_manager.cpython-313.pyc CHANGED
Binary files a/database/__pycache__/db_manager.cpython-313.pyc and b/database/__pycache__/db_manager.cpython-313.pyc differ
 
database/__pycache__/models.cpython-313.pyc CHANGED
Binary files a/database/__pycache__/models.cpython-313.pyc and b/database/__pycache__/models.cpython-313.pyc differ
 
database/compat.py ADDED
@@ -0,0 +1,196 @@
+"""Compat layer for DatabaseManager to provide methods expected by legacy app code.
+
+This module monkey-patches the DatabaseManager class from database.db_manager
+to add:
+- log_provider_status
+- get_uptime_percentage
+- get_avg_response_time
+
+The implementations are lightweight and defensive: if the underlying engine
+is not available, they fail gracefully instead of raising errors.
+"""
+
+from __future__ import annotations
+
+from datetime import datetime, timedelta
+from typing import Optional
+
+try:
+    from sqlalchemy import text as _sa_text
+except Exception:  # pragma: no cover - extremely defensive
+    _sa_text = None  # type: ignore
+
+try:
+    from .db_manager import DatabaseManager  # type: ignore
+except Exception:  # pragma: no cover
+    DatabaseManager = None  # type: ignore
+
+
+def _get_engine(instance) -> Optional[object]:
+    """Best-effort helper to get an SQLAlchemy engine from the manager."""
+    return getattr(instance, "engine", None)
+
+
+def _ensure_table(conn) -> None:
+    """Create provider_status table if it does not exist yet."""
+    if _sa_text is None:
+        return
+    conn.execute(
+        _sa_text(
+            """
+            CREATE TABLE IF NOT EXISTS provider_status (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                provider_name TEXT NOT NULL,
+                category TEXT NOT NULL,
+                status TEXT NOT NULL,
+                response_time REAL,
+                status_code INTEGER,
+                error_message TEXT,
+                endpoint_tested TEXT,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+            )
+            """
+        )
+    )
+
+
+def _log_provider_status(
+    self,
+    provider_name: str,
+    category: str,
+    status: str,
+    response_time: Optional[float] = None,
+    status_code: Optional[int] = None,
+    endpoint_tested: Optional[str] = None,
+    error_message: Optional[str] = None,
+) -> None:
+    """Insert a status row into provider_status.
+
+    This is a best-effort logger; if no engine is available it silently returns.
+    """
+    engine = _get_engine(self)
+    if engine is None or _sa_text is None:
+        return
+
+    now = datetime.utcnow()
+    try:
+        with engine.begin() as conn:  # type: ignore[call-arg]
+            _ensure_table(conn)
+            conn.execute(
+                _sa_text(
+                    """
+                    INSERT INTO provider_status (
+                        provider_name,
+                        category,
+                        status,
+                        response_time,
+                        status_code,
+                        error_message,
+                        endpoint_tested,
+                        created_at
+                    )
+                    VALUES (
+                        :provider_name,
+                        :category,
+                        :status,
+                        :response_time,
+                        :status_code,
+                        :error_message,
+                        :endpoint_tested,
+                        :created_at
+                    )
+                    """
+                ),
+                {
+                    "provider_name": provider_name,
+                    "category": category,
+                    "status": status,
+                    "response_time": response_time,
+                    "status_code": status_code,
+                    "error_message": error_message,
+                    "endpoint_tested": endpoint_tested,
+                    "created_at": now,
+                },
+            )
+    except Exception:  # pragma: no cover - we never want this to crash the app
+        # Swallow DB errors; health endpoints must not bring the whole app down.
+        return
+
+
+def _get_uptime_percentage(self, provider_name: str, hours: int = 24) -> float:
+    """Compute uptime percentage for a provider in the last N hours.
+
+    Uptime is calculated as the ratio of rows with status='online' to total
+    rows in the provider_status table within the given time window.
+    """
+    engine = _get_engine(self)
+    if engine is None or _sa_text is None:
+        return 0.0
+
+    cutoff = datetime.utcnow() - timedelta(hours=hours)
+    try:
+        with engine.begin() as conn:  # type: ignore[call-arg]
+            _ensure_table(conn)
+            result = conn.execute(
+                _sa_text(
+                    """
+                    SELECT
+                        COUNT(*) AS total,
+                        SUM(CASE WHEN status = 'online' THEN 1 ELSE 0 END) AS online
+                    FROM provider_status
+                    WHERE provider_name = :provider_name
+                      AND created_at >= :cutoff
+                    """
+                ),
+                {"provider_name": provider_name, "cutoff": cutoff},
+            ).first()
+    except Exception:
+        return 0.0
+
+    if not result or result[0] in (None, 0):
+        return 0.0
+
+    total = float(result[0] or 0)
+    online = float(result[1] or 0)
+    return round(100.0 * online / total, 2)
+
+
+def _get_avg_response_time(self, provider_name: str, hours: int = 24) -> float:
+    """Average response time (ms) for a provider over the last N hours."""
+    engine = _get_engine(self)
+    if engine is None or _sa_text is None:
+        return 0.0
+
+    cutoff = datetime.utcnow() - timedelta(hours=hours)
+    try:
+        with engine.begin() as conn:  # type: ignore[call-arg]
+            _ensure_table(conn)
+            result = conn.execute(
+                _sa_text(
+                    """
+                    SELECT AVG(response_time) AS avg_response
+                    FROM provider_status
+                    WHERE provider_name = :provider_name
+                      AND response_time IS NOT NULL
+                      AND created_at >= :cutoff
+                    """
+                ),
+                {"provider_name": provider_name, "cutoff": cutoff},
+            ).first()
+    except Exception:
+        return 0.0
+
+    if not result or result[0] is None:
+        return 0.0
+
+    return round(float(result[0]), 2)
+
+
+# Apply monkey-patches when this module is imported.
+if DatabaseManager is not None:  # pragma: no cover
+    if not hasattr(DatabaseManager, "log_provider_status"):
+        DatabaseManager.log_provider_status = _log_provider_status  # type: ignore[attr-defined]
+    if not hasattr(DatabaseManager, "get_uptime_percentage"):
+        DatabaseManager.get_uptime_percentage = _get_uptime_percentage  # type: ignore[attr-defined]
+    if not hasattr(DatabaseManager, "get_avg_response_time"):
+        DatabaseManager.get_avg_response_time = _get_avg_response_time  # type: ignore[attr-defined]
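
Because each assignment is guarded by hasattr, importing database.compat is idempotent and will not shadow a native implementation if DatabaseManager later gains these methods itself. A sketch of the intended call path, assuming dm is an already-constructed DatabaseManager instance that exposes an SQLAlchemy .engine; the provider values are illustrative:

    import database.compat  # noqa: F401  (adds the three methods if they are missing)

    dm.log_provider_status("binance", "exchange", "degraded",
                           response_time=850.0, status_code=429)
    print(dm.get_uptime_percentage("binance", hours=6))   # share of 'online' rows, e.g. 83.33
    print(dm.get_avg_response_time("binance", hours=6))   # mean response_time in the window
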
database/db_manager.py CHANGED
@@ -1537,126 +1537,3 @@ if __name__ == "__main__":
         if table != 'database_size_mb':
             print(f" {table}: {count}")
     print(f" Database Size: {stats.get('database_size_mb', 0)} MB")
-
-
-# === Monkey-patch compatibility methods for legacy health logging ===
-# These provide the same interface as the older `Database` class from database.py
-# so that calls like `db.log_provider_status` and `db.get_uptime_percentage`
-# used in app.py continue to work when using DatabaseManager with SQLAlchemy.
-
-from sqlalchemy import text as _sa_text
-from datetime import datetime as _dt, timedelta as _td
-
-def _dm_log_provider_status(
-    self,
-    provider_name: str,
-    category: str,
-    status: str,
-    response_time: Optional[float] = None,
-    status_code: Optional[int] = None,
-    endpoint_tested: Optional[str] = None,
-    error_message: Optional[str] = None,
-):
-    """Log provider status into a simple `status_log` table.
-
-    This mirrors the behavior of the older sqlite-based `Database.log_provider_status`
-    implementation so that existing code paths in app.py keep working.
-    """
-    try:
-        # Ensure table exists (idempotent)
-        create_sql = _sa_text("""
-            CREATE TABLE IF NOT EXISTS status_log (
-                id INTEGER PRIMARY KEY AUTOINCREMENT,
-                provider_name TEXT NOT NULL,
-                category TEXT NOT NULL,
-                status TEXT NOT NULL,
-                response_time REAL,
-                status_code INTEGER,
-                error_message TEXT,
-                endpoint_tested TEXT,
-                timestamp REAL NOT NULL,
-                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
-            )
-        """)
-        insert_sql = _sa_text("""
-            INSERT INTO status_log
-            (provider_name, category, status, response_time, status_code,
-             error_message, endpoint_tested, timestamp)
-            VALUES (:provider_name, :category, :status, :response_time, :status_code,
-                    :error_message, :endpoint_tested, :timestamp)
-        """)
-        with self.engine.begin() as conn:
-            conn.execute(create_sql)
-            conn.execute(
-                insert_sql,
-                {
-                    "provider_name": provider_name,
-                    "category": category,
-                    "status": status,
-                    "response_time": response_time,
-                    "status_code": status_code,
-                    "error_message": error_message,
-                    "endpoint_tested": endpoint_tested,
-                    "timestamp": _dt.now().timestamp(),
-                },
-            )
-    except Exception as e:  # pragma: no cover - logging safeguard
-        logger.error(f"Failed to log provider status for {provider_name}: {e}", exc_info=True)
-
-
-def _dm_get_uptime_percentage(
-    self,
-    provider_name: str,
-    hours: int = 24,
-) -> float:
-    """Calculate uptime percentage from `status_log` table.
-
-    This approximates the legacy behavior:
-        uptime = (online_rows / total_rows) * 100
-    where `status = 'online'` is treated as healthy.
-    """
-    try:
-        cutoff = _dt.now() - _td(hours=hours)
-        # Ensure table exists before querying
-        create_sql = _sa_text("""
-            CREATE TABLE IF NOT EXISTS status_log (
-                id INTEGER PRIMARY KEY AUTOINCREMENT,
-                provider_name TEXT NOT NULL,
-                category TEXT NOT NULL,
-                status TEXT NOT NULL,
-                response_time REAL,
-                status_code INTEGER,
-                error_message TEXT,
-                endpoint_tested TEXT,
-                timestamp REAL NOT NULL,
-                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
-            )
-        """)
-        query_sql = _sa_text("""
-            SELECT
-                COUNT(*) AS total,
-                SUM(CASE WHEN status = 'online' THEN 1 ELSE 0 END) AS online
-            FROM status_log
-            WHERE provider_name = :provider_name
-              AND created_at >= :cutoff
-        """)
-        with self.engine.begin() as conn:
-            conn.execute(create_sql)
-            result = conn.execute(
-                query_sql,
-                {"provider_name": provider_name, "cutoff": cutoff},
-            ).first()
-        if result and result[0]:
-            total = result[0] or 0
-            online = result[1] or 0
-            if total > 0:
-                return round((online / total) * 100.0, 2)
-        return 0.0
-    except Exception as e:  # pragma: no cover - logging safeguard
-        logger.error(f"Failed to compute uptime for {provider_name}: {e}", exc_info=True)
-        return 0.0
-
-
-# Attach methods to DatabaseManager so existing code can call them.
-DatabaseManager.log_provider_status = _dm_log_provider_status  # type: ignore[attr-defined]
-DatabaseManager.get_uptime_percentage = _dm_get_uptime_percentage  # type: ignore[attr-defined]
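
With this module-level patch removed from db_manager.py, the compatibility methods exist on DatabaseManager only after database.compat has been imported, which app.py now does at import time. Call sites that might run before that import can opt in explicitly; a defensive sketch, assuming dm is an existing DatabaseManager instance and an illustrative provider name:

    if not hasattr(dm, "log_provider_status"):
        import database.compat  # noqa: F401  (late, explicit opt-in to the compat layer)

    dm.log_provider_status("kraken", "exchange", "online", response_time=95.0)
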