-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathzero_shield_cli.py
More file actions
3068 lines (2720 loc) · 146 KB
/
zero_shield_cli.py
File metadata and controls
3068 lines (2720 loc) · 146 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
Zero-Shield CLI: Your Agentic AWS Copilot
Copyright (c) 2026 Jeri L3D | JeriSadeuM. All rights reserved.
Released under the MIT License.
Repository: https://github.com/jerisadeumai/zero-shield-cli
For setup details see the README.md file in the repository.
OODA Framework: (Observe → Orient → Decide → Act)
Version: v2.0.0-dev (security-hardened)
"""
import warnings
warnings.filterwarnings("ignore")
# -*- coding: utf-8 -*-
import sys, os, boto3, re, time, threading, itertools, json, signal, codecs
# ═══════════════════════════════════════════════════════════════════════════════
# UI/UX Enhancement Module - Color Support & Formatting
# ═══════════════════════════════════════════════════════════════════════════════
class Colors:
    """ANSI color codes for terminal output with Windows compatibility."""
    # Enable ANSI (virtual terminal) processing on Windows 10+. Runs once at
    # class-definition time; silently no-ops if the console API is unavailable.
    if os.name == 'nt':
        try:
            import ctypes
            kernel32 = ctypes.windll.kernel32
            # -11 = STD_OUTPUT_HANDLE; mode 7 enables VT escape processing.
            kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
        except Exception:
            pass
    # Text attributes
    RESET = '\033[0m'
    BOLD = '\033[1m'
    DIM = '\033[2m'
    # Foreground colors
    BLACK = '\033[30m'
    RED = '\033[31m'
    GREEN = '\033[32m'
    YELLOW = '\033[33m'
    BLUE = '\033[34m'
    MAGENTA = '\033[35m'
    CYAN = '\033[36m'
    WHITE = '\033[37m'
    # Bright foreground colors
    BRIGHT_RED = '\033[91m'
    BRIGHT_GREEN = '\033[92m'
    BRIGHT_YELLOW = '\033[93m'
    BRIGHT_BLUE = '\033[94m'
    BRIGHT_MAGENTA = '\033[95m'
    BRIGHT_CYAN = '\033[96m'
    # Background colors
    BG_RED = '\033[41m'
    BG_GREEN = '\033[42m'
    BG_YELLOW = '\033[43m'
    BG_BLUE = '\033[44m'

    @staticmethod
    def strip(text):
        """Remove all ANSI escape codes from text."""
        return re.sub(r'\033\[[0-9;]+m', '', text)
def colorize(text, color):
    """Wrap text in the given ANSI color code, appending a reset."""
    return "".join((color, str(text), Colors.RESET))
def print_success(msg):
    """Emit msg prefixed with a bright-green check mark."""
    prefix = f"{Colors.BRIGHT_GREEN}✓{Colors.RESET}"
    print(prefix, msg)
def print_error(msg):
    """Emit msg prefixed with a bright-red cross mark."""
    prefix = f"{Colors.BRIGHT_RED}✗{Colors.RESET}"
    print(prefix, msg)
def print_warning(msg):
    """Emit msg prefixed with a bright-yellow warning sign."""
    prefix = f"{Colors.BRIGHT_YELLOW}⚠{Colors.RESET}"
    print(prefix, msg)
def print_info(msg):
    """Emit msg prefixed with a bright-cyan info symbol."""
    prefix = f"{Colors.BRIGHT_CYAN}ℹ{Colors.RESET}"
    print(prefix, msg)
def print_header(title, width=80):
    """Print title centered between two bold cyan horizontal rules."""
    rule = f"{Colors.BOLD}{Colors.CYAN}{'═' * width}{Colors.RESET}"
    print(f"\n{rule}")
    print(f"{Colors.BOLD}{Colors.CYAN}{title.center(width)}{Colors.RESET}")
    print(f"{rule}\n")
def print_section(title):
    """Print a bold blue section title followed by a dim thin divider."""
    print(f"\n{Colors.BOLD}{Colors.BLUE}▶ {title}{Colors.RESET}")
    divider = '─' * 60
    print(f"{Colors.DIM}{divider}{Colors.RESET}")
def print_table_row(cols, widths, colors=None):
    """Render one table row: each cell left-padded to widths[i], joined by │."""
    if colors is None:
        colors = [Colors.RESET] * len(cols)
    cells = []
    for i, cell in enumerate(cols):
        cells.append(f"{colors[i]}{str(cell):<{widths[i]}}{Colors.RESET}")
    print(f" {' │ '.join(cells)}")
def progress_bar(current, total, width=40, label="Progress"):
    """Draw an in-place (carriage-return) progress bar; newline on completion."""
    frac = current / total if total > 0 else 0
    done = int(width * frac)
    gauge = "█" * done + "░" * (width - done)
    pct_label = f"{frac * 100:.1f}%"
    line = (f"\r{Colors.CYAN}{label}:{Colors.RESET} "
            f"[{Colors.GREEN}{gauge}{Colors.RESET}] {pct_label}")
    print(line, end='', flush=True)
    if current >= total:
        # Terminate the in-place line once the bar is full.
        print()
def print_banner():
    """Print the Zero-Shield ASCII art banner, version line, and OODA legend."""
    banner = f"""{Colors.BRIGHT_CYAN}
╔══════════════════════════════════════════════════════════════════════════════╗
║ ║
║ ███████╗███████╗██████╗ ██████╗ ███████╗██╗ ██╗██╗███████╗██╗ ██████╗ ║
║ ╚══███╔╝██╔════╝██╔══██╗██╔═══██╗ ██╔════╝██║ ██║██║██╔════╝██║ ██╔══██╗ ║
║ ███╔╝ █████╗ ██████╔╝██║ ██║█████╗███████╗███████║██║█████╗ ██║ ██║ ██║ ║
║ ███╔╝ ██╔══╝ ██╔══██╗██║ ██║╚════╝╚════██║██╔══██║██║██╔══╝ ██║ ██║ ██║ ║
║ ███████╗███████╗██║ ██║╚██████╔╝ ███████║██║ ██║██║███████╗███████╗██████╔╝ ║
║ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝╚═╝╚══════╝╚══════╝╚═════╝ ║
║ ║
║ {Colors.WHITE}Agentic AWS Security Copilot{Colors.CYAN} ║
║ {Colors.DIM}v2.0.0-dev (security-hardened){Colors.CYAN} ║
║ ║
╚══════════════════════════════════════════════════════════════════════════════╝{Colors.RESET}
{Colors.BRIGHT_YELLOW}⚡ OODA Loop:{Colors.RESET} {Colors.GREEN}Observe{Colors.RESET} → {Colors.CYAN}Orient{Colors.RESET} → {Colors.MAGENTA}Decide{Colors.RESET} → {Colors.RED}Act{Colors.RESET}
{Colors.DIM}Copyright © 2026 Jeri L3D | JeriSadeuM | MIT License{Colors.RESET}
"""
    print(banner)
# ═══════════════════════════════════════════════════════════════════════════════
# End UI/UX Enhancement Module
# ═══════════════════════════════════════════════════════════════════════════════
# Force UTF-8 for Windows consoles.
if sys.platform == 'win32':
    try:
        # detach() hands the raw binary buffer to the new codec wrappers,
        # so both streams decode/encode UTF-8 regardless of console codepage.
        sys.stdin = codecs.getreader('utf-8')(sys.stdin.detach())
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())
    except Exception:
        pass
if os.name == 'nt':
    import msvcrt  # console keyboard-buffer access (used by the paste-guard flush)
try:
    import readline  # line editing / history for input() on Unix
except ImportError:
    # Readline is Unix-centric; on Windows, this prevents a crash if pyreadline is missing.
    pass
from datetime import datetime, timedelta
from dotenv import load_dotenv
from openai import OpenAI
# Force UTF-8 for cross-platform compliance (fixes Windows CMD crashes).
# NOTE(review): only fires when the current encoding is not already UTF-8;
# the win32 wrapping above may have replaced the streams already.
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding and sys.stdout.encoding.lower() != 'utf-8':
    try:
        import codecs
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
        sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')
    except Exception:
        pass
# Platform-specific I/O imports for Universal Hardening.
# _USE_TERMIOS → POSIX raw-terminal control available;
# _USE_MSVCRT  → Windows console keyboard API available.
_USE_TERMIOS = False
_USE_MSVCRT = False
if os.name == 'posix':
    try:
        import termios, tty, select
        _USE_TERMIOS = True
    except ImportError:
        import select  # select alone still allows stdin polling
else:
    try:
        import msvcrt
        _USE_MSVCRT = True
    except ImportError:
        pass
def _redact_secrets(text: str) -> str:
"""
Hardened multi-layer redaction targeting AWS credentials, session tokens, and high-entropy secrets.
CRITICAL-01 FIX: Enhanced pattern coverage for all AWS credential types.
"""
if not isinstance(text, str): return text
# Layer 1: AWS Access Key IDs (AKIA*, ASIA*, AROA*) - 20 chars
# These are NOT secrets but should be redacted to prevent enumeration
# NOTE: AIDA (IAM User IDs) are preserved - they're not sensitive
text = re.sub(r'\b(AKIA|ASIA|AROA)[A-Z0-9]{16}\b', '[REDACTED_AWS_ACCESS_KEY_ID]', text)
# Layer 2: AWS Secret Access Keys (40 chars base64)
# Pattern: 40 alphanumeric+/+ characters, often following an access key
text = re.sub(r'(?<![A-Za-z0-9/+])[A-Za-z0-9/+]{40}(?![A-Za-z0-9/+=])', '[REDACTED_AWS_SECRET_KEY]', text)
# Layer 3: AWS Session Tokens (massive base64 blobs, 60+ chars)
text = re.sub(r'(?<![A-Za-z0-9/+])[A-Za-z0-9/+=]{60,}(?![A-Za-z0-9/+=])', '[REDACTED_SESSION_TOKEN]', text)
# FIX 2: Detect keys split across newlines
text_no_newlines = text.replace("\n", "").replace("\r", "")
if re.search(r"(AKIA|ASIA)[A-Z0-9]{16}", text_no_newlines):
text = re.sub(r"AKIA[\s\n\r]*([A-Z0-9][\s\n\r]*){16}", "[REDACTED_AWS_ACCESS_KEY_ID]", text)
text = re.sub(r"ASIA[\s\n\r]*([A-Z0-9][\s\n\r]*){16}", "[REDACTED_AWS_ACCESS_KEY_ID]", text)
# Layer 4: Medium-entropy secrets (16-59 chars) with whitelist protection
# Whitelist AWS resource IDs to prevent false positives
def scrub_medium_entropy(match):
val = match.group(0)
v_lower = val.lower()
# AIDA Whitelist: Preserve IAM User IDs (AIDA + 17 chars = 21 total, not sensitive)
if re.match(r'^AIDA[A-Z0-9]{17}$', val):
return val
# Whitelist: AWS resource ID prefixes (must be at start of string or after whitespace/punctuation)
aws_id_prefixes = ['i-', 'sg-', 'vpc-', 'subnet-', 'acl-', 'vol-', 'snap-',
'ami-', 'eni-', 'rtb-', 'igw-', 'nat-', 'vpce-', 'eipalloc-',
'arn:', 'subnet-', 'rtbassoc-']
# Check if this looks like an AWS resource ID
if any(v_lower.startswith(p) for p in aws_id_prefixes):
return val
# Check if it matches the pattern: prefix-hexstring (AWS resource ID pattern)
if re.match(r'^[a-z]{1,10}-[0-9a-f]{8,17}$', v_lower):
return val
# Check if it's a short alphanumeric string (likely not a secret)
if len(val) < 20 and re.match(r'^[a-zA-Z0-9-]+$', val):
return val
return '[REDACTED_SECRET]'
text = re.sub(r'(?<![A-Za-z0-9/+])[A-Za-z0-9/+=]{16,59}(?![A-Za-z0-9/+=])',
scrub_medium_entropy, text)
# Layer 5: JWT tokens (header.payload.signature pattern)
text = re.sub(r'\beyJ[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+\b', '[REDACTED_JWT_TOKEN]', text)
return text
def _sanitize_aws_tag(text: str) -> str:
"""
Data-Plane Defanger: Strict allowlist-based sanitization to prevent prompt injection.
CRITICAL-02 FIX: Allowlist approach instead of blocklist to prevent semantic injection.
"""
if not isinstance(text, str): return str(text)
# If input is already empty/whitespace, return as-is
if not text or not text.strip():
return text.strip() if text else ""
# Allowlist: Only permit alphanumeric, hyphens, underscores, dots, and spaces
# This prevents ALL structural characters that could be used for prompt injection
clean = re.sub(r'[^a-zA-Z0-9\-_.() ]', '', text)
# Strip path traversal sequences
while '..' in clean:
clean = clean.replace('..', '')
# Return "sanitized" if cleaning removed everything (handles Unicode edge cases)
if not clean or not clean.strip():
return "sanitized"
# Additional safety: Remove any remaining prompt-injection keywords (case-insensitive)
dangerous_patterns = [
(r'\bACTION\b', 'action'),
(r'\bACT\b', 'act'),
(r'\bOBSERVE\b', 'observe'),
(r'\bORIENT\b', 'orient'),
(r'\bDECIDE\b', 'decide'),
(r'\bSYSTEM\b', 'system'),
(r'\bUSER\b', 'user'),
(r'\bASSISTANT\b', 'assistant'),
(r'\bIGNORE\b', ''),
(r'\bOVERRIDE\b', ''),
]
for pattern, replacement in dangerous_patterns:
clean = re.sub(pattern, replacement, clean, flags=re.IGNORECASE)
# Limit length to prevent buffer overflow attacks
clean = clean[:200]
return clean.strip()
def _sanitize_path(path: str) -> str:
"""Prevent path traversal attacks."""
if not isinstance(path, str):
return ""
# Strip dangerous path sequences (loop until no more changes)
while any(seq in path for seq in ['../', './', '~/']):
path = path.replace("../", "").replace("./", "").replace("~/", "")
return path.strip()
def universal_flush():
    """Platform-agnostic terminal input buffer flusher to harden Paste Guard.

    POSIX: temporarily disables echo, then drains any queued stdin bytes
    (0.05s select poll per byte). Windows: drains the msvcrt keyboard buffer.
    No-op when stdin is not a TTY. Original terminal attributes are always
    restored in the finally block, even if the drain fails.
    """
    if not sys.stdin.isatty(): return
    if _USE_TERMIOS:
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)  # snapshot for restoration below
        try:
            new = termios.tcgetattr(fd)
            new[3] &= ~termios.ECHO  # index 3 = lflags; suppress echo while draining
            termios.tcsetattr(fd, termios.TCSADRAIN, new)
            while select.select([sys.stdin], [], [], 0.05)[0]:
                sys.stdin.read(1)
        except Exception: pass
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old)
    elif _USE_MSVCRT:
        while msvcrt.kbhit():
            msvcrt.getch()
load_dotenv()
# Persistence files (stored alongside this script)
KG_FILE = os.path.join(os.path.dirname(__file__), "session_kg.json")
STATE_FILE = os.path.join(os.path.dirname(__file__), "session_state.json")
# Rate-limit strike counter: {model_idx: consecutive_429_count}
_rate_strikes: dict = {}
# Per-model cooldown tracking: {model_idx: datetime reset time}
_cooldown_until: dict = {}
# Live quota map: {model_idx: {limit_req, remaining_req, limit_tok, remaining_tok, reset_req, reset_tok}}
_quota_map: dict = {}
# Global UI toggle for model-specific tips
_show_tips: bool = True
# Session context — volatile during run, persisted on exit / restored on startup
_session_ctx: dict = {
    "last_id": None,
    "last_sg_id": None,
    "last_vpc_id": None,
    "last_bucket": None,
    "last_rds_id": None,
    "last_access_key": None,  # BUG-02 FIX: was missing — /reset would never clear it
    "model_idx": 0,
}
# Globals for indexed targeting (Listing buffers) — rebuilt on each list call
_last_instances: list = []
_last_sgs: list = []
def _update_quota_from_headers(model_idx: int, headers: dict):
    """
    Merge any rate-limit headers present on a response (success OR error)
    into the live _quota_map entry for this model.

    Tolerates httpx/Azure quirks: header keys are lower-cased, values are
    stringified, and numeric values are parsed digits-only (Azure sometimes
    appends a trailing 's' or whitespace).
    """
    # Normalize to a flat {lowercase-str: str} dict.
    flat = {}
    for key, value in headers.items():
        try:
            flat[str(key).lower()] = str(value)
        except Exception:
            pass

    def _as_int(key, fallback=None):
        raw = flat.get(key)
        if raw is None:
            return fallback
        digits = re.sub(r'[^0-9]', '', str(raw))
        return int(digits) if digits else fallback

    entry = _quota_map.get(model_idx, {})
    entry['verified'] = True
    # Request limits — flexible mapping (Azure vs OpenAI header names).
    entry['limit_req'] = _as_int('x-ratelimit-limit-requests',
                                 _as_int('x-ratelimit-limit', entry.get('limit_req')))
    entry['remaining_req'] = _as_int('x-ratelimit-remaining-requests',
                                     _as_int('x-ratelimit-remaining', entry.get('remaining_req')))
    # Token limits
    entry['limit_tok'] = _as_int('x-ratelimit-limit-tokens', entry.get('limit_tok'))
    entry['remaining_tok'] = _as_int('x-ratelimit-remaining-tokens', entry.get('remaining_tok'))
    # Azure AI / GitHub Models specific fields
    entry['time_remaining'] = _as_int('x-ratelimit-timeremaining', entry.get('time_remaining'))
    entry['limit_type'] = flat.get('x-ratelimit-type', entry.get('limit_type'))
    # Reset timestamps are kept as raw (ISO) strings.
    entry['reset_req'] = flat.get('x-ratelimit-reset-requests', entry.get('reset_req'))
    entry['reset_tok'] = flat.get('x-ratelimit-reset-tokens', entry.get('reset_tok'))
    # Drop None values so the stored entry stays compact.
    _quota_map[model_idx] = {k: v for k, v in entry.items() if v is not None}
def _format_headers_json(headers: dict) -> str:
"""
Pretty-print HTTP response headers as indented JSON-style output.
Filters to rate-limit relevant keys only; shows full dict if none found.
"""
rl_keys = {k: v for k, v in headers.items()
if any(kw in k.lower() for kw in ('retry', 'ratelimit', 'x-ms', 'x-ratelimit'))}
target = rl_keys if rl_keys else dict(headers)
lines = ['{']
for k, v in target.items():
lines.append(f' "{k}": "{v}",')
if lines[-1].endswith(','):
lines[-1] = lines[-1][:-1] # remove trailing comma
lines.append('}')
return '\n'.join(lines)
def estimate_tokens(messages: list) -> int:
    """
    Rough token estimate for a chat message list.

    Rule of thumb: 1 token ≈ 4 characters, plus 4 tokens of per-message
    overhead. Not exact — use as a guide only.

    BUG FIX: a message whose 'content' key is present but explicitly None
    (common for assistant tool-call messages) previously crashed len();
    such messages now count as empty content.
    """
    total = 0
    for m in messages:
        # `or ''` covers both a missing key and an explicit None value.
        content = m.get('content') or ''
        total += len(content) // 4 + 4
    return total
def _quota_req_bar(model_idx: int, width: int = 15) -> str:
    """Compact ASCII gauge of remaining request quota; '' when unknown,
    raw numbers when the limit itself is unknown/zero."""
    quota = _quota_map.get(model_idx, {})
    remaining = quota.get('remaining_req')
    limit = quota.get('limit_req')
    if remaining is None:
        return ''
    if not limit:
        return f"Remaining: {remaining} req (Limit: Unknown)"
    ratio = remaining / limit
    cells = int(ratio * width)
    gauge = '\u2588' * cells + '\u2591' * (width - cells)
    return f'[{gauge}] {remaining}/{limit} req ({int(ratio * 100)}%)'
def _quota_tok_bar(model_idx: int, width: int = 15) -> str:
    """Compact ASCII gauge of remaining token quota; '' when unknown,
    raw numbers when the limit itself is unknown/zero."""
    quota = _quota_map.get(model_idx, {})
    remaining = quota.get('remaining_tok')
    limit = quota.get('limit_tok')
    if remaining is None:
        return ''
    if not limit:
        return f"Remaining: {remaining} tok (Limit: Unknown)"
    ratio = remaining / limit
    cells = int(ratio * width)
    gauge = '\u2588' * cells + '\u2591' * (width - cells)
    return f'[{gauge}] {remaining}/{limit} tok ({int(ratio * 100)}%)'
def _reset_label(model_idx: int) -> str:
    """Human-readable 'resets in ~…' label from _quota_map, or '' if unknown."""
    from datetime import datetime as _dt
    quota = _quota_map.get(model_idx, {})

    def _fmt(seconds: int) -> str:
        # Render the largest meaningful pair of units.
        hours, rem = divmod(seconds, 3600)
        minutes, secs = divmod(rem, 60)
        if hours: return f"resets in ~{hours}h {minutes}m"
        if minutes: return f"resets in ~{minutes}m {secs}s"
        return f"resets in ~{secs}s"

    # Prefer exact ISO reset timestamps when the provider supplies them.
    iso = quota.get('reset_req') or quota.get('reset_tok')
    if iso:
        try:
            target = _dt.fromisoformat(iso.replace('Z', '+00:00'))
            now_local = _dt.now().astimezone()
            remaining = max(0, int((target - now_local).total_seconds()))
            return _fmt(remaining) if remaining else ""
        except Exception:
            pass  # naive timestamps / parse failures fall back below
    ttl = quota.get('time_remaining')
    if ttl is not None and ttl > 0:
        return _fmt(int(ttl))
    return ""
# ─── Model Registry ────────────────────────────────────────────────────────────
# (display_name, api_id, max_tokens, temperature, description)
# NOTE: entries are 5-tuples — the original comment omitted the trailing
# description string.
MODEL_REGISTRY = [
    ("gpt-4o-mini", "gpt-4o-mini", 1500, 0.2, "Fast & efficient for general audits."),
    ("Llama-3.3-70B-Instruct", "Llama-3.3-70B-Instruct", 1500, 0.1, "Enterprise reasoning; highly capable."),
    ("Phi-4", "Phi-4", 2000, 0.2, "Highly compliant; best for rule audits."),
    ("DeepSeek-V3", "DeepSeek-V3-0324", 2000, 0.3, "Deep reasoning; best for threat hunting."),
    ("gpt-4o", "gpt-4o", 3000, 0.2, "Most capable for complex analysis."),
]
# Security group used to isolate instances; must be configured via .env.
QUARANTINE_SG_ID = os.environ.get("QUARANTINE_SG_ID", "UNCONFIGURED")
# Region used by every AWS client this tool creates.
EC2_REGION = os.environ.get("AWS_REGION", "us-east-1")
# ─── Utilities ─────────────────────────────────────────────────────────────────
def ts():
    """Current time as a bracketed timestamp string, e.g. '[14:05:09]'."""
    return datetime.now().strftime("[%H:%M:%S]")
def spinner_start(ui_state=None):
    """Start a rotating / - \\ | spinner in-line. Returns (stop_event, thread).

    ui_state may be a status string (shown verbatim), a dict with a 'name'
    key, or anything else (falls back to the generic 'AI' label).
    """
    stop_event = threading.Event()

    def _worker():
        # Cycle glyphs until signalled, then wipe the spinner line.
        for glyph in itertools.cycle(["|", "/", "-", "\\"]):
            if stop_event.is_set():
                break
            if isinstance(ui_state, str):
                sys.stdout.write(f"\r{ts()} [{glyph}] {ui_state} ")
            else:
                label = ui_state.get('name', 'AI') if isinstance(ui_state, dict) else 'AI'
                sys.stdout.write(f"\r{ts()} [{glyph}] {label} is reasoning... ")
            sys.stdout.flush()
            time.sleep(0.1)
        sys.stdout.write("\r" + " " * 80 + "\r")  # clear spinner line
        sys.stdout.flush()

    worker = threading.Thread(target=_worker, daemon=True)
    worker.start()
    return stop_event, worker
def spinner_stop(stop_event, t):
    """Signal the spinner thread to stop, then wait up to 1s for it to exit."""
    stop_event.set()
    t.join(timeout=1)
# ─── Pre-flight ────────────────────────────────────────────────────────────────
def run_preflight():
    """Validate .env presence, GitHub token, and AWS credentials at startup.

    Returns True when every check passes; prints a targeted error message and
    returns False on the first failure. The spinner is always stopped before
    any output so error text does not interleave with the animation.
    """
    print_section("Pre-Flight Checks")
    print_info("Validating environment and credentials...")
    stop, t = spinner_start("\033[36m Checking AWS & GitHub authentication...\033[0m")
    try:
        # Check if .env actually exists
        env_path = os.path.join(os.getcwd(), '.env')
        if not os.path.exists(env_path):
            spinner_stop(stop, t)
            print_error("ERROR: .env file not found")
            print(f" {Colors.DIM}Searching in: {env_path}{Colors.RESET}")
            print_info("Please create a .env file based on .env.example to start")
            return False
        # Check for GitHub Models API Token (drives the LLM inference engine)
        gh_token = os.environ.get("GITHUB_TOKEN")
        if not gh_token:
            spinner_stop(stop, t)
            print_error("ERROR: No GITHUB_TOKEN set")
            print_info("Please ensure your .env file contains your GitHub PAT")
            print(f" {Colors.DIM}The LLM inference engine requires this token{Colors.RESET}")
            return False
        # Check if boto3 can find credentials (env vars, profile, etc.)
        credentials = boto3.Session().get_credentials()
        if not credentials:
            spinner_stop(stop, t)
            print_error("ERROR: No AWS credentials found")
            print_info("Please ensure your .env file contains AWS credentials")
            print(f" {Colors.DIM}Required: AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY{Colors.RESET}")
            return False
        # STS round-trip proves the credentials are actually valid, not just present.
        boto3.client('sts').get_caller_identity()
        spinner_stop(stop, t)
        print_success("Environment validated")
        print_success("AWS credentials verified")
        print_success("GitHub token verified")
        print(f"\n{Colors.BRIGHT_GREEN}✓ Pre-flight complete. All systems operational.{Colors.RESET}\n")
        return True
    except Exception as e:
        spinner_stop(stop, t)
        print_error("AWS connectivity failed")
        print_warning("Please verify your .env file has valid credentials")
        print(f" {Colors.DIM}Error: {e}{Colors.RESET}")
        return False
# ─── AWS Tools ─────────────────────────────────────────────────────────────────
# Lazy AWS client cache — avoids recreating clients on every tool call
_aws_clients: dict = {}

def _client(svc: str):
    """
    Lazy boto3 client factory for the 14 supported AWS services.

    Clients are created on first use in EC2_REGION and cached in
    _aws_clients so repeated tool calls reuse the same client.

    Raises:
        ValueError: if svc is not one of the 14 supported service names.

    IMPROVEMENT: every branch of the original 14-way if/elif chain called
    boto3.client(svc, region_name=EC2_REGION) identically, so the chain is
    replaced by a membership check plus a single data-driven call.
    """
    # Core services (11): ec2, iam, s3, logs, rds, lambda, cloudwatch,
    # cloudtrail, ce, guardduty, kms — extended services (3): dynamodb, efs, wafv2.
    supported = {
        'ec2', 'iam', 's3', 'logs', 'rds', 'lambda', 'cloudwatch',
        'cloudtrail', 'ce', 'guardduty', 'kms', 'dynamodb', 'efs', 'wafv2',
    }
    if svc not in supported:
        raise ValueError(f"Unsupported AWS service: {svc}. Only 14 services are supported.")
    if svc not in _aws_clients:
        _aws_clients[svc] = boto3.client(svc, region_name=EC2_REGION)
    return _aws_clients[svc]
def ec2():
    """Convenience accessor for the cached EC2 client."""
    return _client('ec2')
def tool_list_resources(kg=None):
    """List EC2 instances. If kg is provided, auto-index Name+State for Total Recall mode."""
    global _last_instances
    try:
        _last_instances = []
        lines = []
        for reservation in ec2().describe_instances()['Reservations']:
            for inst in reservation['Instances']:
                instance_id = inst['InstanceId']
                inst_state = inst['State']['Name']
                vpc = inst.get('VpcId', 'N/A')
                _last_instances.append(instance_id)
                position = len(_last_instances)
                tag_value = next((t['Value'] for t in inst.get('Tags', []) if t['Key'] == 'Name'), 'NoName')
                display_name = _sanitize_aws_tag(tag_value)
                group_names = [sg['GroupName'] for sg in inst.get('SecurityGroups', [])]
                # AUTO-INDEXING: force-update metadata so VPC/Name never drift.
                if kg is not None:
                    kg.setdefault('instances', {})[instance_id] = (
                        f"Name: {display_name} | Status: {inst_state} | VPC: {vpc}"
                    )
                if inst_state == 'running':
                    state_label = " (RUNNING)"
                else:
                    state_label = f" ({inst_state.upper()})"
                lines.append(f"[{position}] {instance_id} {display_name:<18} {state_label:<12} | SGs: {', '.join(group_names)}")
        if not lines:
            return "No instances found in this region."
        return "EC2 Instances Found:\n " + "\n ".join(lines)
    except Exception as e:
        return f"Error: {e}"
def tool_list_security_groups():
    """Enumerate security groups, caching their IDs for indexed targeting."""
    global _last_sgs
    try:
        _last_sgs = []
        entries = []
        for group in ec2().describe_security_groups()['SecurityGroups']:
            _last_sgs.append(group['GroupId'])
            entries.append(f"[{len(_last_sgs)}] {group['GroupId']} ({group['GroupName']})")
        if not entries:
            return "No security groups found."
        return "Security Groups:\n " + "\n ".join(entries)
    except Exception as e:
        return f"Error: {e}"
def tool_inspect_resource(instance_id):
    """Return a one-line ' | '-joined summary of a single EC2 instance."""
    try:
        reservation = ec2().describe_instances(InstanceIds=[instance_id])['Reservations'][0]
        inst = reservation['Instances'][0]
        tag_value = next((t['Value'] for t in inst.get('Tags', []) if t['Key'] == 'Name'), 'NoName')
        name = _sanitize_aws_tag(tag_value)
        groups = [f"{sg['GroupId']} ({sg['GroupName']})" for sg in inst.get('SecurityGroups', [])]
        fields = [
            f"Instance: {instance_id}",
            f"Name: {name}",
            f"State: {inst['State']['Name']}",
            f"VPC: {inst.get('VpcId','N/A')}",
            f"Subnet: {inst.get('SubnetId','N/A')}",
            f"Type: {inst.get('InstanceType','N/A')}",
            f"Public IP: {inst.get('PublicIpAddress','none')}",
            f"Private IP: {inst.get('PrivateIpAddress','N/A')}",
            f"SGs: {', '.join(groups)}",
        ]
        return " | ".join(fields)
    except Exception as e:
        return f"Error: {e}"
def tool_sg_rules(sg_id):
    """Fetch real inbound + outbound rules for a specific security group."""
    try:
        group = ec2().describe_security_groups(GroupIds=[sg_id])['SecurityGroups'][0]

        def describe(rule, direction):
            # Collapse one permission entry into "PROTO port X → sources".
            protocol = rule.get('IpProtocol', '-1')
            lo = rule.get('FromPort', '*')
            hi = rule.get('ToPort', '*')
            origins = [ip['CidrIp'] for ip in rule.get('IpRanges', [])]
            origins += [ip['CidrIpv6'] for ip in rule.get('Ipv6Ranges', [])]
            origins += [f"SG:{g['GroupId']}" for g in rule.get('UserIdGroupPairs', [])]
            source_list = ", ".join(origins) or "none"
            # Protocol -1 means every protocol/port in the EC2 API.
            if protocol == '-1':
                return f"ALL TRAFFIC → {source_list}"
            ports = str(lo) if lo == hi else f"{lo}-{hi}"
            return f"{protocol.upper()} port {ports} → {source_list}"

        ingress = [describe(r, 'in') for r in group.get('IpPermissions', [])]
        egress = [describe(r, 'out') for r in group.get('IpPermissionsEgress', [])]
        in_str = "\n ".join(ingress) if ingress else "NONE (deny all inbound)"
        out_str = "\n ".join(egress) if egress else "NONE (deny all outbound)"
        return (f"SG {sg_id} ({group['GroupName']}) Rules:\n"
                f" Inbound:\n {in_str}\n"
                f" Outbound:\n {out_str}")
    except Exception as e:
        return f"Error: {e}"
def tool_vpc_info(vpc_id):
    """Summarize a VPC: Name tag, CIDR, state, default flag, and its subnets."""
    try:
        vpc = ec2().describe_vpcs(VpcIds=[vpc_id])['Vpcs'][0]
        tag_value = next((t['Value'] for t in vpc.get('Tags', []) if t['Key'] == 'Name'), 'Unnamed')
        label = _sanitize_aws_tag(tag_value)
        subnet_list = ec2().describe_subnets(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])['Subnets']
        subnet_desc = ", ".join(f"{s['SubnetId']} ({s['CidrBlock']})" for s in subnet_list)
        return (f"VPC {vpc_id} | Name: {label} | CIDR: {vpc['CidrBlock']} | "
                f"State: {vpc['State']} | Default: {vpc['IsDefault']} | "
                f"Subnets: {subnet_desc or 'none'}")
    except Exception as e:
        return f"Error: {e}"
def _sanitize_logs(raw: str) -> str:
    """
    Strip secrets and cryptographic material (SSH keys, PEM blocks) from
    console logs before they are replayed in the chat history, preventing
    content-filter trips.

    BUG FIX: the _redact_secrets() result was previously computed and then
    discarded — the PEM/base64 passes and the return value operated on the
    unredacted input, so AWS keys leaked through. All subsequent passes now
    operate on the redacted text.
    """
    if not raw:
        return ""
    redacted = _redact_secrets(raw)
    # FIX 4: Handle Unicode surrogate pairs — empty result means everything
    # was stripped, so return the sentinel instead of an empty string.
    if not redacted or redacted.strip() == "":
        return "sanitized"
    # Remove PEM-style blocks (BEGIN/END markers + payload)
    redacted = re.sub(r'-----BEGIN [\w ]+-----.*?-----END [\w ]+-----',
                      '[KEY REDACTED]', redacted, flags=re.DOTALL)
    # Remove standalone long base64 lines (>60 chars of alphanumeric+/+=)
    redacted = re.sub(r'^[A-Za-z0-9+/=]{60,}$',
                      '[KEY REDACTED]', redacted, flags=re.MULTILINE)
    return redacted.strip()
def tool_get_logs(instance_id):
    """Fetch the tail (last 2000 chars) of an instance's console output, sanitized."""
    try:
        response = ec2().get_console_output(InstanceId=instance_id)
        tail = (response.get('Output', '(no output)') or '(no output)')[-2000:]
        return "Logs: " + _sanitize_logs(tail)
    except Exception as e:
        return f"Error: {e}"
def tool_cw_logs(instance_id):
    """Stream the 30 most recent CloudWatch log events for this instance.

    Scans all log groups for names matching the instance ID or common
    SSM/cloud-init locations, then pulls the newest stream of (at most) the
    first two matches. Every message is passed through _sanitize_logs.
    """
    try:
        cw = _client('logs')  # BUG-05 FIX: use lazy cache instead of raw boto3.client
        # Look for log groups referencing the instance ID or common SSM/cloud-init groups
        candidate_prefixes = [
            f"/aws/ec2/{instance_id}",
            "/var/log/cloud-init",
            "/var/log/messages",
            "/aws/ssm",
        ]
        groups = cw.describe_log_groups().get('logGroups', [])
        matched = [g['logGroupName'] for g in groups
                   if any(p in g['logGroupName'] for p in candidate_prefixes)]
        if not matched:
            return ("No CloudWatch log groups found for this instance. "
                    "Ensure the CloudWatch agent is installed and the instance "
                    "has the 'CloudWatchAgentServerPolicy' IAM policy attached.")
        lines = []
        for group in matched[:2]:  # Limit to 2 groups to stay concise
            # Only the newest stream: ordered by last event, descending, limit 1.
            streams = cw.describe_log_streams(
                logGroupName=group, orderBy='LastEventTime',
                descending=True, limit=1
            ).get('logStreams', [])
            if not streams: continue
            events = cw.get_log_events(
                logGroupName=group,
                logStreamName=streams[0]['logStreamName'],
                limit=30, startFromHead=False
            ).get('events', [])
            lines.append(f"[Group: {group}]")
            for ev in events:
                # CloudWatch timestamps are epoch milliseconds.
                ts_ev = datetime.fromtimestamp(ev['timestamp']/1000).strftime('%H:%M:%S')
                msg = _sanitize_logs(ev['message'].strip())
                lines.append(f" {ts_ev} {msg}")
        return "\n".join(lines) if lines else "No log events found in matched groups."
    except Exception as e: return f"Error fetching CloudWatch logs: {e}"
def tool_iam_check(instance_id):
    """Check the IAM instance profile and validate SSM required policies."""
    try:
        ec2c = ec2()
        iamc = _client('iam')  # BUG-05 FIX: use lazy cache instead of raw boto3.client
        # Find the instance profile association for this instance, if any.
        associations = ec2c.describe_iam_instance_profile_associations(
            Filters=[{'Name': 'instance-id', 'Values': [instance_id]}]
        ).get('IamInstanceProfileAssociations', [])
        if not associations:
            return ("No IAM instance profile attached to this instance. "
                    "SSM requires the 'AmazonSSMManagedInstanceCore' policy. "
                    "Action: Attach the 'AmazonSSMRoleForInstancesQuickSetup' role.")
        # The profile name is the last path component of its ARN.
        arn = associations[0]['IamInstanceProfile']['Arn']
        profile_name = arn.rsplit('/', 1)[-1]
        profile_roles = iamc.get_instance_profile(
            InstanceProfileName=profile_name
        )['InstanceProfile'].get('Roles', [])
        if not profile_roles:
            return f"Profile '{profile_name}' has no roles attached."
        role_name = profile_roles[0]['RoleName']
        # Check the role's managed policies for SSM and CloudWatch access.
        attached = iamc.list_attached_role_policies(RoleName=role_name)['AttachedPolicies']
        policy_names = [p['PolicyName'] for p in attached]
        ssm_ok = any('SSM' in name or 'SystemsManager' in name for name in policy_names)
        cw_ok = any('CloudWatch' in name for name in policy_names)
        report = [
            f"Instance Profile : {profile_name}",
            f"IAM Role : {role_name}",
            f"Attached Policies: {', '.join(policy_names) or 'NONE'}",
            f"SSM Access : {'✔ OK' if ssm_ok else '✘ MISSING — add AmazonSSMManagedInstanceCore'}",
            f"CloudWatch Access: {'✔ OK' if cw_ok else '✘ MISSING — add CloudWatchAgentServerPolicy'}",
        ]
        return "\n".join(report)
    except Exception as e:
        message = str(e)
        if 'EndpointConnectionError' in message:
            return ("Error: Could not connect to IAM/STS endpoint. This instance may be in an isolated "
                    "or air-gapped subnet without a VPC Endpoint (com.amazonaws.region.sts).")
        return f"Error: {e}"
# ─── Cost static map (on-demand us-east-1, USD/hr, approximate) ───────────────
# NOTE(review): hard-coded pricing snapshot — values will drift from current
# AWS on-demand rates; verify before relying on the estimates.
_COST_MAP = {
    # Burstable (t-family)
    "t2.micro": 0.0116, "t2.small": 0.023, "t2.medium": 0.0464,
    "t3.micro": 0.0104, "t3.small": 0.0208, "t3.medium": 0.0416, "t3.large": 0.0832,
    "t3a.micro": 0.0094, "t3a.small": 0.0188,
    # General purpose (m5), compute-optimized (c5), memory-optimized (r5)
    "m5.large": 0.096, "m5.xlarge": 0.192, "m5.2xlarge": 0.384,
    "c5.large": 0.085, "c5.xlarge": 0.17,
    "r5.large": 0.126, "r5.xlarge": 0.252,
    # GPU (p3)
    "p3.2xlarge": 3.06, "p3.8xlarge": 12.24,
}
def tool_cost_insight(instance_id):
    """Return on-demand cost estimate and rightsizing recommendations.

    Looks the instance type up in the static _COST_MAP table and, for
    xlarge/2xlarge/4xlarge sizes, suggests the next size down with the
    approximate monthly saving. Types missing from the table get a
    "(type not in table)" line instead of bogus $None figures.
    """
    try:
        ec2c = ec2()
        inst = ec2c.describe_instances(InstanceIds=[instance_id])[
            'Reservations'][0]['Instances'][0]
        itype = inst.get('InstanceType', 'unknown')
        state = inst['State']['Name']
        # BUG-06 FIX: removed dead cpu_credits variable (was fetched but never used)
        hourly = _COST_MAP.get(itype)
        daily = round(hourly * 24, 3) if hourly else None
        monthly = round(hourly * 24 * 30, 2) if hourly else None
        # Rightsizing hint.
        # BUG FIX: the old chained str.replace('xlarge','large') matched the
        # 'xlarge' substring inside '2xlarge', producing invalid types like
        # 'm5.2large' — so 2xlarge/4xlarge never yielded a valid suggestion.
        # Map each size suffix to the next size down explicitly, and only
        # compute savings when the current type's price is known (avoids a
        # TypeError on `hourly - sh` when hourly is None).
        hint = ""
        downsize = {'xlarge': 'large', '2xlarge': 'xlarge', '4xlarge': '2xlarge'}
        parts = itype.split('.')
        if hourly and len(parts) == 2 and parts[1] in downsize:
            smaller = f"{parts[0]}.{downsize[parts[1]]}"
            sh = _COST_MAP.get(smaller)
            if sh:
                saving = round((hourly - sh) * 24 * 30, 2)
                hint = f"\n Rightsizing: Downgrade to {smaller} → save ~${saving}/month"
        lines = [
            f"Instance Type : {itype} ({state})",
            f"Hourly Cost : ${hourly}/hr" if hourly else "Hourly Cost : (type not in table)",
            # BUG FIX: suppress daily/monthly lines (previously "$None") when
            # the type is not in the cost table.
            f"Daily Est. : ${daily}" if daily is not None else "",
            f"Monthly Est. : ${monthly}" if monthly is not None else "",
            hint,
        ]
        return "\n".join(l for l in lines if l)
    except Exception as e:
        return f"Error: {e}"
import ipaddress
def is_private_cidr(cidr: str) -> bool:
    """Return True for RFC 1918 / link-local (APIPA) CIDR ranges.

    Used to suppress internet-exposure alerts for addresses that are not
    reachable from the public internet. Empty or unparseable input is
    treated as not-private (False).
    """
    if not cidr:
        return False
    try:
        network = ipaddress.ip_network(cidr, strict=False)
    except ValueError:
        return False
    return network.is_private or network.is_link_local
def auto_remediation_hint(sg_rules_output: str) -> str:
    """
    Inspect SG rules output and return remediation suggestions.

    Splits the rule dump into inbound/outbound sections, flags risky inbound
    exposure (ALL TRAFFIC / SSH / RDP open to the internet), and reports
    fully-open egress as informational only. Private (RFC 1918) CIDRs are
    excluded via is_private_cidr to prevent false positives.
    Returns a newline-joined list of hints, or "" when nothing is flagged.
    """
    hints = []
    parts = re.split(r'Outbound:', sg_rules_output, maxsplit=1, flags=re.IGNORECASE)
    inbound_section = parts[0]
    outbound_section = parts[1] if len(parts) > 1 else ""
    # INBOUND checks only
    if re.search(r'ALL TRAFFIC.*0\.0\.0\.0/0', inbound_section):
        hints.append("[REMEDIATION \u26a0] Inbound ALL TRAFFIC open to 0.0.0.0/0 "
                     "\u2014 restrict to specific ports and trusted CIDRs.")
    # Port 22 SSH check with CIDR extraction
    ssh_match = re.search(r'TCP.*(?:22|SSH).*?(\d{1,3}(?:\.\d{1,3}){3}/\d+)', inbound_section, re.IGNORECASE)
    if ssh_match:
        cidr = ssh_match.group(1)
        if "0.0.0.0/0" in cidr:
            hints.append("[REMEDIATION \u26a0] Port 22 (SSH) open to 0.0.0.0/0 "
                         "\u2014 restrict to your office/VPN CIDR only.")
        elif not is_private_cidr(cidr):
            # BUG FIX: typo "intented" -> "intended" in the user-facing message.
            hints.append(f"[INFO \u2139] Port 22 (SSH) open to public CIDR {cidr}. "
                         "Verify if this is intended or if it should be VPC-only.")
    # Port 3389 RDP check
    rdp_match = re.search(r'TCP.*(?:3389|RDP).*?(\d{1,3}(?:\.\d{1,3}){3}/\d+)', inbound_section, re.IGNORECASE)
    if rdp_match:
        cidr = rdp_match.group(1)
        if "0.0.0.0/0" in cidr:
            hints.append("[REMEDIATION \u26a1] Port 3389 (RDP) open to the internet "
                         "\u2014 restrict or place behind a bastion host immediately.")
        elif not is_private_cidr(cidr):
            hints.append(f"[REMEDIATION \u26a1] Port 3389 (RDP) open to public CIDR {cidr}. "
                         "Move to a VPN or Bastion.")
    # OUTBOUND: informational only
    if re.search(r'ALL TRAFFIC.*0\.0\.0\.0/0', outbound_section):
        hints.append("[INFO] Outbound fully open (0.0.0.0/0) "
                     "\u2014 consider restricting egress to known endpoints.")
    return "\n".join(hints)
def interpret_sg_rules(sg_rules_output: str) -> str:
    """
    Ground Truth Verification: deterministic Python reading of raw SG rule text.

    Injected into SYSTEM OBSERVATION FEEDBACK after every SG_RULES call so each
    LLM model receives a factually correct interpretation of rule semantics
    instead of inferring them itself.
    """
    inbound_raw, _, outbound_raw = sg_rules_output.partition('Outbound:')

    def explain(raw_line):
        """Annotate one rule line with its verified meaning, or '' to skip."""
        text = raw_line.strip()
        header_tokens = ("SG sg-", "Inbound:", "Outbound:", "Rules:")
        if not text or any(tok in text for tok in header_tokens):
            return ""
        if "NONE" in text:
            return " - No rules -> deny-all (most restrictive)"
        sg_ref = re.search(r"SG:(sg-[0-9a-f]+)", text)
        if sg_ref:
            return (f" - {text}\n"
                    f" GROUND TRUTH: Traffic ONLY from instances sharing "
                    f"security group {sg_ref.group(1)}. NOT open to the internet.")
        if "0.0.0.0/0" in text:
            return f" - {text}\n GROUND TRUTH: Open to the ENTIRE internet. HIGH EXPOSURE."
        if "::/0" in text:
            return f" - {text}\n GROUND TRUTH: Open to all IPv6 internet. HIGH EXPOSURE."
        cidr_match = re.search(r"(\d{1,3}(?:\.\d{1,3}){3}/\d+)", text)
        if cidr_match is None:
            return f" - {text}"
        target_cidr = cidr_match.group(1)
        if is_private_cidr(target_cidr):
            label = "PRIVATE NETWORK (RFC 1918/Local) - Secure context."
        else:
            label = "PUBLIC INTERNET - POTENTIAL EXPOSURE if not a trusted IP."
        return (f" - {text}\n"
                f" GROUND TRUTH: Restricted to CIDR {target_cidr}. [{label}]")

    ib = "\n".join(x for x in map(explain, inbound_raw.splitlines()) if x)
    ob = "\n".join(x for x in map(explain, outbound_raw.splitlines()) if x)
    return (
        "[GROUND TRUTH] AWS-verified rule semantics (use this to answer the user):\n"
        f"Inbound :\n{ib or ' - No rules -> deny-all'}\n"
        f"Outbound:\n{ob or ' - No rules -> deny-all'}"
    )
# ─── Persistent Knowledge Graph I/O ───────────────────────────────────────────
def kg_load():
    """
    Load the Knowledge Graph from disk, or return a fresh empty graph.

    Tries the XOR-obfuscated format first, then falls back to plaintext JSON
    for backward compatibility. Any unreadable file silently yields a fresh
    graph ({'instances': {}, 'sg_rules': {}, 'vpcs': {}}).

    NOTE(security-review): XOR with a key derived from GITHUB_TOKEN is
    obfuscation, not encryption — consider a real AEAD cipher with a
    dedicated key for HIGH-01.
    """
    if not os.path.exists(KG_FILE):
        return {'instances': {}, 'sg_rules': {}, 'vpcs': {}}
    try:
        # Try loading as encrypted
        with open(KG_FILE, 'rb') as f:
            encrypted = bytearray(f.read())
        # BUG FIX: `os.environ.get('GITHUB_TOKEN', 'default_key')` returns ""
        # when the variable is set but empty, producing a zero-length key and
        # a ZeroDivisionError in the modulo below (silently swallowed -> data
        # loss). `or 'default_key'` covers both unset AND empty.
        encryption_key = (os.environ.get('GITHUB_TOKEN') or 'default_key')[:32].encode()
        decrypted = bytearray(len(encrypted))
        for i in range(len(encrypted)):
            decrypted[i] = encrypted[i] ^ encryption_key[i % len(encryption_key)]
        data = json.loads(decrypted.decode('utf-8'))
        print(f"[*] Knowledge Graph restored (encrypted) from {KG_FILE}")
        return data
    except (json.JSONDecodeError, UnicodeDecodeError):
        # Try loading as plaintext (backward compatibility)
        try:
            with open(KG_FILE, 'r') as f:
                data = json.load(f)
            print(f"[*] Knowledge Graph restored from {KG_FILE} (unencrypted - will encrypt on save)")
            return data
        except Exception:
            pass
    except Exception:
        pass
    return {'instances': {}, 'sg_rules': {}, 'vpcs': {}}
def kg_save(kg):
"""
Persist the Knowledge Graph to disk using an atomic write pattern with encryption.
HIGH-01 FIX: Encrypt KG data and set restrictive permissions.
"""
import tempfile
try:
# Validate KG structure before saving
if not isinstance(kg, dict):
raise ValueError("KG must be a dictionary")
# Serialize to JSON
json_data = json.dumps(kg, indent=2)
# Simple XOR encryption
encryption_key = os.environ.get('GITHUB_TOKEN', 'default_key')[:32].encode()
encrypted = bytearray(json_data.encode())
for i in range(len(encrypted)):
encrypted[i] ^= encryption_key[i % len(encryption_key)]
# Atomic write with encryption
base_dir = os.path.dirname(os.path.abspath(KG_FILE))
fd, temp_path = tempfile.mkstemp(dir=base_dir, prefix=".kg_", suffix=".tmp")
try:
with os.fdopen(fd, 'wb') as f:
f.write(encrypted)
os.replace(temp_path, KG_FILE)