From 58d6af6ba1477fab24497e81b267757e3ab7426f Mon Sep 17 00:00:00 2001
From: Durgesh Ninave
Date: Wed, 8 Oct 2025 00:01:40 +0530
Subject: [PATCH 1/2] Add service to auto-remove expired OTPs

---
 .github/workflows/lint.yml  |  34 +++++++++++
 .github/workflows/test.yml  |  43 ++++++++++++++
 .github/workflows/trivy.yml |  24 ++++++++
 Dockerfile                  |  17 ++++++
 README.md                   |  50 +++++++++++++++++
 cleanup.py                  | 110 ++++++++++++++++++++++++++++++++++++
 config.py                   |  28 +++++++++
 docker-compose.yml          |  18 ++++++
 models.py                   |  25 +++++++++
 requirements.txt            |   5 ++
 10 files changed, 354 insertions(+)
 create mode 100644 .github/workflows/lint.yml
 create mode 100644 .github/workflows/test.yml
 create mode 100644 .github/workflows/trivy.yml
 create mode 100644 Dockerfile
 create mode 100644 README.md
 create mode 100644 cleanup.py
 create mode 100644 config.py
 create mode 100644 docker-compose.yml
 create mode 100644 models.py
 create mode 100644 requirements.txt

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..7a04c10
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,34 @@
+name: Python Lint
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  python-lint:
+    name: Lint Python Code
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.13'
+
+      - name: Install linters
+        run: |
+          python -m pip install --upgrade pip
+          pip install flake8 black isort
+
+      - name: Run flake8
+        run: |
+          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+
+      - name: Check import sorting with isort
+        run: |
+          isort . --check-only
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..709a4f1
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,43 @@
+name: Python Tests with Coverage
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  test:
+    if: false # temporarily disables the job
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        python-version: ['3.11', '3.12', '3.13']
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+          pip install pytest pytest-cov
+
+      - name: Run tests with coverage
+        run: |
+          pytest --cov=your_package_name --cov-report=term --cov-report=xml --cov-fail-under=80
+
+      # Optional: Upload coverage report to Codecov (for public repos or with CODECOV_TOKEN)
+      - name: Upload to Codecov
+        uses: codecov/codecov-action@v3
+        with:
+          files: coverage.xml
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} # Only needed for private repos
diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml
new file mode 100644
index 0000000..5b006b7
--- /dev/null
+++ b/.github/workflows/trivy.yml
@@ -0,0 +1,24 @@
+name: Trivy Security Scan
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  trivy-scan:
+    runs-on: ubuntu-latest
+    name: Trivy FS Scan
+
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v3
+
+      - name: Run Trivy vulnerability scanner on file system
+        uses: aquasecurity/trivy-action@master
+        with:
+          scan-type: 'fs'
+          scan-ref: '.'
+          scanners: 'vuln,secret,config'
+          ignore-unfixed: true
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..67427c6
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,17 @@
+FROM python:3.13-alpine
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies (use Alpine package manager)
+# 'apk add --no-cache' keeps the image small; psycopg2-binary ships no musl wheels, so build deps are needed on Alpine
+RUN apk add --no-cache postgresql-client gcc musl-dev postgresql-dev
+
+# Copy requirements first for better caching
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the rest of the application
+COPY . .
+
+CMD ["python", "cleanup.py"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..325e5cc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,50 @@
+# OTP Cleanup Service
+
+This service handles the automatic cleanup of expired OTPs in the JamAndFlow database.
+
+## Setup
+
+1. Create a `.env` file:
+```env
+POSTGRES_USER=postgres
+POSTGRES_PASSWORD=PassWord
+POSTGRES_DB=JamAndFlow
+POSTGRES_HOST=db
+POSTGRES_PORT=5432
+CLEANUP_INTERVAL_SECONDS=300 # 5 minutes
+```
+
+2. Build and run with Docker Compose:
+```bash
+docker compose up --build
+```
+
+## Configuration
+
+- `POSTGRES_USER`: PostgreSQL username
+- `POSTGRES_PASSWORD`: PostgreSQL password
+- `POSTGRES_DB`: PostgreSQL database name
+- `POSTGRES_HOST`: PostgreSQL host (default: `db` for Docker Compose)
+- `POSTGRES_PORT`: PostgreSQL port (default: `5432`)
+- `CLEANUP_INTERVAL_SECONDS`: Interval between cleanup runs (default: 300 seconds / 5 minutes)
+
+## Docker Network
+
+This service needs to be on the same network as your main JamAndFlow API:
+
+```bash
+# Create the network if it doesn't exist
+docker network create jamandflows-network
+```
+
+## Development
+
+1. Install dependencies:
+```bash
+pip install -r requirements.txt
+```
+
+2. Run locally:
+```bash
+python cleanup.py
+```
diff --git a/cleanup.py b/cleanup.py
new file mode 100644
index 0000000..bedbdf0
--- /dev/null
+++ b/cleanup.py
@@ -0,0 +1,110 @@
+import asyncio
+import logging
+import time
+from datetime import datetime, timezone
+
+from sqlalchemy import create_engine, text
+from sqlalchemy.orm import sessionmaker
+
+from config import settings
+from models import Base, OTP
+
+# Set up logging with explicit handler control
+logger = logging.getLogger(__name__)
+# Remove any existing handlers to prevent duplicates
+for handler in logger.handlers[:]:
+    logger.removeHandler(handler)
+# Remove root logger handlers
+for handler in logging.getLogger().handlers[:]:
+    logging.getLogger().removeHandler(handler)
+
+# Add single stream handler
+handler = logging.StreamHandler()
+handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+logger.addHandler(handler)
+logger.setLevel(logging.DEBUG)
+
+# Database setup
+engine = create_engine(settings.database_url)
+SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+
+def wait_for_db(max_retries=5, retry_interval=5):
+    """Wait for database to be available."""
+    retry_count = 0
+    while retry_count < max_retries:
+        try:
+            db = SessionLocal()
+            try:
+                db.execute(text("SELECT 1"))
+                logger.info("Database connection successful")
+                return True
+            finally:
+                db.close()
+        except Exception as e:
+            retry_count += 1
+            if retry_count < max_retries:
+                logger.warning(f"Database connection attempt {retry_count} failed: {e}")
+                logger.info(f"Retrying in {retry_interval} seconds...")
+                # time.sleep, not asyncio.sleep: this is a sync function, so an
+                # un-awaited asyncio.sleep would return a coroutine and never pause
+                time.sleep(retry_interval)
+            else:
+                logger.error(f"Failed to connect to database after {max_retries} attempts: {e}")
+                return False
+    return False
+
+def cleanup_expired_otps():
+    """Delete expired OTPs from the database."""
+    try:
+        db = SessionLocal()
+        try:
+            # Use timezone-aware UTC for comparison to match models
+            now = datetime.now(timezone.utc)
+            logger.debug(f"Running cleanup check at {now}")
+
+            result = db.query(OTP).filter(
+                OTP.expires_at < now
+            ).delete()
+            db.commit()
+
+            # Always log the check, even if no deletions
+            if result > 0:
+                logger.info(f"Deleted {result} expired OTPs")
+            else:
+                logger.debug("No expired OTPs found to delete")
+
+        finally:
+            db.close()
+    except Exception as e:
+        logger.error(f"Error cleaning up expired OTPs: {e}")
+
+async def run_cleanup_loop():
+    """Run the cleanup task periodically."""
+    logger.info(f"Starting cleanup loop with interval: {settings.CLEANUP_INTERVAL_SECONDS} seconds")
+
+    while True:
+        logger.debug("Running cleanup cycle...")
+        cleanup_expired_otps()
+        logger.debug(f"Sleeping for {settings.CLEANUP_INTERVAL_SECONDS} seconds...")
+        await asyncio.sleep(settings.CLEANUP_INTERVAL_SECONDS)
+
+def main():
+    # Only show startup banner once
+    logger.info("Starting OTP cleanup service...")
+    logger.info(f"Database URL: {settings.database_url.replace(settings.POSTGRES_PASSWORD, '****')}")
+    logger.info(f"Cleanup interval: {settings.CLEANUP_INTERVAL_SECONDS} seconds")
+
+    # Wait for database with retries
+    if not wait_for_db():
+        logger.error("Failed to connect to database after retries. Exiting.")
+        return
+
+    try:
+        # Run the cleanup loop
+        asyncio.run(run_cleanup_loop())
+    except KeyboardInterrupt:
+        logger.info("Shutting down OTP cleanup service...")
+    except Exception as e:
+        logger.error(f"Error in cleanup service: {e}")
+        raise
+
+if __name__ == "__main__":
+    main()
diff --git a/config.py b/config.py
new file mode 100644
index 0000000..b869b74
--- /dev/null
+++ b/config.py
@@ -0,0 +1,28 @@
+from pydantic_settings import BaseSettings
+
+
+class Settings(BaseSettings):
+    # Database settings
+    POSTGRES_USER: str
+    POSTGRES_PASSWORD: str
+    POSTGRES_DB: str
+    POSTGRES_HOST: str = "localhost"
+    POSTGRES_PORT: str = "5432"
+
+    # Cleanup settings
+    CLEANUP_INTERVAL_SECONDS: int = 300  # 5 minutes
+
+    @property
+    def database_url(self) -> str:
+        """Construct the database URL from the settings."""
+        return (
+            f"postgresql://{self.POSTGRES_USER}:{self.POSTGRES_PASSWORD}"
+            f"@{self.POSTGRES_HOST}:{self.POSTGRES_PORT}/{self.POSTGRES_DB}"
+        )
+
+    class Config:
+        env_file = ".env"
+        case_sensitive = True
+
+
+settings = Settings()
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..0fe8432
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,18 @@
+version: '3.8'
+
+services:
+  cleanup:
+    build: .
+    container_name: otp_cleanup_service
+    env_file:
+      - .env
+    networks:
+      - jamandflows-network
+    restart: "no" # Don't auto-restart on failure
+
+volumes:
+  postgres_data:
+
+networks:
+  jamandflows-network:
+    external: true
diff --git a/models.py b/models.py
new file mode 100644
index 0000000..49a7476
--- /dev/null
+++ b/models.py
@@ -0,0 +1,25 @@
+from datetime import datetime, timezone
+from sqlalchemy import Column, DateTime, Integer, String
+from sqlalchemy.ext.declarative import declarative_base
+
+Base = declarative_base()
+
+class OTP(Base):
+    __tablename__ = "otps"
+    id = Column(Integer, primary_key=True, index=True)
+    email = Column(String, index=True, nullable=False)
+    otp_code = Column(String, nullable=False)
+    name = Column(String, nullable=False)
+    password = Column(String, nullable=True)
+    is_active = Column(Integer, default=1)
+    # Use timezone-aware DateTime columns and defaults
+    created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
+    expires_at = Column(DateTime(timezone=True), nullable=False)
+
+    def is_expired(self):
+        # Safely compare timezone-aware datetimes. If expires_at is naive, treat it as UTC.
+        now = datetime.now(timezone.utc)
+        expires = self.expires_at
+        if expires is not None and expires.tzinfo is None:
+            expires = expires.replace(tzinfo=timezone.utc)
+        return now > expires
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..820382e
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+sqlalchemy>=2.0.0
+pydantic>=2.0.0
+pydantic-settings>=2.0.0
+psycopg2-binary>=2.9.0
+python-dotenv>=1.0.0

From 8c96f2357db6b6841ad6998e9e5cea432f5d5b52 Mon Sep 17 00:00:00 2001
From: Durgesh Ninave
Date: Wed, 8 Oct 2025 00:05:01 +0530
Subject: [PATCH 2/2] fix isort issue

---
 cleanup.py | 2 +-
 models.py  | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/cleanup.py b/cleanup.py
index bedbdf0..5990307 100644
--- a/cleanup.py
+++ b/cleanup.py
@@ -7,7 +7,7 @@
 from sqlalchemy.orm import sessionmaker
 
 from config import settings
-from models import Base, OTP
+from models import OTP, Base
 
 # Set up logging with explicit handler control
 logger = logging.getLogger(__name__)
diff --git a/models.py b/models.py
index 49a7476..cd8fc50 100644
--- a/models.py
+++ b/models.py
@@ -1,4 +1,5 @@
 from datetime import datetime, timezone
+
 from sqlalchemy import Column, DateTime, Integer, String
 from sqlalchemy.ext.declarative import declarative_base
 
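
A quick way to exercise the cleanup logic from patch 1 without a running PostgreSQL instance: the sketch below is a hypothetical `test_cleanup.py` (not part of this series) that issues the same `OTP.expires_at < now` delete that `cleanup_expired_otps()` runs, but against an in-memory SQLite database. It assumes `models.py` from patch 1 is on the import path.

```python
# test_cleanup.py -- a minimal sketch, NOT part of the patch series above.
# Exercises the same delete query cleanup.py runs against PostgreSQL,
# using an in-memory SQLite database instead.
from datetime import datetime, timedelta, timezone

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from models import OTP, Base


def test_expired_otps_are_deleted():
    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    now = datetime.now(timezone.utc)
    session.add_all(
        [
            OTP(email="a@example.com", otp_code="111111", name="a",
                expires_at=now - timedelta(minutes=5)),  # already expired
            OTP(email="b@example.com", otp_code="222222", name="b",
                expires_at=now + timedelta(minutes=5)),  # still valid
        ]
    )
    session.commit()

    # Same delete that cleanup_expired_otps() issues on each cycle
    deleted = session.query(OTP).filter(OTP.expires_at < now).delete()
    session.commit()

    assert deleted == 1
    assert session.query(OTP).count() == 1
```

With `pytest` installed, `pytest test_cleanup.py` should pass. SQLite stores these timestamps without their UTC offset, but the inserted rows and the filter value are rendered identically, so the comparison behaves the same way it does against PostgreSQL's timezone-aware columns.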